-rw-r--r--  meta/classes/base.bbclass  1
-rw-r--r--  meta/classes/cve-check.bbclass  107
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass  2
-rw-r--r--  meta/classes/sanity.bbclass  2
-rw-r--r--  meta/conf/distro/include/yocto-uninative.inc  10
-rw-r--r--  meta/lib/oe/sdk.py  4
-rw-r--r--  meta/lib/oeqa/buildperf/base.py  2
-rw-r--r--  meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch  64
-rw-r--r--  meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch  60
-rw-r--r--  meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch  670
-rw-r--r--  meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch  278
-rw-r--r--  meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch  512
-rw-r--r--  meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch  911
-rw-r--r--  meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch  80
-rw-r--r--  meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch  140
-rw-r--r--  meta/recipes-connectivity/bind/bind_9.11.5-P4.bb  12
-rw-r--r--  meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch  29
-rw-r--r--  meta/recipes-connectivity/libpcap/libpcap_1.9.1.bb (renamed from meta/recipes-connectivity/libpcap/libpcap_1.9.0.bb)  5
-rw-r--r--  meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch  46
-rw-r--r--  meta/recipes-connectivity/openssh/openssh_7.9p1.bb  1
-rw-r--r--  meta/recipes-connectivity/openssl/openssl/CVE-2019-1543.patch  69
-rw-r--r--  meta/recipes-connectivity/openssl/openssl/afalg.patch  6
-rw-r--r--  meta/recipes-connectivity/openssl/openssl/reproducible.patch  32
-rw-r--r--  meta/recipes-connectivity/openssl/openssl10_1.0.2u.bb (renamed from meta/recipes-connectivity/openssl/openssl10_1.0.2r.bb)  6
-rw-r--r--  meta/recipes-connectivity/openssl/openssl_1.1.1g.bb (renamed from meta/recipes-connectivity/openssl/openssl_1.1.1b.bb)  19
-rw-r--r--  meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch  47
-rw-r--r--  meta/recipes-connectivity/ppp/ppp_2.4.7.bb  1
-rw-r--r--  meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch  82
-rw-r--r--  meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.7.bb  1
-rw-r--r--  meta/recipes-core/glib-2.0/glib-2.0/0001-gfile-Limit-access-to-files-when-copying.patch  57
-rw-r--r--  meta/recipes-core/glib-2.0/glib-2.0_2.58.3.bb  1
-rw-r--r--  meta/recipes-core/glibc/glibc/CVE-2019-19126.patch  32
-rw-r--r--  meta/recipes-core/glibc/glibc_2.29.bb  1
-rw-r--r--  meta/recipes-core/images/build-appliance-image_15.0.0.bb  2
-rw-r--r--  meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch  37
-rw-r--r--  meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch  36
-rw-r--r--  meta/recipes-core/libxml/libxml2/fix-CVE-2019-19956.patch  38
-rw-r--r--  meta/recipes-core/libxml/libxml2_2.9.8.bb  3
-rw-r--r--  meta/recipes-core/meta/cve-update-db-native.bb  48
-rw-r--r--  meta/recipes-core/ncurses/ncurses_6.1+20181013.bb  2
-rw-r--r--  meta/recipes-core/systemd/systemd.inc  9
-rw-r--r--  meta/recipes-core/systemd/systemd/0001-bus_open-leak-sd_event_source-when-udevadm-trigger.patch  35
-rw-r--r--  meta/recipes-core/systemd/systemd_241.bb  1
-rw-r--r--  meta/recipes-devtools/binutils/binutils-2.32.inc  2
-rw-r--r--  meta/recipes-devtools/binutils/binutils/CVE-2019-17450.patch  99
-rw-r--r--  meta/recipes-devtools/binutils/binutils/CVE-2019-17451.patch  51
-rw-r--r--  meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch  49
-rw-r--r--  meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch  57
-rw-r--r--  meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch  76
-rw-r--r--  meta/recipes-devtools/e2fsprogs/e2fsprogs_1.44.5.bb  3
-rw-r--r--  meta/recipes-devtools/file/file/CVE-2019-18218.patch  55
-rw-r--r--  meta/recipes-devtools/file/file_5.36.bb  3
-rw-r--r--  meta/recipes-devtools/flex/flex_2.6.0.bb  3
-rw-r--r--  meta/recipes-devtools/gdb/gdb-8.2.1.inc  1
-rw-r--r--  meta/recipes-devtools/gdb/gdb/CVE-2019-1010180.patch  132
-rw-r--r--  meta/recipes-devtools/git/git.inc  2
-rw-r--r--  meta/recipes-devtools/git/git_2.20.1.bb  11
-rw-r--r--  meta/recipes-devtools/git/git_2.20.4.bb  11
-rw-r--r--  meta/recipes-devtools/go/go-1.12.inc  1
-rw-r--r--  meta/recipes-devtools/go/go-1.12/0010-fix-CVE-2019-17596.patch  42
-rw-r--r--  meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch  116
-rw-r--r--  meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch  43
-rw-r--r--  meta/recipes-devtools/nasm/nasm_2.14.02.bb  5
-rw-r--r--  meta/recipes-devtools/python/python-native/0001-python-native-fix-one-do_populate_sysroot-warning.patch  25
-rw-r--r--  meta/recipes-devtools/python/python-native_2.7.18.bb (renamed from meta/recipes-devtools/python/python-native_2.7.16.bb)  2
-rw-r--r--  meta/recipes-devtools/python/python.inc  24
-rw-r--r--  meta/recipes-devtools/python/python/0001-2.7-bpo-34155-Dont-parse-domains-containing-GH-13079.patch  90
-rw-r--r--  meta/recipes-devtools/python/python/0001-python-Resolve-intermediate-staging-issues.patch (renamed from meta/recipes-devtools/python/python/builddir.patch)  53
-rw-r--r--  meta/recipes-devtools/python/python/CVE-2019-9740.patch  215
-rw-r--r--  meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948-fix.patch  55
-rw-r--r--  meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948.patch  55
-rw-r--r--  meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636-fix.patch  28
-rw-r--r--  meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636.patch  111
-rw-r--r--  meta/recipes-devtools/python/python/bpo-36742-cve-2019-10160.patch  81
-rw-r--r--  meta/recipes-devtools/python/python/python2-manifest.json  1
-rw-r--r--  meta/recipes-devtools/python/python3_3.7.7.bb (renamed from meta/recipes-devtools/python/python3_3.7.5.bb)  11
-rw-r--r--  meta/recipes-devtools/python/python_2.7.18.bb (renamed from meta/recipes-devtools/python/python_2.7.16.bb)  2
-rw-r--r--  meta/recipes-devtools/rsync/rsync_3.1.3.bb  3
-rw-r--r--  meta/recipes-devtools/subversion/subversion_1.11.1.bb  2
-rw-r--r--  meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch  316
-rw-r--r--  meta/recipes-extended/cpio/cpio_2.12.bb  1
-rw-r--r--  meta/recipes-extended/ed/ed_1.15.bb  2
-rw-r--r--  meta/recipes-extended/iputils/iputils_s20180629.bb  4
-rw-r--r--  meta/recipes-extended/libarchive/libarchive/CVE-2019-19221.patch  101
-rw-r--r--  meta/recipes-extended/libarchive/libarchive_3.3.3.bb  1
-rw-r--r--  meta/recipes-extended/lighttpd/lighttpd/0001-core-fix-abort-in-http-parseopts-fixes-2945.patch  54
-rw-r--r--  meta/recipes-extended/lighttpd/lighttpd_1.4.53.bb  1
-rw-r--r--  meta/recipes-extended/pam/libpam_1.3.0.bb  2
-rw-r--r--  meta/recipes-extended/procps/procps_3.3.15.bb  3
-rw-r--r--  meta/recipes-extended/stress/stress_1.0.4.bb  2
-rw-r--r--  meta/recipes-extended/sudo/sudo_1.8.27.bb  2
-rw-r--r--  meta/recipes-extended/sysstat/sysstat/CVE-2019-19725.patch  28
-rw-r--r--  meta/recipes-extended/sysstat/sysstat_12.1.3.bb  4
-rw-r--r--  meta/recipes-extended/timezone/timezone.inc  10
-rw-r--r--  meta/recipes-graphics/xorg-lib/libxfont2_2.0.3.bb  2
-rw-r--r--  meta/recipes-kernel/linux-firmware/linux-firmware_20190815.bb (renamed from meta/recipes-kernel/linux-firmware/linux-firmware_git.bb)  26
-rw-r--r--  meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb  6
-rw-r--r--  meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb  8
-rw-r--r--  meta/recipes-kernel/linux/linux-yocto_4.19.bb  20
-rw-r--r--  meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb  3
-rw-r--r--  meta/recipes-sato/webkit/webkitgtk_2.22.7.bb  2
-rw-r--r--  meta/recipes-support/boost/boost.inc  2
-rw-r--r--  meta/recipes-support/boost/boost/0001-dont-setup-compiler-flags-m32-m64.patch  42
-rw-r--r--  meta/recipes-support/boost/boost_1.69.0.bb  1
-rw-r--r--  meta/recipes-support/gnupg/gnupg/0001-Woverride-init-is-not-needed-with-gcc-9.patch  13
-rw-r--r--  meta/recipes-support/gnupg/gnupg_2.2.17.bb (renamed from meta/recipes-support/gnupg/gnupg_2.2.13.bb)  5
-rw-r--r--  meta/recipes-support/libsoup/libsoup-2.4_2.64.2.bb  2
-rw-r--r--  meta/recipes-support/lz4/lz4_1.8.3.bb  3
-rw-r--r--  meta/recipes-support/popt/popt_1.16.bb  2
-rw-r--r--  meta/recipes-support/sqlite/files/0001-Fix-CVE-2019-16168.patch  40
-rw-r--r--  meta/recipes-support/sqlite/sqlite3_3.27.2.bb  1
111 files changed, 4782 insertions(+), 948 deletions(-)
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 1636c6ef93..d6f566a413 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -476,6 +476,7 @@ python () {
# If we're building a target package we need to use fakeroot (pseudo)
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_unpack', 'umask', '022')
d.setVarFlag('do_configure', 'umask', '022')
d.setVarFlag('do_compile', 'umask', '022')
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index c00d2910be..0ab022b135 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -26,7 +26,7 @@ CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.0.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
@@ -62,7 +62,7 @@ python do_cve_check () {
}
-addtask cve_check after do_unpack before do_build
+addtask cve_check before do_build
do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
do_cve_check[nostamp] = "1"
@@ -70,7 +70,6 @@ python cve_check_cleanup () {
"""
Delete the file used to gather all the CVE information.
"""
-
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
}
@@ -166,7 +165,6 @@ def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- import ast, csv, tempfile, subprocess, io
from distutils.version import LooseVersion
cves_unpatched = []
@@ -188,63 +186,74 @@ def check_cves(d, patched_cves):
cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
import sqlite3
- db_file = d.getVar("CVE_CHECK_DB_FILE")
- conn = sqlite3.connect(db_file)
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
+ # For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
for product in products:
- c = conn.cursor()
if ":" in product:
vendor, product = product.split(":", 1)
- c.execute("SELECT * FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR IS ?", (product, vendor))
else:
- c.execute("SELECT * FROM PRODUCTS WHERE PRODUCT IS ?", (product,))
+ vendor = "%"
- for row in c:
- cve = row[0]
- version_start = row[3]
- operator_start = row[4]
- version_end = row[5]
- operator_end = row[6]
+ # Find all relevant CVE IDs.
+ for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve = cverow[0]
if cve in cve_whitelist:
bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
+ # TODO: this should be in the report as 'whitelisted'
+ patched_cves.add(cve)
+ continue
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
- else:
- to_append = False
- if (operator_start == '=' and pv == version_start):
- cves_unpatched.append(cve)
+ continue
+
+ vulnerable = False
+ for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ (_, _, _, version_start, operator_start, version_end, operator_end) = row
+ #bb.debug(2, "Evaluating row " + str(row))
+
+ if (operator_start == '=' and pv == version_start) or version_start == '-':
+ vulnerable = True
else:
if operator_start:
try:
- to_append_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
- to_append_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
+ vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
+ vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
except:
- bb.note("%s: Failed to compare %s %s %s for %s" %
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
- to_append_start = False
+ vulnerable_start = False
else:
- to_append_start = False
+ vulnerable_start = False
if operator_end:
try:
- to_append_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
- to_append_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
+ vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
+ vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
except:
- bb.note("%s: Failed to compare %s %s %s for %s" %
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
- to_append_end = False
+ vulnerable_end = False
else:
- to_append_end = False
+ vulnerable_end = False
if operator_start and operator_end:
- to_append = to_append_start and to_append_end
+ vulnerable = vulnerable_start and vulnerable_end
else:
- to_append = to_append_start or to_append_end
+ vulnerable = vulnerable_start or vulnerable_end
- if to_append:
+ if vulnerable:
+ bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
cves_unpatched.append(cve)
- bb.debug(2, "%s-%s is not patched for %s" % (product, pv, cve))
+ break
+
+ if not vulnerable:
+ bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
+ # TODO: not patched but not vulnerable
+ patched_cves.add(cve)
+
conn.close()
return (list(patched_cves), cves_unpatched)
@@ -252,31 +261,23 @@ def check_cves(d, patched_cves):
def get_cve_info(d, cves):
"""
Get CVE information from the database.
-
- Unfortunately the only way to get CVE info is set the output to
- html (hard to parse) or query directly the database.
"""
- try:
- import sqlite3
- except ImportError:
- from pysqlite2 import dbapi2 as sqlite3
+ import sqlite3
cve_data = {}
- db_file = d.getVar("CVE_CHECK_DB_FILE")
- placeholder = ",".join("?" * len(cves))
- query = "SELECT * FROM NVD WHERE id IN (%s)" % placeholder
- conn = sqlite3.connect(db_file)
- cur = conn.cursor()
- for row in cur.execute(query, tuple(cves)):
- cve_data[row[0]] = {}
- cve_data[row[0]]["summary"] = row[1]
- cve_data[row[0]]["scorev2"] = row[2]
- cve_data[row[0]]["scorev3"] = row[3]
- cve_data[row[0]]["modified"] = row[4]
- cve_data[row[0]]["vector"] = row[5]
- conn.close()
+ conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+
+ for cve in cves:
+ for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cve_data[row[0]] = {}
+ cve_data[row[0]]["summary"] = row[1]
+ cve_data[row[0]]["scorev2"] = row[2]
+ cve_data[row[0]]["scorev3"] = row[3]
+ cve_data[row[0]]["modified"] = row[4]
+ cve_data[row[0]]["vector"] = row[5]
+ conn.close()
return cve_data
def cve_write_data(d, patched, unpatched, cve_data):
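[Editor's note] The reworked check_cves() above reduces each CPE range row to a pair of boundary comparisons with distutils LooseVersion. A minimal standalone sketch of that evaluation (illustrative only; the function name and test values below are not part of cve-check.bbclass, and the try/except fallback around the comparisons is omitted for brevity):

```python
# Hypothetical sketch of the version-range test used in check_cves();
# not the recipe code itself.
from distutils.version import LooseVersion

def is_vulnerable(pv, version_start, operator_start, version_end, operator_end):
    """Return True if package version 'pv' falls inside the affected range."""
    # An exact match, or a start version of '-' (meaning "any version"), is vulnerable.
    if (operator_start == '=' and pv == version_start) or version_start == '-':
        return True

    vulnerable_start = False
    if operator_start:
        vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
        vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))

    vulnerable_end = False
    if operator_end:
        vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
        vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))

    # With both bounds present, the version must satisfy both; otherwise either suffices.
    if operator_start and operator_end:
        return vulnerable_start and vulnerable_end
    return vulnerable_start or vulnerable_end

print(is_vulnerable("7.64.0", "7.0.0", ">=", "7.65.0", "<"))   # True: inside the range
print(is_vulnerable("7.66.0", "7.0.0", ">=", "7.65.0", "<"))   # False: above the end bound
```

Note also that the database is now opened through a read-only URI (sqlite3.connect(d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro"), uri=True)), presumably so that the many per-recipe do_cve_check tasks cannot modify or lock the shared NVD database.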
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index 800e1175d7..f845f7d47d 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -121,7 +121,7 @@ SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTR
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
import shutil
- cleanpaths = 'cache conf/sanity_info tmp'.split()
+ cleanpaths = ['cache', 'tmp']
for pth in cleanpaths:
fullpth = os.path.join(sdkbasepath, pth)
if os.path.isdir(fullpth):
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 9429202dca..3b41c69ef6 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -919,7 +919,7 @@ def check_sanity(sanity_data):
last_tmpdir = ""
last_sstate_dir = ""
last_nativelsbstr = ""
- sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
+ sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
if os.path.exists(sanityverfile):
with open(sanityverfile, 'r') as f:
for line in f:
diff --git a/meta/conf/distro/include/yocto-uninative.inc b/meta/conf/distro/include/yocto-uninative.inc
index ad75d3e2a3..889695eae3 100644
--- a/meta/conf/distro/include/yocto-uninative.inc
+++ b/meta/conf/distro/include/yocto-uninative.inc
@@ -6,9 +6,9 @@
# to the distro running on the build machine.
#
-UNINATIVE_MAXGLIBCVERSION = "2.30"
+UNINATIVE_MAXGLIBCVERSION = "2.31"
-UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.7/"
-UNINATIVE_CHECKSUM[aarch64] ?= "e76a45886ee8a0b3904b761c17ac8ff91edf9811ee455f1832d10763ba794dfc"
-UNINATIVE_CHECKSUM[i686] ?= "810d027dfb1c7675226afbcec07808770516c969ee7378f6d8240281083f8924"
-UNINATIVE_CHECKSUM[x86_64] ?= "9498d8bba047499999a7310ac2576d0796461184965351a56f6d32c888a1f216"
+UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.8/"
+UNINATIVE_CHECKSUM[aarch64] ?= "989187344bf9539b464fb7ed9c223e51f4bdb4c7a677d2c314e6fed393176efe"
+UNINATIVE_CHECKSUM[i686] ?= "cc3e45bc8594488b407363e3fa9af5a099279dab2703c64342098719bd674990"
+UNINATIVE_CHECKSUM[x86_64] ?= "a09922172c3a439105e0ae6b943daad2d83505b17da0aba97961ff433b8c21ab"
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index b4fbdb799e..d02a274812 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -88,10 +88,6 @@ class Sdk(object, metaclass=ABCMeta):
bb.warn("cannot remove SDK dir: %s" % path)
def install_locales(self, pm):
- # This is only relevant for glibc
- if self.d.getVar("TCLIBC") != "glibc":
- return
-
linguas = self.d.getVar("SDKIMAGE_LINGUAS")
if linguas:
import fnmatch
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 3b2fed549f..5f1805d86c 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -462,7 +462,7 @@ class BuildPerfTestCase(unittest.TestCase):
def rm_tmp(self):
"""Cleanup temporary/intermediate files and directories"""
log.debug("Removing temporary and cache files")
- for name in ['bitbake.lock', 'conf/sanity_info',
+ for name in ['bitbake.lock', 'cache/sanity_info',
self.bb_vars['TMPDIR']]:
oe.path.remove(name, recurse=True)
diff --git a/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
new file mode 100644
index 0000000000..2fed99e1bb
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
@@ -0,0 +1,64 @@
+Backport patch to fix CVE-2019-6471.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2019-6471
+
+CVE: CVE-2019-6471
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/3a9c7bb]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 3a9c7bb80d4a609b86427406d9dd783199920b5b Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Tue, 19 Mar 2019 14:14:21 +1100
+Subject: [PATCH] move item_out test inside lock in dns_dispatch_getnext()
+
+(cherry picked from commit 60c42f849d520564ed42e5ed0ba46b4b69c07712)
+---
+ lib/dns/dispatch.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/lib/dns/dispatch.c b/lib/dns/dispatch.c
+index 408beda367..3278db4a07 100644
+--- a/lib/dns/dispatch.c
++++ b/lib/dns/dispatch.c
+@@ -134,7 +134,7 @@ struct dns_dispentry {
+ isc_task_t *task;
+ isc_taskaction_t action;
+ void *arg;
+- bool item_out;
++ bool item_out;
+ dispsocket_t *dispsocket;
+ ISC_LIST(dns_dispatchevent_t) items;
+ ISC_LINK(dns_dispentry_t) link;
+@@ -3422,13 +3422,14 @@ dns_dispatch_getnext(dns_dispentry_t *resp, dns_dispatchevent_t **sockevent) {
+ disp = resp->disp;
+ REQUIRE(VALID_DISPATCH(disp));
+
+- REQUIRE(resp->item_out == true);
+- resp->item_out = false;
+-
+ ev = *sockevent;
+ *sockevent = NULL;
+
+ LOCK(&disp->lock);
++
++ REQUIRE(resp->item_out == true);
++ resp->item_out = false;
++
+ if (ev->buffer.base != NULL)
+ free_buffer(disp, ev->buffer.base, ev->buffer.length);
+ free_devent(disp, ev);
+@@ -3573,6 +3574,9 @@ dns_dispatch_removeresponse(dns_dispentry_t **resp,
+ isc_task_send(disp->task[0], &disp->ctlevent);
+ }
+
++/*
++ * disp must be locked.
++ */
+ static void
+ do_cancel(dns_dispatch_t *disp) {
+ dns_dispatchevent_t *ev;
+--
+2.20.1
+
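[Editor's note] The CVE-2019-6471 fix above is an ordering change: the item_out flag was tested and cleared before LOCK(&disp->lock) was taken, so two threads could both observe it as set. A tiny, language-neutral illustration of why a test-and-clear of shared state must happen under the lock (sketched in Python threading, not BIND's internal API):

```python
# Illustrative only: mirrors the item_out / LOCK(&disp->lock) reordering above.
import threading

lock = threading.Lock()
item_out = True          # stands in for resp->item_out
claims = []              # records which threads "won" the flag

def getnext(thread_id):
    global item_out
    with lock:                        # test-and-clear happens atomically
        if item_out:
            item_out = False
            claims.append(thread_id)  # only one thread can ever get here

threads = [threading.Thread(target=getnext, args=(i,)) for i in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()

assert len(claims) == 1   # if the test were done outside the lock, this could fail
```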
diff --git a/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
new file mode 100644
index 0000000000..48ae125f84
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
@@ -0,0 +1,60 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ec2d50d]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From ec2d50da8d81814640e28593d912f4b96c7efece Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
+Date: Thu, 3 Jan 2019 14:17:43 +0100
+Subject: [PATCH 1/6] fix enforcement of tcp-clients (v1)
+
+tcp-clients settings could be exceeded in some cases by
+creating more and more active TCP clients that are over
+the set quota limit, which in the end could lead to a
+DoS attack by e.g. exhaustion of file descriptors.
+
+If TCP client we're closing went over the quota (so it's
+not attached to a quota) mark it as mortal - so that it
+will be destroyed and not set up to listen for new
+connections - unless it's the last client for a specific
+interface.
+
+(cherry picked from commit f97131d21b97381cef72b971b157345c1f9b4115)
+(cherry picked from commit 9689ffc485df8f971f0ad81ab8ab1f5389493776)
+---
+ bin/named/client.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index d482da7121..0739dd48af 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -421,8 +421,19 @@ exit_check(ns_client_t *client) {
+ isc_socket_detach(&client->tcpsocket);
+ }
+
+- if (client->tcpquota != NULL)
++ if (client->tcpquota != NULL) {
+ isc_quota_detach(&client->tcpquota);
++ } else {
++ /*
++ * We went over quota with this client, we don't
++ * want to restart listening unless this is the
++ * last client on this interface, which is
++ * checked later.
++ */
++ if (TCP_CLIENT(client)) {
++ client->mortal = true;
++ }
++ }
+
+ if (client->timerset) {
+ (void)isc_timer_reset(client->timer,
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
new file mode 100644
index 0000000000..ca4e8b1a66
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
@@ -0,0 +1,670 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/719f604]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 719f604e3fad5b7479bd14e2fa0ef4413f0a8fdc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
+Date: Fri, 4 Jan 2019 12:50:51 +0100
+Subject: [PATCH 2/6] tcp-clients could still be exceeded (v2)
+
+the TCP client quota could still be ineffective under some
+circumstances. this change:
+
+- improves quota accounting to ensure that TCP clients are
+ properly limited, while still guaranteeing that at least one client
+ is always available to serve TCP connections on each interface.
+- uses more descriptive names and removes one (ntcptarget) that
+ was no longer needed
+- adds comments
+
+(cherry picked from commit 924651f1d5e605cd186d03f4f7340bcc54d77cc2)
+(cherry picked from commit 55a7a458e30e47874d34bdf1079eb863a0512396)
+---
+ bin/named/client.c | 311 ++++++++++++++++++++-----
+ bin/named/include/named/client.h | 14 +-
+ bin/named/include/named/interfacemgr.h | 11 +-
+ bin/named/interfacemgr.c | 8 +-
+ 4 files changed, 267 insertions(+), 77 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 0739dd48af..a7b49a0f71 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -246,10 +246,11 @@ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
+ static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ dns_dispatch_t *disp, bool tcp);
+ static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- isc_socket_t *sock);
++ isc_socket_t *sock, ns_client_t *oldclient);
+ static inline bool
+-allowed(isc_netaddr_t *addr, dns_name_t *signer, isc_netaddr_t *ecs_addr,
+- uint8_t ecs_addrlen, uint8_t *ecs_scope, dns_acl_t *acl);
++allowed(isc_netaddr_t *addr, dns_name_t *signer,
++ isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
++ uint8_t *ecs_scope, dns_acl_t *acl)
+ static void compute_cookie(ns_client_t *client, uint32_t when,
+ uint32_t nonce, const unsigned char *secret,
+ isc_buffer_t *buf);
+@@ -405,8 +406,11 @@ exit_check(ns_client_t *client) {
+ */
+ INSIST(client->recursionquota == NULL);
+ INSIST(client->newstate <= NS_CLIENTSTATE_READY);
+- if (client->nreads > 0)
++
++ if (client->nreads > 0) {
+ dns_tcpmsg_cancelread(&client->tcpmsg);
++ }
++
+ if (client->nreads != 0) {
+ /* Still waiting for read cancel completion. */
+ return (true);
+@@ -416,25 +420,58 @@ exit_check(ns_client_t *client) {
+ dns_tcpmsg_invalidate(&client->tcpmsg);
+ client->tcpmsg_valid = false;
+ }
++
+ if (client->tcpsocket != NULL) {
+ CTRACE("closetcp");
+ isc_socket_detach(&client->tcpsocket);
++
++ if (client->tcpactive) {
++ LOCK(&client->interface->lock);
++ INSIST(client->interface->ntcpactive > 0);
++ client->interface->ntcpactive--;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = false;
++ }
+ }
+
+ if (client->tcpquota != NULL) {
+- isc_quota_detach(&client->tcpquota);
+- } else {
+ /*
+- * We went over quota with this client, we don't
+- * want to restart listening unless this is the
+- * last client on this interface, which is
+- * checked later.
++ * If we are not in a pipeline group, or
++ * we are the last client in the group, detach from
++ * tcpquota; otherwise, transfer the quota to
++ * another client in the same group.
+ */
+- if (TCP_CLIENT(client)) {
+- client->mortal = true;
++ if (!ISC_LINK_LINKED(client, glink) ||
++ (client->glink.next == NULL &&
++ client->glink.prev == NULL))
++ {
++ isc_quota_detach(&client->tcpquota);
++ } else if (client->glink.next != NULL) {
++ INSIST(client->glink.next->tcpquota == NULL);
++ client->glink.next->tcpquota = client->tcpquota;
++ client->tcpquota = NULL;
++ } else {
++ INSIST(client->glink.prev->tcpquota == NULL);
++ client->glink.prev->tcpquota = client->tcpquota;
++ client->tcpquota = NULL;
+ }
+ }
+
++ /*
++ * Unlink from pipeline group.
++ */
++ if (ISC_LINK_LINKED(client, glink)) {
++ if (client->glink.next != NULL) {
++ client->glink.next->glink.prev =
++ client->glink.prev;
++ }
++ if (client->glink.prev != NULL) {
++ client->glink.prev->glink.next =
++ client->glink.next;
++ }
++ ISC_LINK_INIT(client, glink);
++ }
++
+ if (client->timerset) {
+ (void)isc_timer_reset(client->timer,
+ isc_timertype_inactive,
+@@ -455,15 +492,16 @@ exit_check(ns_client_t *client) {
+ * that already. Check whether this client needs to remain
+ * active and force it to go inactive if not.
+ *
+- * UDP clients go inactive at this point, but TCP clients
+- * may remain active if we have fewer active TCP client
+- * objects than desired due to an earlier quota exhaustion.
++ * UDP clients go inactive at this point, but a TCP client
++ * will needs to remain active if no other clients are
++ * listening for TCP requests on this interface, to
++ * prevent this interface from going nonresponsive.
+ */
+ if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
+ LOCK(&client->interface->lock);
+- if (client->interface->ntcpcurrent <
+- client->interface->ntcptarget)
++ if (client->interface->ntcpaccepting == 0) {
+ client->mortal = false;
++ }
+ UNLOCK(&client->interface->lock);
+ }
+
+@@ -472,15 +510,17 @@ exit_check(ns_client_t *client) {
+ * queue for recycling.
+ */
+ if (client->mortal) {
+- if (client->newstate > NS_CLIENTSTATE_INACTIVE)
++ if (client->newstate > NS_CLIENTSTATE_INACTIVE) {
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
++ }
+ }
+
+ if (NS_CLIENTSTATE_READY == client->newstate) {
+ if (TCP_CLIENT(client)) {
+ client_accept(client);
+- } else
++ } else {
+ client_udprecv(client);
++ }
+ client->newstate = NS_CLIENTSTATE_MAX;
+ return (true);
+ }
+@@ -492,41 +532,57 @@ exit_check(ns_client_t *client) {
+ /*
+ * We are trying to enter the inactive state.
+ */
+- if (client->naccepts > 0)
++ if (client->naccepts > 0) {
+ isc_socket_cancel(client->tcplistener, client->task,
+ ISC_SOCKCANCEL_ACCEPT);
++ }
+
+ /* Still waiting for accept cancel completion. */
+- if (! (client->naccepts == 0))
++ if (! (client->naccepts == 0)) {
+ return (true);
++ }
+
+ /* Accept cancel is complete. */
+- if (client->nrecvs > 0)
++ if (client->nrecvs > 0) {
+ isc_socket_cancel(client->udpsocket, client->task,
+ ISC_SOCKCANCEL_RECV);
++ }
+
+ /* Still waiting for recv cancel completion. */
+- if (! (client->nrecvs == 0))
++ if (! (client->nrecvs == 0)) {
+ return (true);
++ }
+
+ /* Still waiting for control event to be delivered */
+- if (client->nctls > 0)
++ if (client->nctls > 0) {
+ return (true);
+-
+- /* Deactivate the client. */
+- if (client->interface)
+- ns_interface_detach(&client->interface);
++ }
+
+ INSIST(client->naccepts == 0);
+ INSIST(client->recursionquota == NULL);
+- if (client->tcplistener != NULL)
++ if (client->tcplistener != NULL) {
+ isc_socket_detach(&client->tcplistener);
+
+- if (client->udpsocket != NULL)
++ if (client->tcpactive) {
++ LOCK(&client->interface->lock);
++ INSIST(client->interface->ntcpactive > 0);
++ client->interface->ntcpactive--;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = false;
++ }
++ }
++ if (client->udpsocket != NULL) {
+ isc_socket_detach(&client->udpsocket);
++ }
+
+- if (client->dispatch != NULL)
++ /* Deactivate the client. */
++ if (client->interface != NULL) {
++ ns_interface_detach(&client->interface);
++ }
++
++ if (client->dispatch != NULL) {
+ dns_dispatch_detach(&client->dispatch);
++ }
+
+ client->attributes = 0;
+ client->mortal = false;
+@@ -551,10 +607,13 @@ exit_check(ns_client_t *client) {
+ client->newstate = NS_CLIENTSTATE_MAX;
+ if (!ns_g_clienttest && manager != NULL &&
+ !manager->exiting)
++ {
+ ISC_QUEUE_PUSH(manager->inactive, client,
+ ilink);
+- if (client->needshutdown)
++ }
++ if (client->needshutdown) {
+ isc_task_shutdown(client->task);
++ }
+ return (true);
+ }
+ }
+@@ -675,7 +734,6 @@ client_start(isc_task_t *task, isc_event_t *event) {
+ }
+ }
+
+-
+ /*%
+ * The client's task has received a shutdown event.
+ */
+@@ -2507,17 +2565,12 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ /*
+ * Pipeline TCP query processing.
+ */
+- if (client->message->opcode != dns_opcode_query)
++ if (client->message->opcode != dns_opcode_query) {
+ client->pipelined = false;
++ }
+ if (TCP_CLIENT(client) && client->pipelined) {
+- result = isc_quota_reserve(&ns_g_server->tcpquota);
+- if (result == ISC_R_SUCCESS)
+- result = ns_client_replace(client);
++ result = ns_client_replace(client);
+ if (result != ISC_R_SUCCESS) {
+- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
+- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
+- "no more TCP clients(read): %s",
+- isc_result_totext(result));
+ client->pipelined = false;
+ }
+ }
+@@ -3087,6 +3140,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->filter_aaaa = dns_aaaa_ok;
+ #endif
+ client->needshutdown = ns_g_clienttest;
++ client->tcpactive = false;
+
+ ISC_EVENT_INIT(&client->ctlevent, sizeof(client->ctlevent), 0, NULL,
+ NS_EVENT_CLIENTCONTROL, client_start, client, client,
+@@ -3100,6 +3154,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->formerrcache.id = 0;
+ ISC_LINK_INIT(client, link);
+ ISC_LINK_INIT(client, rlink);
++ ISC_LINK_INIT(client, glink);
+ ISC_QLINK_INIT(client, ilink);
+ client->keytag = NULL;
+ client->keytag_len = 0;
+@@ -3193,12 +3248,19 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+
+ INSIST(client->state == NS_CLIENTSTATE_READY);
+
++ /*
++ * The accept() was successful and we're now establishing a new
++ * connection. We need to make note of it in the client and
++ * interface objects so client objects can do the right thing
++ * when going inactive in exit_check() (see comments in
++ * client_accept() for details).
++ */
+ INSIST(client->naccepts == 1);
+ client->naccepts--;
+
+ LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpcurrent > 0);
+- client->interface->ntcpcurrent--;
++ INSIST(client->interface->ntcpaccepting > 0);
++ client->interface->ntcpaccepting--;
+ UNLOCK(&client->interface->lock);
+
+ /*
+@@ -3232,6 +3294,9 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
+ "accept failed: %s",
+ isc_result_totext(nevent->result));
++ if (client->tcpquota != NULL) {
++ isc_quota_detach(&client->tcpquota);
++ }
+ }
+
+ if (exit_check(client))
+@@ -3270,18 +3335,12 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ * deny service to legitimate TCP clients.
+ */
+ client->pipelined = false;
+- result = isc_quota_attach(&ns_g_server->tcpquota,
+- &client->tcpquota);
+- if (result == ISC_R_SUCCESS)
+- result = ns_client_replace(client);
+- if (result != ISC_R_SUCCESS) {
+- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
+- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
+- "no more TCP clients(accept): %s",
+- isc_result_totext(result));
+- } else if (ns_g_server->keepresporder == NULL ||
+- !allowed(&netaddr, NULL, NULL, 0, NULL,
+- ns_g_server->keepresporder)) {
++ result = ns_client_replace(client);
++ if (result == ISC_R_SUCCESS &&
++ (client->sctx->keepresporder == NULL ||
++ !allowed(&netaddr, NULL, NULL, 0, NULL,
++ ns_g_server->keepresporder)))
++ {
+ client->pipelined = true;
+ }
+
+@@ -3298,12 +3357,80 @@ client_accept(ns_client_t *client) {
+
+ CTRACE("accept");
+
++ /*
++ * The tcpquota object can only be simultaneously referenced a
++ * pre-defined number of times; this is configured by 'tcp-clients'
++ * in named.conf. If we can't attach to it here, that means the TCP
++ * client quota has been exceeded.
++ */
++ result = isc_quota_attach(&client->sctx->tcpquota,
++ &client->tcpquota);
++ if (result != ISC_R_SUCCESS) {
++ bool exit;
++
++ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
++ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
++ "no more TCP clients: %s",
++ isc_result_totext(result));
++
++ /*
++ * We have exceeded the system-wide TCP client
++ * quota. But, we can't just block this accept
++ * in all cases, because if we did, a heavy TCP
++ * load on other interfaces might cause this
++ * interface to be starved, with no clients able
++ * to accept new connections.
++ *
++ * So, we check here to see if any other client
++ * is already servicing TCP queries on this
++ * interface (whether accepting, reading, or
++ * processing).
++ *
++ * If so, then it's okay *not* to call
++ * accept - we can let this client to go inactive
++ * and the other one handle the next connection
++ * when it's ready.
++ *
++ * But if not, then we need to be a little bit
++ * flexible about the quota. We allow *one* extra
++ * TCP client through, to ensure we're listening on
++ * every interface.
++ *
++ * (Note: In practice this means that the *real*
++ * TCP client quota is tcp-clients plus the number
++ * of interfaces.)
++ */
++ LOCK(&client->interface->lock);
++ exit = (client->interface->ntcpactive > 0);
++ UNLOCK(&client->interface->lock);
++
++ if (exit) {
++ client->newstate = NS_CLIENTSTATE_INACTIVE;
++ (void)exit_check(client);
++ return;
++ }
++ }
++
++ /*
++ * By incrementing the interface's ntcpactive counter we signal
++ * that there is at least one client servicing TCP queries for the
++ * interface.
++ *
++ * We also make note of the fact in the client itself with the
++ * tcpactive flag. This ensures proper accounting by preventing
++ * us from accidentally incrementing or decrementing ntcpactive
++ * more than once per client object.
++ */
++ if (!client->tcpactive) {
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive++;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = true;
++ }
++
+ result = isc_socket_accept(client->tcplistener, client->task,
+ client_newconn, client);
+ if (result != ISC_R_SUCCESS) {
+- UNEXPECTED_ERROR(__FILE__, __LINE__,
+- "isc_socket_accept() failed: %s",
+- isc_result_totext(result));
+ /*
+ * XXXRTH What should we do? We're trying to accept but
+ * it didn't work. If we just give up, then TCP
+@@ -3311,12 +3438,39 @@ client_accept(ns_client_t *client) {
+ *
+ * For now, we just go idle.
+ */
++ UNEXPECTED_ERROR(__FILE__, __LINE__,
++ "isc_socket_accept() failed: %s",
++ isc_result_totext(result));
++ if (client->tcpquota != NULL) {
++ isc_quota_detach(&client->tcpquota);
++ }
+ return;
+ }
++
++ /*
++ * The client's 'naccepts' counter indicates that this client has
++ * called accept() and is waiting for a new connection. It should
++ * never exceed 1.
++ */
+ INSIST(client->naccepts == 0);
+ client->naccepts++;
++
++ /*
++ * The interface's 'ntcpaccepting' counter is incremented when
++ * any client calls accept(), and decremented in client_newconn()
++ * once the connection is established.
++ *
++ * When the client object is shutting down after handling a TCP
++ * request (see exit_check()), it looks to see whether this value is
++ * non-zero. If so, that means another client has already called
++ * accept() and is waiting to establish the next connection, which
++ * means the first client is free to go inactive. Otherwise,
++ * the first client must come back and call accept() again; this
++ * guarantees there will always be at least one client listening
++ * for new TCP connections on each interface.
++ */
+ LOCK(&client->interface->lock);
+- client->interface->ntcpcurrent++;
++ client->interface->ntcpaccepting++;
+ UNLOCK(&client->interface->lock);
+ }
+
+@@ -3390,13 +3544,14 @@ ns_client_replace(ns_client_t *client) {
+ tcp = TCP_CLIENT(client);
+ if (tcp && client->pipelined) {
+ result = get_worker(client->manager, client->interface,
+- client->tcpsocket);
++ client->tcpsocket, client);
+ } else {
+ result = get_client(client->manager, client->interface,
+ client->dispatch, tcp);
+ }
+- if (result != ISC_R_SUCCESS)
++ if (result != ISC_R_SUCCESS) {
+ return (result);
++ }
+
+ /*
+ * The responsibility for listening for new requests is hereby
+@@ -3585,6 +3740,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ client->attributes |= NS_CLIENTATTR_TCP;
+ isc_socket_attach(ifp->tcpsocket,
+ &client->tcplistener);
++
+ } else {
+ isc_socket_t *sock;
+
+@@ -3602,7 +3758,8 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ }
+
+ static isc_result_t
+-get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
++get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
++ ns_client_t *oldclient)
+ {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc_event_t *ev;
+@@ -3610,6 +3767,7 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
+ MTRACE("get worker");
+
+ REQUIRE(manager != NULL);
++ REQUIRE(oldclient != NULL);
+
+ if (manager->exiting)
+ return (ISC_R_SHUTTINGDOWN);
+@@ -3642,7 +3800,28 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+- client->tcpquota = &ns_g_server->tcpquota;
++
++ /*
++ * Transfer TCP quota to the new client.
++ */
++ INSIST(client->tcpquota == NULL);
++ INSIST(oldclient->tcpquota != NULL);
++ client->tcpquota = oldclient->tcpquota;
++ oldclient->tcpquota = NULL;
++
++ /*
++ * Link to a pipeline group, creating it if needed.
++ */
++ if (!ISC_LINK_LINKED(oldclient, glink)) {
++ oldclient->glink.next = NULL;
++ oldclient->glink.prev = NULL;
++ }
++ client->glink.next = oldclient->glink.next;
++ client->glink.prev = oldclient;
++ if (oldclient->glink.next != NULL) {
++ oldclient->glink.next->glink.prev = client;
++ }
++ oldclient->glink.next = client;
+
+ client->dscp = ifp->dscp;
+
+@@ -3656,6 +3835,12 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
+ (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
+ client->peeraddr_valid = true;
+
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive++;
++ UNLOCK(&client->interface->lock);
++
++ client->tcpactive = true;
++
+ INSIST(client->tcpmsg_valid == false);
+ dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
+ client->tcpmsg_valid = true;
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index b23a7b191d..1f7973f9c5 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -94,7 +94,8 @@ struct ns_client {
+ int nupdates;
+ int nctls;
+ int references;
+- bool needshutdown; /*
++ bool tcpactive;
++ bool needshutdown; /*
+ * Used by clienttest to get
+ * the client to go from
+ * inactive to free state
+@@ -130,9 +131,9 @@ struct ns_client {
+ isc_stdtime_t now;
+ isc_time_t tnow;
+ dns_name_t signername; /*%< [T]SIG key name */
+- dns_name_t * signer; /*%< NULL if not valid sig */
+- bool mortal; /*%< Die after handling request */
+- bool pipelined; /*%< TCP queries not in sequence */
++ dns_name_t *signer; /*%< NULL if not valid sig */
++ bool mortal; /*%< Die after handling request */
++ bool pipelined; /*%< TCP queries not in sequence */
+ isc_quota_t *tcpquota;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+@@ -143,8 +144,8 @@ struct ns_client {
+ isc_sockaddr_t destsockaddr;
+
+ isc_netaddr_t ecs_addr; /*%< EDNS client subnet */
+- uint8_t ecs_addrlen;
+- uint8_t ecs_scope;
++ uint8_t ecs_addrlen;
++ uint8_t ecs_scope;
+
+ struct in6_pktinfo pktinfo;
+ isc_dscp_t dscp;
+@@ -166,6 +167,7 @@ struct ns_client {
+
+ ISC_LINK(ns_client_t) link;
+ ISC_LINK(ns_client_t) rlink;
++ ISC_LINK(ns_client_t) glink;
+ ISC_QLINK(ns_client_t) ilink;
+ unsigned char cookie[8];
+ uint32_t expire;
+diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
+index 7d1883e1e8..61b08826a6 100644
+--- a/bin/named/include/named/interfacemgr.h
++++ b/bin/named/include/named/interfacemgr.h
+@@ -77,9 +77,14 @@ struct ns_interface {
+ /*%< UDP dispatchers. */
+ isc_socket_t * tcpsocket; /*%< TCP socket. */
+ isc_dscp_t dscp; /*%< "listen-on" DSCP value */
+- int ntcptarget; /*%< Desired number of concurrent
+- TCP accepts */
+- int ntcpcurrent; /*%< Current ditto, locked */
++ int ntcpaccepting; /*%< Number of clients
++ ready to accept new
++ TCP connections on this
++ interface */
++ int ntcpactive; /*%< Number of clients
++ servicing TCP queries
++ (whether accepting or
++ connected) */
+ int nudpdispatch; /*%< Number of UDP dispatches */
+ ns_clientmgr_t * clientmgr; /*%< Client manager. */
+ ISC_LINK(ns_interface_t) link;
+diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
+index 419927bf54..955096ef47 100644
+--- a/bin/named/interfacemgr.c
++++ b/bin/named/interfacemgr.c
+@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
+ * connections will be handled in parallel even though there is
+ * only one client initially.
+ */
+- ifp->ntcptarget = 1;
+- ifp->ntcpcurrent = 0;
++ ifp->ntcpaccepting = 0;
++ ifp->ntcpactive = 0;
+ ifp->nudpdispatch = 0;
+
+ ifp->dscp = -1;
+@@ -522,9 +522,7 @@ ns_interface_accepttcp(ns_interface_t *ifp) {
+ */
+ (void)isc_socket_filter(ifp->tcpsocket, "dataready");
+
+- result = ns_clientmgr_createclients(ifp->clientmgr,
+- ifp->ntcptarget, ifp,
+- true);
++ result = ns_clientmgr_createclients(ifp->clientmgr, 1, ifp, true);
+ if (result != ISC_R_SUCCESS) {
+ UNEXPECTED_ERROR(__FILE__, __LINE__,
+ "TCP ns_clientmgr_createclients(): %s",
+--
+2.20.1
+
diff --git a/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
new file mode 100644
index 0000000000..032cfb8c44
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
@@ -0,0 +1,278 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/366b4e1]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 366b4e1ede8aed690e981e07137cb1cb77879c36 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org>
+Date: Thu, 17 Jan 2019 15:53:38 +0100
+Subject: [PATCH 3/6] use reference counter for pipeline groups (v3)
+
+Track pipeline groups using a shared reference counter
+instead of a linked list.
+
+(cherry picked from commit 513afd33eb17d5dc41a3f0d2d38204ef8c5f6f91)
+(cherry picked from commit 9446629b730c59c4215f08d37fbaf810282fbccb)
+---
+ bin/named/client.c | 171 ++++++++++++++++++++-----------
+ bin/named/include/named/client.h | 2 +-
+ 2 files changed, 110 insertions(+), 63 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index a7b49a0f71..277656cef0 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -299,6 +299,75 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
+ }
+ }
+
++/*%
++ * Allocate a reference counter that will track the number of client structures
++ * using the TCP connection that 'client' called accept() for. This counter
++ * will be shared between all client structures associated with this TCP
++ * connection.
++ */
++static void
++pipeline_init(ns_client_t *client) {
++ isc_refcount_t *refs;
++
++ REQUIRE(client->pipeline_refs == NULL);
++
++ /*
++ * A global memory context is used for the allocation as different
++ * client structures may have different memory contexts assigned and a
++ * reference counter allocated here might need to be freed by a
++ * different client. The performance impact caused by memory context
++ * contention here is expected to be negligible, given that this code
++ * is only executed for TCP connections.
++ */
++ refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
++ isc_refcount_init(refs, 1);
++ client->pipeline_refs = refs;
++}
++
++/*%
++ * Increase the count of client structures using the TCP connection that
++ * 'source' is associated with and put a pointer to that count in 'target',
++ * thus associating it with the same TCP connection.
++ */
++static void
++pipeline_attach(ns_client_t *source, ns_client_t *target) {
++ int old_refs;
++
++ REQUIRE(source->pipeline_refs != NULL);
++ REQUIRE(target->pipeline_refs == NULL);
++
++ old_refs = isc_refcount_increment(source->pipeline_refs);
++ INSIST(old_refs > 0);
++ target->pipeline_refs = source->pipeline_refs;
++}
++
++/*%
++ * Decrease the count of client structures using the TCP connection that
++ * 'client' is associated with. If this is the last client using this TCP
++ * connection, free the reference counter and return true; otherwise, return
++ * false.
++ */
++static bool
++pipeline_detach(ns_client_t *client) {
++ isc_refcount_t *refs;
++ int old_refs;
++
++ REQUIRE(client->pipeline_refs != NULL);
++
++ refs = client->pipeline_refs;
++ client->pipeline_refs = NULL;
++
++ old_refs = isc_refcount_decrement(refs);
++ INSIST(old_refs > 0);
++
++ if (old_refs == 1) {
++ isc_mem_free(client->sctx->mctx, refs);
++ return (true);
++ }
++
++ return (false);
++}
++
+ /*%
+ * Check for a deactivation or shutdown request and take appropriate
+ * action. Returns true if either is in progress; in this case
+@@ -421,6 +490,40 @@ exit_check(ns_client_t *client) {
+ client->tcpmsg_valid = false;
+ }
+
++ if (client->tcpquota != NULL) {
++ if (client->pipeline_refs == NULL ||
++ pipeline_detach(client))
++ {
++ /*
++ * Only detach from the TCP client quota if
++ * there are no more client structures using
++ * this TCP connection.
++ *
++ * Note that we check 'pipeline_refs' and not
++ * 'pipelined' because in some cases (e.g.
++ * after receiving a request with an opcode
++ * different than QUERY) 'pipelined' is set to
++ * false after the reference counter gets
++ * allocated in pipeline_init() and we must
++ * still drop our reference as failing to do so
++ * would prevent the reference counter itself
++ * from being freed.
++ */
++ isc_quota_detach(&client->tcpquota);
++ } else {
++ /*
++ * There are other client structures using this
++ * TCP connection, so we cannot detach from the
++ * TCP client quota to prevent excess TCP
++ * connections from being accepted. However,
++ * this client structure might later be reused
++ * for accepting new connections and thus must
++ * have its 'tcpquota' field set to NULL.
++ */
++ client->tcpquota = NULL;
++ }
++ }
++
+ if (client->tcpsocket != NULL) {
+ CTRACE("closetcp");
+ isc_socket_detach(&client->tcpsocket);
+@@ -434,44 +537,6 @@ exit_check(ns_client_t *client) {
+ }
+ }
+
+- if (client->tcpquota != NULL) {
+- /*
+- * If we are not in a pipeline group, or
+- * we are the last client in the group, detach from
+- * tcpquota; otherwise, transfer the quota to
+- * another client in the same group.
+- */
+- if (!ISC_LINK_LINKED(client, glink) ||
+- (client->glink.next == NULL &&
+- client->glink.prev == NULL))
+- {
+- isc_quota_detach(&client->tcpquota);
+- } else if (client->glink.next != NULL) {
+- INSIST(client->glink.next->tcpquota == NULL);
+- client->glink.next->tcpquota = client->tcpquota;
+- client->tcpquota = NULL;
+- } else {
+- INSIST(client->glink.prev->tcpquota == NULL);
+- client->glink.prev->tcpquota = client->tcpquota;
+- client->tcpquota = NULL;
+- }
+- }
+-
+- /*
+- * Unlink from pipeline group.
+- */
+- if (ISC_LINK_LINKED(client, glink)) {
+- if (client->glink.next != NULL) {
+- client->glink.next->glink.prev =
+- client->glink.prev;
+- }
+- if (client->glink.prev != NULL) {
+- client->glink.prev->glink.next =
+- client->glink.next;
+- }
+- ISC_LINK_INIT(client, glink);
+- }
+-
+ if (client->timerset) {
+ (void)isc_timer_reset(client->timer,
+ isc_timertype_inactive,
+@@ -3130,6 +3195,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ dns_name_init(&client->signername, NULL);
+ client->mortal = false;
+ client->pipelined = false;
++ client->pipeline_refs = NULL;
+ client->tcpquota = NULL;
+ client->recursionquota = NULL;
+ client->interface = NULL;
+@@ -3154,7 +3220,6 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->formerrcache.id = 0;
+ ISC_LINK_INIT(client, link);
+ ISC_LINK_INIT(client, rlink);
+- ISC_LINK_INIT(client, glink);
+ ISC_QLINK_INIT(client, ilink);
+ client->keytag = NULL;
+ client->keytag_len = 0;
+@@ -3341,6 +3406,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
+ ns_g_server->keepresporder)))
+ {
++ pipeline_init(client);
+ client->pipelined = true;
+ }
+
+@@ -3800,35 +3866,16 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+-
+- /*
+- * Transfer TCP quota to the new client.
+- */
+- INSIST(client->tcpquota == NULL);
+- INSIST(oldclient->tcpquota != NULL);
+- client->tcpquota = oldclient->tcpquota;
+- oldclient->tcpquota = NULL;
+-
+- /*
+- * Link to a pipeline group, creating it if needed.
+- */
+- if (!ISC_LINK_LINKED(oldclient, glink)) {
+- oldclient->glink.next = NULL;
+- oldclient->glink.prev = NULL;
+- }
+- client->glink.next = oldclient->glink.next;
+- client->glink.prev = oldclient;
+- if (oldclient->glink.next != NULL) {
+- oldclient->glink.next->glink.prev = client;
+- }
+- oldclient->glink.next = client;
++ client->tcpquota = &client->sctx->tcpquota;
+
+ client->dscp = ifp->dscp;
+
+ client->attributes |= NS_CLIENTATTR_TCP;
+- client->pipelined = true;
+ client->mortal = true;
+
++ pipeline_attach(oldclient, client);
++ client->pipelined = true;
++
+ isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
+ isc_socket_attach(sock, &client->tcpsocket);
+ isc_socket_setname(client->tcpsocket, "worker-tcp", NULL);
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index 1f7973f9c5..aeed9ccdda 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -134,6 +134,7 @@ struct ns_client {
+ dns_name_t *signer; /*%< NULL if not valid sig */
+ bool mortal; /*%< Die after handling request */
+ bool pipelined; /*%< TCP queries not in sequence */
++ isc_refcount_t *pipeline_refs;
+ isc_quota_t *tcpquota;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+@@ -167,7 +168,6 @@ struct ns_client {
+
+ ISC_LINK(ns_client_t) link;
+ ISC_LINK(ns_client_t) rlink;
+- ISC_LINK(ns_client_t) glink;
+ ISC_QLINK(ns_client_t) ilink;
+ unsigned char cookie[8];
+ uint32_t expire;
+--
+2.20.1
+
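[Editor's note] Patch 3 above replaces the glink linked list with a shared, heap-allocated reference counter: every client structure on the same pipelined TCP connection holds the same counter, and only the last one to drop it releases the TCP quota. A rough Python analogue of that ownership model (illustrative only, not the isc_refcount API; names are hypothetical):

```python
# Sketch of the pipeline_init()/pipeline_attach()/pipeline_detach() ownership model.
import threading

class PipelineRefs:
    """Shared counter for all clients on one pipelined TCP connection."""
    def __init__(self):
        self._lock = threading.Lock()
        self._count = 1              # the accepting client holds the first reference

    def attach(self):
        with self._lock:
            assert self._count > 0
            self._count += 1

    def detach(self):
        """Return True if this was the last reference (caller then frees the quota)."""
        with self._lock:
            assert self._count > 0
            self._count -= 1
            return self._count == 0

group = PipelineRefs()          # first (accepting) client creates the group
group.attach()                  # a worker client joins the pipeline
print(group.detach())           # False: the worker leaves, the TCP quota stays held
print(group.detach())           # True: last client out, safe to detach the quota
```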
diff --git a/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
new file mode 100644
index 0000000000..034ab13303
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
@@ -0,0 +1,512 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001
+From: Evan Hunt <each@isc.org>
+Date: Fri, 5 Apr 2019 16:12:18 -0700
+Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks
+
+- ensure that tcpactive is cleaned up correctly when accept() fails.
+- set 'client->tcpattached' when the client is attached to the tcpquota.
+ carry this value on to new clients sharing the same pipeline group.
+ don't call isc_quota_detach() on the tcpquota unless tcpattached is
+ set. this way clients that were allowed to accept TCP connections
+ despite being over quota (and therefore, were never attached to the
+ quota) will not inadvertently detach from it and mess up the
+ accounting.
+- simplify the code for tcpquota disconnection by using a new function
+ tcpquota_disconnect().
+- before deciding whether to reject a new connection due to quota
+ exhaustion, check to see whether there are at least two active
+ clients. previously, this was "at least one", but that could be
+ insufficient if there was one other client in READING state (waiting
+ for messages on an open connection) but none in READY (listening
+ for new connections).
+- before deciding whether a TCP client object can to go inactive, we
+ must ensure there are enough other clients to maintain service
+ afterward -- both accepting new connections and reading/processing new
+ queries. A TCP client can't shut down unless at least one
+ client is accepting new connections and (in the case of pipelined
+ clients) at least one additional client is waiting to read.
+
+(cherry picked from commit c7394738b2445c16f728a88394864dd61baad900)
+(cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856)
+(cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3)
+---
+ bin/named/client.c | 244 +++++++++++++++++++------------
+ bin/named/include/named/client.h | 3 +-
+ 2 files changed, 152 insertions(+), 95 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 277656cef0..61e96dd28c 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event);
+ static void client_request(isc_task_t *task, isc_event_t *event);
+ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
+ static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, bool tcp);
++ dns_dispatch_t *disp, ns_client_t *oldclient,
++ bool tcp);
+ static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ isc_socket_t *sock, ns_client_t *oldclient);
+ static inline bool
+ allowed(isc_netaddr_t *addr, dns_name_t *signer,
+ isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
+- uint8_t *ecs_scope, dns_acl_t *acl)
++ uint8_t *ecs_scope, dns_acl_t *acl);
+ static void compute_cookie(ns_client_t *client, uint32_t when,
+ uint32_t nonce, const unsigned char *secret,
+ isc_buffer_t *buf);
+@@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) {
+ * contention here is expected to be negligible, given that this code
+ * is only executed for TCP connections.
+ */
+- refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
++ refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
+ isc_refcount_init(refs, 1);
+ client->pipeline_refs = refs;
+ }
+@@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) {
+ */
+ static void
+ pipeline_attach(ns_client_t *source, ns_client_t *target) {
+- int old_refs;
++ int refs;
+
+ REQUIRE(source->pipeline_refs != NULL);
+ REQUIRE(target->pipeline_refs == NULL);
+
+- old_refs = isc_refcount_increment(source->pipeline_refs);
+- INSIST(old_refs > 0);
++ isc_refcount_increment(source->pipeline_refs, &refs);
++ INSIST(refs > 1);
+ target->pipeline_refs = source->pipeline_refs;
+ }
+
+@@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) {
+ */
+ static bool
+ pipeline_detach(ns_client_t *client) {
+- isc_refcount_t *refs;
+- int old_refs;
++ isc_refcount_t *refcount;
++ int refs;
+
+ REQUIRE(client->pipeline_refs != NULL);
+
+- refs = client->pipeline_refs;
++ refcount = client->pipeline_refs;
+ client->pipeline_refs = NULL;
+
+- old_refs = isc_refcount_decrement(refs);
+- INSIST(old_refs > 0);
++ isc_refcount_decrement(refcount, refs);
+
+- if (old_refs == 1) {
+- isc_mem_free(client->sctx->mctx, refs);
++ if (refs == 0) {
++ isc_mem_free(ns_g_mctx, refs);
+ return (true);
+ }
+
+ return (false);
+ }
+
++/*
++ * Detach a client from the TCP client quota if appropriate, and set
++ * the quota pointer to NULL.
++ *
++ * Sometimes when the TCP client quota is exhausted but there are no other
++ * clients servicing the interface, a client will be allowed to continue
++ * running despite not having been attached to the quota. In this event,
++ * the TCP quota was never attached to the client, so when the client (or
++ * associated pipeline group) shuts down, the quota must NOT be detached.
++ *
++ * Otherwise, if the quota pointer is set, it should be detached. If not
++ * set at all, we just return without doing anything.
++ */
++static void
++tcpquota_disconnect(ns_client_t *client) {
++ if (client->tcpquota == NULL) {
++ return;
++ }
++
++ if (client->tcpattached) {
++ isc_quota_detach(&client->tcpquota);
++ client->tcpattached = false;
++ } else {
++ client->tcpquota = NULL;
++ }
++}
++
+ /*%
+ * Check for a deactivation or shutdown request and take appropriate
+ * action. Returns true if either is in progress; in this case
+@@ -490,38 +517,31 @@ exit_check(ns_client_t *client) {
+ client->tcpmsg_valid = false;
+ }
+
+- if (client->tcpquota != NULL) {
+- if (client->pipeline_refs == NULL ||
+- pipeline_detach(client))
+- {
+- /*
+- * Only detach from the TCP client quota if
+- * there are no more client structures using
+- * this TCP connection.
+- *
+- * Note that we check 'pipeline_refs' and not
+- * 'pipelined' because in some cases (e.g.
+- * after receiving a request with an opcode
+- * different than QUERY) 'pipelined' is set to
+- * false after the reference counter gets
+- * allocated in pipeline_init() and we must
+- * still drop our reference as failing to do so
+- * would prevent the reference counter itself
+- * from being freed.
+- */
+- isc_quota_detach(&client->tcpquota);
+- } else {
+- /*
+- * There are other client structures using this
+- * TCP connection, so we cannot detach from the
+- * TCP client quota to prevent excess TCP
+- * connections from being accepted. However,
+- * this client structure might later be reused
+- * for accepting new connections and thus must
+- * have its 'tcpquota' field set to NULL.
+- */
+- client->tcpquota = NULL;
+- }
++ /*
++ * Detach from pipeline group and from TCP client quota,
++ * if appropriate.
++ *
++ * - If no pipeline group is active, attempt to
++ * detach from the TCP client quota.
++ *
++ * - If a pipeline group is active, detach from it;
++ * if the return code indicates that there no more
++ * clients left if this pipeline group, we also detach
++ * from the TCP client quota.
++ *
++ * - Otherwise we don't try to detach, we just set the
++ * TCP quota pointer to NULL if it wasn't NULL already.
++ *
++ * tcpquota_disconnect() will set tcpquota to NULL, either
++ * by detaching it or by assignment, depending on the
++ * needs of the client. See the comments on that function
++ * for further information.
++ */
++ if (client->pipeline_refs == NULL || pipeline_detach(client)) {
++ tcpquota_disconnect(client);
++ } else {
++ client->tcpquota = NULL;
++ client->tcpattached = false;
+ }
+
+ if (client->tcpsocket != NULL) {
+@@ -544,8 +564,6 @@ exit_check(ns_client_t *client) {
+ client->timerset = false;
+ }
+
+- client->pipelined = false;
+-
+ client->peeraddr_valid = false;
+
+ client->state = NS_CLIENTSTATE_READY;
+@@ -558,18 +576,27 @@ exit_check(ns_client_t *client) {
+ * active and force it to go inactive if not.
+ *
+ * UDP clients go inactive at this point, but a TCP client
+- * will needs to remain active if no other clients are
+- * listening for TCP requests on this interface, to
+- * prevent this interface from going nonresponsive.
++ * may need to remain active and go into ready state if
++ * no other clients are available to listen for TCP
++ * requests on this interface or (in the case of pipelined
++ * clients) to read for additional messages on the current
++ * connection.
+ */
+ if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
+ LOCK(&client->interface->lock);
+- if (client->interface->ntcpaccepting == 0) {
++ if ((client->interface->ntcpaccepting == 0 ||
++ (client->pipelined &&
++ client->interface->ntcpactive < 2)) &&
++ client->newstate != NS_CLIENTSTATE_FREED)
++ {
+ client->mortal = false;
++ client->newstate = NS_CLIENTSTATE_READY;
+ }
+ UNLOCK(&client->interface->lock);
+ }
+
++ client->pipelined = false;
++
+ /*
+ * We don't need the client; send it to the inactive
+ * queue for recycling.
+@@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ client->pipelined = false;
+ }
+ if (TCP_CLIENT(client) && client->pipelined) {
++ /*
++ * We're pipelining. Replace the client; the
++ * the replacement can read the TCP socket looking
++ * for new messages and this client can process the
++ * current message asynchronously.
++ *
++ * There are now at least three clients using this
++ * TCP socket - one accepting new connections,
++ * one reading an existing connection to get new
++ * messages, and one answering the message already
++ * received.
++ */
+ result = ns_client_replace(client);
+ if (result != ISC_R_SUCCESS) {
+ client->pipelined = false;
+@@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->pipelined = false;
+ client->pipeline_refs = NULL;
+ client->tcpquota = NULL;
++ client->tcpattached = false;
+ client->recursionquota = NULL;
+ client->interface = NULL;
+ client->peeraddr_valid = false;
+@@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
+ "accept failed: %s",
+ isc_result_totext(nevent->result));
+- if (client->tcpquota != NULL) {
+- isc_quota_detach(&client->tcpquota);
+- }
++ tcpquota_disconnect(client);
+ }
+
+ if (exit_check(client))
+@@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ client->pipelined = false;
+ result = ns_client_replace(client);
+ if (result == ISC_R_SUCCESS &&
+- (client->sctx->keepresporder == NULL ||
++ (ns_g_server->keepresporder == NULL ||
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
+ ns_g_server->keepresporder)))
+ {
+@@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) {
+ * in named.conf. If we can't attach to it here, that means the TCP
+ * client quota has been exceeded.
+ */
+- result = isc_quota_attach(&client->sctx->tcpquota,
++ result = isc_quota_attach(&ns_g_server->tcpquota,
+ &client->tcpquota);
+ if (result != ISC_R_SUCCESS) {
+ bool exit;
+@@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) {
+ * interface to be starved, with no clients able
+ * to accept new connections.
+ *
+- * So, we check here to see if any other client
+- * is already servicing TCP queries on this
++ * So, we check here to see if any other clients
++ * are already servicing TCP queries on this
+ * interface (whether accepting, reading, or
+- * processing).
+- *
+- * If so, then it's okay *not* to call
+- * accept - we can let this client to go inactive
+- * and the other one handle the next connection
+- * when it's ready.
++ * processing). If there are at least two
++ * (one reading and one processing a request)
++ * then it's okay *not* to call accept - we
++ * can let this client go inactive and another
++ * one will resume accepting when it's done.
+ *
+- * But if not, then we need to be a little bit
+- * flexible about the quota. We allow *one* extra
+- * TCP client through, to ensure we're listening on
+- * every interface.
++ * If there aren't enough active clients on the
++ * interface, then we can be a little bit
++ * flexible about the quota. We'll allow *one*
++ * extra client through to ensure we're listening
++ * on every interface.
+ *
+- * (Note: In practice this means that the *real*
+- * TCP client quota is tcp-clients plus the number
+- * of interfaces.)
++ * (Note: In practice this means that the real
++ * TCP client quota is tcp-clients plus the
++ * number of listening interfaces plus 2.)
+ */
+ LOCK(&client->interface->lock);
+- exit = (client->interface->ntcpactive > 0);
++ exit = (client->interface->ntcpactive > 1);
+ UNLOCK(&client->interface->lock);
+
+ if (exit) {
+@@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) {
+ (void)exit_check(client);
+ return;
+ }
++
++ } else {
++ client->tcpattached = true;
+ }
+
+ /*
+@@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) {
+ UNEXPECTED_ERROR(__FILE__, __LINE__,
+ "isc_socket_accept() failed: %s",
+ isc_result_totext(result));
+- if (client->tcpquota != NULL) {
+- isc_quota_detach(&client->tcpquota);
++
++ tcpquota_disconnect(client);
++
++ if (client->tcpactive) {
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive--;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = false;
+ }
++
+ return;
+ }
+
+@@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) {
+ * once the connection is established.
+ *
+ * When the client object is shutting down after handling a TCP
+- * request (see exit_check()), it looks to see whether this value is
+- * non-zero. If so, that means another client has already called
+- * accept() and is waiting to establish the next connection, which
+- * means the first client is free to go inactive. Otherwise,
+- * the first client must come back and call accept() again; this
+- * guarantees there will always be at least one client listening
+- * for new TCP connections on each interface.
++ * request (see exit_check()), if this value is at least one, that
++ * means another client has called accept() and is waiting to
++ * establish the next connection. That means the client may be
++ * be free to become inactive; otherwise it may need to start
++ * listening for connections itself to prevent the interface
++ * going dead.
+ */
+ LOCK(&client->interface->lock);
+ client->interface->ntcpaccepting++;
+@@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) {
+ client->tcpsocket, client);
+ } else {
+ result = get_client(client->manager, client->interface,
+- client->dispatch, tcp);
++ client->dispatch, client, tcp);
++
++ /*
++ * The responsibility for listening for new requests is hereby
++ * transferred to the new client. Therefore, the old client
++ * should refrain from listening for any more requests.
++ */
++ client->mortal = true;
+ }
+ if (result != ISC_R_SUCCESS) {
+ return (result);
+ }
+
+- /*
+- * The responsibility for listening for new requests is hereby
+- * transferred to the new client. Therefore, the old client
+- * should refrain from listening for any more requests.
+- */
+- client->mortal = true;
+-
+ return (ISC_R_SUCCESS);
+ }
+
+@@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
+
+ static isc_result_t
+ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, bool tcp)
++ dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
+ {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc_event_t *ev;
+@@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ client->dscp = ifp->dscp;
+
+ if (tcp) {
++ client->tcpattached = false;
++ if (oldclient != NULL) {
++ client->tcpattached = oldclient->tcpattached;
++ }
++
++ LOCK(&client->interface->lock);
++ client->interface->ntcpactive++;
++ UNLOCK(&client->interface->lock);
++ client->tcpactive = true;
++
+ client->attributes |= NS_CLIENTATTR_TCP;
+ isc_socket_attach(ifp->tcpsocket,
+ &client->tcplistener);
+@@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+- client->tcpquota = &client->sctx->tcpquota;
++ client->tcpquota = &ns_g_server->tcpquota;
++ client->tcpattached = oldclient->tcpattached;
+
+ client->dscp = ifp->dscp;
+
+@@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ LOCK(&client->interface->lock);
+ client->interface->ntcpactive++;
+ UNLOCK(&client->interface->lock);
+-
+ client->tcpactive = true;
+
+ INSIST(client->tcpmsg_valid == false);
+@@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
+ MTRACE("createclients");
+
+ for (disp = 0; disp < n; disp++) {
+- result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
++ result = get_client(manager, ifp, ifp->udpdispatch[disp],
++ NULL, tcp);
+ if (result != ISC_R_SUCCESS)
+ break;
+ }
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index aeed9ccdda..e2c40acd28 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -9,8 +9,6 @@
+ * information regarding copyright ownership.
+ */
+
+-/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */
+-
+ #ifndef NAMED_CLIENT_H
+ #define NAMED_CLIENT_H 1
+
+@@ -136,6 +134,7 @@ struct ns_client {
+ bool pipelined; /*%< TCP queries not in sequence */
+ isc_refcount_t *pipeline_refs;
+ isc_quota_t *tcpquota;
++ bool tcpattached;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+
+--
+2.20.1
+
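To make the tcpattached bookkeeping introduced by the patch above easier to follow, here is a minimal standalone C sketch of the same rule: a client that was let through while over quota never incremented the counter, so its shutdown path must not decrement it either. This is not BIND's libisc API; quota_t, client_t, quota_attach() and quota_disconnect() are illustrative stand-ins for isc_quota_attach() and the new tcpquota_disconnect().

/*
 * Illustrative stand-in, not BIND code: release a quota slot on
 * shutdown only if one was actually taken ('tcpattached').
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int max;
    int used;
} quota_t;

typedef struct {
    quota_t *tcpquota;   /* shared quota; may be set without attaching */
    bool tcpattached;    /* true only if 'used' was incremented for us */
} client_t;

static bool
quota_attach(quota_t *q, client_t *c) {
    if (q->used >= q->max)
        return (false);  /* over quota; caller may still run, unattached */
    q->used++;
    c->tcpquota = q;
    c->tcpattached = true;
    return (true);
}

static void
quota_disconnect(client_t *c) {
    if (c->tcpquota == NULL)
        return;
    if (c->tcpattached) {        /* only balanced attachments decrement */
        c->tcpquota->used--;
        c->tcpattached = false;
    }
    c->tcpquota = NULL;
}

int
main(void) {
    quota_t q = { .max = 1, .used = 0 };
    client_t a = { 0 }, b = { 0 };

    bool a_ok = quota_attach(&q, &a);    /* takes the only slot */
    bool b_ok = quota_attach(&q, &b);    /* over quota ... */
    b.tcpquota = &q;                     /* ... but allowed to keep serving, unattached */

    quota_disconnect(&b);                /* must not touch q.used */
    quota_disconnect(&a);
    printf("a_ok=%d b_ok=%d used=%d\n", a_ok, b_ok, q.used);  /* 1 0 0 */
    return (0);
}

Without the tcpattached check, the unattached client's shutdown would drive 'used' negative and corrupt the quota accounting, which is the drift the patch above closes.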
diff --git a/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch
new file mode 100644
index 0000000000..987e75bc0e
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch
@@ -0,0 +1,911 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/c47ccf6]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From c47ccf630f147378568b33e8fdb7b754f228c346 Mon Sep 17 00:00:00 2001
+From: Evan Hunt <each@isc.org>
+Date: Fri, 5 Apr 2019 16:26:05 -0700
+Subject: [PATCH 5/6] refactor tcpquota and pipeline refs; allow special-case
+ overrun in isc_quota
+
+- if the TCP quota has been exceeded but there are no clients listening
+ for new connections on the interface, we can now force attachment to the
+ quota using isc_quota_force(), instead of carrying on with the quota not
+ attached.
+- the TCP client quota is now referenced via a reference-counted
+ 'ns_tcpconn' object, one of which is created whenever a client begins
+ listening for new connections, and attached to by members of that
+ client's pipeline group. when the last reference to the tcpconn
+ object is detached, it is freed and the TCP quota slot is released.
+- reduce code duplication by adding mark_tcp_active() function.
+- convert counters to atomic.
+
+(cherry picked from commit 7e8222378ca24f1302a0c1c638565050ab04681b)
+(cherry picked from commit 4939451275722bfda490ea86ca13e84f6bc71e46)
+(cherry picked from commit 13f7c918b8720d890408f678bd73c20e634539d9)
+---
+ bin/named/client.c | 444 +++++++++++--------------
+ bin/named/include/named/client.h | 12 +-
+ bin/named/include/named/interfacemgr.h | 6 +-
+ bin/named/interfacemgr.c | 1 +
+ lib/isc/include/isc/quota.h | 7 +
+ lib/isc/quota.c | 33 +-
+ lib/isc/win32/libisc.def.in | 1 +
+ 7 files changed, 236 insertions(+), 268 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 61e96dd28c..d826ab32bf 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -244,8 +244,7 @@ static void client_start(isc_task_t *task, isc_event_t *event);
+ static void client_request(isc_task_t *task, isc_event_t *event);
+ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
+ static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, ns_client_t *oldclient,
+- bool tcp);
++ dns_dispatch_t *disp, bool tcp);
+ static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ isc_socket_t *sock, ns_client_t *oldclient);
+ static inline bool
+@@ -301,16 +300,32 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
+ }
+
+ /*%
+- * Allocate a reference counter that will track the number of client structures
+- * using the TCP connection that 'client' called accept() for. This counter
+- * will be shared between all client structures associated with this TCP
+- * connection.
++ * Allocate a reference-counted object that will maintain a single pointer to
++ * the (also reference-counted) TCP client quota, shared between all the
++ * clients processing queries on a single TCP connection, so that all
++ * clients sharing the one socket will together consume only one slot in
++ * the 'tcp-clients' quota.
+ */
+-static void
+-pipeline_init(ns_client_t *client) {
+- isc_refcount_t *refs;
++static isc_result_t
++tcpconn_init(ns_client_t *client, bool force) {
++ isc_result_t result;
++ isc_quota_t *quota = NULL;
++ ns_tcpconn_t *tconn = NULL;
+
+- REQUIRE(client->pipeline_refs == NULL);
++ REQUIRE(client->tcpconn == NULL);
++
++ /*
++ * Try to attach to the quota first, so we won't pointlessly
++ * allocate memory for a tcpconn object if we can't get one.
++ */
++ if (force) {
++ result = isc_quota_force(&ns_g_server->tcpquota, &quota);
++ } else {
++ result = isc_quota_attach(&ns_g_server->tcpquota, &quota);
++ }
++ if (result != ISC_R_SUCCESS) {
++ return (result);
++ }
+
+ /*
+ * A global memory context is used for the allocation as different
+@@ -320,78 +335,80 @@ pipeline_init(ns_client_t *client) {
+ * contention here is expected to be negligible, given that this code
+ * is only executed for TCP connections.
+ */
+- refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
+- isc_refcount_init(refs, 1);
+- client->pipeline_refs = refs;
++ tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn));
++
++ isc_refcount_init(&tconn->refs, 1);
++ tconn->tcpquota = quota;
++ quota = NULL;
++ tconn->pipelined = false;
++
++ client->tcpconn = tconn;
++
++ return (ISC_R_SUCCESS);
+ }
+
+ /*%
+- * Increase the count of client structures using the TCP connection that
+- * 'source' is associated with and put a pointer to that count in 'target',
+- * thus associating it with the same TCP connection.
++ * Increase the count of client structures sharing the TCP connection
++ * that 'source' is associated with; add a pointer to the same tcpconn
++ * to 'target', thus associating it with the same TCP connection.
+ */
+ static void
+-pipeline_attach(ns_client_t *source, ns_client_t *target) {
++tcpconn_attach(ns_client_t *source, ns_client_t *target) {
+ int refs;
+
+- REQUIRE(source->pipeline_refs != NULL);
+- REQUIRE(target->pipeline_refs == NULL);
++ REQUIRE(source->tcpconn != NULL);
++ REQUIRE(target->tcpconn == NULL);
++ REQUIRE(source->tcpconn->pipelined);
+
+- isc_refcount_increment(source->pipeline_refs, &refs);
++ isc_refcount_increment(&source->tcpconn->refs, &refs);
+ INSIST(refs > 1);
+- target->pipeline_refs = source->pipeline_refs;
++ target->tcpconn = source->tcpconn;
+ }
+
+ /*%
+- * Decrease the count of client structures using the TCP connection that
++ * Decrease the count of client structures sharing the TCP connection that
+ * 'client' is associated with. If this is the last client using this TCP
+- * connection, free the reference counter and return true; otherwise, return
+- * false.
++ * connection, we detach from the TCP quota and free the tcpconn
++ * object. Either way, client->tcpconn is set to NULL.
+ */
+-static bool
+-pipeline_detach(ns_client_t *client) {
+- isc_refcount_t *refcount;
++static void
++tcpconn_detach(ns_client_t *client) {
++ ns_tcpconn_t *tconn = NULL;
+ int refs;
+
+- REQUIRE(client->pipeline_refs != NULL);
+-
+- refcount = client->pipeline_refs;
+- client->pipeline_refs = NULL;
++ REQUIRE(client->tcpconn != NULL);
+
+- isc_refcount_decrement(refcount, refs);
++ tconn = client->tcpconn;
++ client->tcpconn = NULL;
+
++ isc_refcount_decrement(&tconn->refs, &refs);
+ if (refs == 0) {
+- isc_mem_free(ns_g_mctx, refs);
+- return (true);
++ isc_quota_detach(&tconn->tcpquota);
++ isc_mem_free(ns_g_mctx, tconn);
+ }
+-
+- return (false);
+ }
+
+-/*
+- * Detach a client from the TCP client quota if appropriate, and set
+- * the quota pointer to NULL.
+- *
+- * Sometimes when the TCP client quota is exhausted but there are no other
+- * clients servicing the interface, a client will be allowed to continue
+- * running despite not having been attached to the quota. In this event,
+- * the TCP quota was never attached to the client, so when the client (or
+- * associated pipeline group) shuts down, the quota must NOT be detached.
++/*%
++ * Mark a client as active and increment the interface's 'ntcpactive'
++ * counter, as a signal that there is at least one client servicing
++ * TCP queries for the interface. If we reach the TCP client quota at
++ * some point, this will be used to determine whether a quota overrun
++ * should be permitted.
+ *
+- * Otherwise, if the quota pointer is set, it should be detached. If not
+- * set at all, we just return without doing anything.
++ * Marking the client active with the 'tcpactive' flag ensures proper
++ * accounting, by preventing us from incrementing or decrementing
++ * 'ntcpactive' more than once per client.
+ */
+ static void
+-tcpquota_disconnect(ns_client_t *client) {
+- if (client->tcpquota == NULL) {
+- return;
+- }
+-
+- if (client->tcpattached) {
+- isc_quota_detach(&client->tcpquota);
+- client->tcpattached = false;
+- } else {
+- client->tcpquota = NULL;
++mark_tcp_active(ns_client_t *client, bool active) {
++ if (active && !client->tcpactive) {
++ isc_atomic_xadd(&client->interface->ntcpactive, 1);
++ client->tcpactive = active;
++ } else if (!active && client->tcpactive) {
++ uint32_t old =
++ isc_atomic_xadd(&client->interface->ntcpactive, -1);
++ INSIST(old > 0);
++ client->tcpactive = active;
+ }
+ }
+
+@@ -484,7 +501,8 @@ exit_check(ns_client_t *client) {
+ INSIST(client->recursionquota == NULL);
+
+ if (NS_CLIENTSTATE_READING == client->newstate) {
+- if (!client->pipelined) {
++ INSIST(client->tcpconn != NULL);
++ if (!client->tcpconn->pipelined) {
+ client_read(client);
+ client->newstate = NS_CLIENTSTATE_MAX;
+ return (true); /* We're done. */
+@@ -507,8 +525,8 @@ exit_check(ns_client_t *client) {
+ dns_tcpmsg_cancelread(&client->tcpmsg);
+ }
+
+- if (client->nreads != 0) {
+- /* Still waiting for read cancel completion. */
++ /* Still waiting for read cancel completion. */
++ if (client->nreads > 0) {
+ return (true);
+ }
+
+@@ -518,43 +536,45 @@ exit_check(ns_client_t *client) {
+ }
+
+ /*
+- * Detach from pipeline group and from TCP client quota,
+- * if appropriate.
++ * Soon the client will be ready to accept a new TCP
++ * connection or UDP request, but we may have enough
++ * clients doing that already. Check whether this client
++ * needs to remain active and allow it go inactive if
++ * not.
+ *
+- * - If no pipeline group is active, attempt to
+- * detach from the TCP client quota.
++ * UDP clients always go inactive at this point, but a TCP
++ * client may need to stay active and return to READY
++ * state if no other clients are available to listen
++ * for TCP requests on this interface.
+ *
+- * - If a pipeline group is active, detach from it;
+- * if the return code indicates that there no more
+- * clients left if this pipeline group, we also detach
+- * from the TCP client quota.
+- *
+- * - Otherwise we don't try to detach, we just set the
+- * TCP quota pointer to NULL if it wasn't NULL already.
+- *
+- * tcpquota_disconnect() will set tcpquota to NULL, either
+- * by detaching it or by assignment, depending on the
+- * needs of the client. See the comments on that function
+- * for further information.
++ * Regardless, if we're going to FREED state, that means
++ * the system is shutting down and we don't need to
++ * retain clients.
+ */
+- if (client->pipeline_refs == NULL || pipeline_detach(client)) {
+- tcpquota_disconnect(client);
+- } else {
+- client->tcpquota = NULL;
+- client->tcpattached = false;
++ if (client->mortal && TCP_CLIENT(client) &&
++ client->newstate != NS_CLIENTSTATE_FREED &&
++ !ns_g_clienttest &&
++ isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
++ {
++ /* Nobody else is accepting */
++ client->mortal = false;
++ client->newstate = NS_CLIENTSTATE_READY;
++ }
++
++ /*
++ * Detach from TCP connection and TCP client quota,
++ * if appropriate. If this is the last reference to
++ * the TCP connection in our pipeline group, the
++ * TCP quota slot will be released.
++ */
++ if (client->tcpconn) {
++ tcpconn_detach(client);
+ }
+
+ if (client->tcpsocket != NULL) {
+ CTRACE("closetcp");
+ isc_socket_detach(&client->tcpsocket);
+-
+- if (client->tcpactive) {
+- LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpactive > 0);
+- client->interface->ntcpactive--;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = false;
+- }
++ mark_tcp_active(client, false);
+ }
+
+ if (client->timerset) {
+@@ -567,35 +587,6 @@ exit_check(ns_client_t *client) {
+ client->peeraddr_valid = false;
+
+ client->state = NS_CLIENTSTATE_READY;
+- INSIST(client->recursionquota == NULL);
+-
+- /*
+- * Now the client is ready to accept a new TCP connection
+- * or UDP request, but we may have enough clients doing
+- * that already. Check whether this client needs to remain
+- * active and force it to go inactive if not.
+- *
+- * UDP clients go inactive at this point, but a TCP client
+- * may need to remain active and go into ready state if
+- * no other clients are available to listen for TCP
+- * requests on this interface or (in the case of pipelined
+- * clients) to read for additional messages on the current
+- * connection.
+- */
+- if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
+- LOCK(&client->interface->lock);
+- if ((client->interface->ntcpaccepting == 0 ||
+- (client->pipelined &&
+- client->interface->ntcpactive < 2)) &&
+- client->newstate != NS_CLIENTSTATE_FREED)
+- {
+- client->mortal = false;
+- client->newstate = NS_CLIENTSTATE_READY;
+- }
+- UNLOCK(&client->interface->lock);
+- }
+-
+- client->pipelined = false;
+
+ /*
+ * We don't need the client; send it to the inactive
+@@ -630,7 +621,7 @@ exit_check(ns_client_t *client) {
+ }
+
+ /* Still waiting for accept cancel completion. */
+- if (! (client->naccepts == 0)) {
++ if (client->naccepts > 0) {
+ return (true);
+ }
+
+@@ -641,7 +632,7 @@ exit_check(ns_client_t *client) {
+ }
+
+ /* Still waiting for recv cancel completion. */
+- if (! (client->nrecvs == 0)) {
++ if (client->nrecvs > 0) {
+ return (true);
+ }
+
+@@ -654,14 +645,7 @@ exit_check(ns_client_t *client) {
+ INSIST(client->recursionquota == NULL);
+ if (client->tcplistener != NULL) {
+ isc_socket_detach(&client->tcplistener);
+-
+- if (client->tcpactive) {
+- LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpactive > 0);
+- client->interface->ntcpactive--;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = false;
+- }
++ mark_tcp_active(client, false);
+ }
+ if (client->udpsocket != NULL) {
+ isc_socket_detach(&client->udpsocket);
+@@ -816,7 +800,7 @@ client_start(isc_task_t *task, isc_event_t *event) {
+ return;
+
+ if (TCP_CLIENT(client)) {
+- if (client->pipelined) {
++ if (client->tcpconn != NULL) {
+ client_read(client);
+ } else {
+ client_accept(client);
+@@ -2470,6 +2454,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ client->nrecvs--;
+ } else {
+ INSIST(TCP_CLIENT(client));
++ INSIST(client->tcpconn != NULL);
+ REQUIRE(event->ev_type == DNS_EVENT_TCPMSG);
+ REQUIRE(event->ev_sender == &client->tcpmsg);
+ buffer = &client->tcpmsg.buffer;
+@@ -2657,17 +2642,19 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ /*
+ * Pipeline TCP query processing.
+ */
+- if (client->message->opcode != dns_opcode_query) {
+- client->pipelined = false;
++ if (TCP_CLIENT(client) &&
++ client->message->opcode != dns_opcode_query)
++ {
++ client->tcpconn->pipelined = false;
+ }
+- if (TCP_CLIENT(client) && client->pipelined) {
++ if (TCP_CLIENT(client) && client->tcpconn->pipelined) {
+ /*
+ * We're pipelining. Replace the client; the
+- * the replacement can read the TCP socket looking
+- * for new messages and this client can process the
++ * replacement can read the TCP socket looking
++ * for new messages and this one can process the
+ * current message asynchronously.
+ *
+- * There are now at least three clients using this
++ * There will now be at least three clients using this
+ * TCP socket - one accepting new connections,
+ * one reading an existing connection to get new
+ * messages, and one answering the message already
+@@ -2675,7 +2662,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
+ */
+ result = ns_client_replace(client);
+ if (result != ISC_R_SUCCESS) {
+- client->pipelined = false;
++ client->tcpconn->pipelined = false;
+ }
+ }
+
+@@ -3233,10 +3220,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
+ client->signer = NULL;
+ dns_name_init(&client->signername, NULL);
+ client->mortal = false;
+- client->pipelined = false;
+- client->pipeline_refs = NULL;
+- client->tcpquota = NULL;
+- client->tcpattached = false;
++ client->tcpconn = NULL;
+ client->recursionquota = NULL;
+ client->interface = NULL;
+ client->peeraddr_valid = false;
+@@ -3341,9 +3325,10 @@ client_read(ns_client_t *client) {
+
+ static void
+ client_newconn(isc_task_t *task, isc_event_t *event) {
++ isc_result_t result;
+ ns_client_t *client = event->ev_arg;
+ isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
+- isc_result_t result;
++ uint32_t old;
+
+ REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
+ REQUIRE(NS_CLIENT_VALID(client));
+@@ -3363,10 +3348,8 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ INSIST(client->naccepts == 1);
+ client->naccepts--;
+
+- LOCK(&client->interface->lock);
+- INSIST(client->interface->ntcpaccepting > 0);
+- client->interface->ntcpaccepting--;
+- UNLOCK(&client->interface->lock);
++ old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
++ INSIST(old > 0);
+
+ /*
+ * We must take ownership of the new socket before the exit
+@@ -3399,7 +3382,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
+ "accept failed: %s",
+ isc_result_totext(nevent->result));
+- tcpquota_disconnect(client);
++ tcpconn_detach(client);
+ }
+
+ if (exit_check(client))
+@@ -3437,15 +3420,13 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ * telnetting to port 53 (once per CPU) will
+ * deny service to legitimate TCP clients.
+ */
+- client->pipelined = false;
+ result = ns_client_replace(client);
+ if (result == ISC_R_SUCCESS &&
+ (ns_g_server->keepresporder == NULL ||
+ !allowed(&netaddr, NULL, NULL, 0, NULL,
+ ns_g_server->keepresporder)))
+ {
+- pipeline_init(client);
+- client->pipelined = true;
++ client->tcpconn->pipelined = true;
+ }
+
+ client_read(client);
+@@ -3462,78 +3443,59 @@ client_accept(ns_client_t *client) {
+ CTRACE("accept");
+
+ /*
+- * The tcpquota object can only be simultaneously referenced a
+- * pre-defined number of times; this is configured by 'tcp-clients'
+- * in named.conf. If we can't attach to it here, that means the TCP
+- * client quota has been exceeded.
++ * Set up a new TCP connection. This means try to attach to the
++ * TCP client quota (tcp-clients), but fail if we're over quota.
+ */
+- result = isc_quota_attach(&ns_g_server->tcpquota,
+- &client->tcpquota);
++ result = tcpconn_init(client, false);
+ if (result != ISC_R_SUCCESS) {
+- bool exit;
++ bool exit;
+
+- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
+- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
+- "no more TCP clients: %s",
+- isc_result_totext(result));
+-
+- /*
+- * We have exceeded the system-wide TCP client
+- * quota. But, we can't just block this accept
+- * in all cases, because if we did, a heavy TCP
+- * load on other interfaces might cause this
+- * interface to be starved, with no clients able
+- * to accept new connections.
+- *
+- * So, we check here to see if any other clients
+- * are already servicing TCP queries on this
+- * interface (whether accepting, reading, or
+- * processing). If there are at least two
+- * (one reading and one processing a request)
+- * then it's okay *not* to call accept - we
+- * can let this client go inactive and another
+- * one will resume accepting when it's done.
+- *
+- * If there aren't enough active clients on the
+- * interface, then we can be a little bit
+- * flexible about the quota. We'll allow *one*
+- * extra client through to ensure we're listening
+- * on every interface.
+- *
+- * (Note: In practice this means that the real
+- * TCP client quota is tcp-clients plus the
+- * number of listening interfaces plus 2.)
+- */
+- LOCK(&client->interface->lock);
+- exit = (client->interface->ntcpactive > 1);
+- UNLOCK(&client->interface->lock);
++ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
++ NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
++ "TCP client quota reached: %s",
++ isc_result_totext(result));
+
+- if (exit) {
+- client->newstate = NS_CLIENTSTATE_INACTIVE;
+- (void)exit_check(client);
+- return;
+- }
++ /*
++ * We have exceeded the system-wide TCP client quota. But,
++ * we can't just block this accept in all cases, because if
++ * we did, a heavy TCP load on other interfaces might cause
++ * this interface to be starved, with no clients able to
++ * accept new connections.
++ *
++ * So, we check here to see if any other clients are
++ * already servicing TCP queries on this interface (whether
++ * accepting, reading, or processing). If we find at least
++ * one, then it's okay *not* to call accept - we can let this
++ * client go inactive and another will take over when it's
++ * done.
++ *
++ * If there aren't enough active clients on the interface,
++ * then we can be a little bit flexible about the quota.
++ * We'll allow *one* extra client through to ensure we're
++ * listening on every interface; we do this by setting the
++ * 'force' option to tcpconn_init().
++ *
++ * (Note: In practice this means that the real TCP client
++ * quota is tcp-clients plus the number of listening
++ * interfaces plus 1.)
++ */
++ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
++ if (exit) {
++ client->newstate = NS_CLIENTSTATE_INACTIVE;
++ (void)exit_check(client);
++ return;
++ }
+
+- } else {
+- client->tcpattached = true;
++ result = tcpconn_init(client, true);
++ RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ }
+
+ /*
+- * By incrementing the interface's ntcpactive counter we signal
+- * that there is at least one client servicing TCP queries for the
+- * interface.
+- *
+- * We also make note of the fact in the client itself with the
+- * tcpactive flag. This ensures proper accounting by preventing
+- * us from accidentally incrementing or decrementing ntcpactive
+- * more than once per client object.
++ * If this client was set up using get_client() or get_worker(),
++ * then TCP is already marked active. However, if it was restarted
++ * from exit_check(), it might not be, so we take care of it now.
+ */
+- if (!client->tcpactive) {
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive++;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = true;
+- }
++ mark_tcp_active(client, true);
+
+ result = isc_socket_accept(client->tcplistener, client->task,
+ client_newconn, client);
+@@ -3549,15 +3511,8 @@ client_accept(ns_client_t *client) {
+ "isc_socket_accept() failed: %s",
+ isc_result_totext(result));
+
+- tcpquota_disconnect(client);
+-
+- if (client->tcpactive) {
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive--;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = false;
+- }
+-
++ tcpconn_detach(client);
++ mark_tcp_active(client, false);
+ return;
+ }
+
+@@ -3582,9 +3537,7 @@ client_accept(ns_client_t *client) {
+ * listening for connections itself to prevent the interface
+ * going dead.
+ */
+- LOCK(&client->interface->lock);
+- client->interface->ntcpaccepting++;
+- UNLOCK(&client->interface->lock);
++ isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
+ }
+
+ static void
+@@ -3655,24 +3608,25 @@ ns_client_replace(ns_client_t *client) {
+ REQUIRE(client->manager != NULL);
+
+ tcp = TCP_CLIENT(client);
+- if (tcp && client->pipelined) {
++ if (tcp && client->tcpconn != NULL && client->tcpconn->pipelined) {
+ result = get_worker(client->manager, client->interface,
+ client->tcpsocket, client);
+ } else {
+ result = get_client(client->manager, client->interface,
+- client->dispatch, client, tcp);
++ client->dispatch, tcp);
+
+- /*
+- * The responsibility for listening for new requests is hereby
+- * transferred to the new client. Therefore, the old client
+- * should refrain from listening for any more requests.
+- */
+- client->mortal = true;
+ }
+ if (result != ISC_R_SUCCESS) {
+ return (result);
+ }
+
++ /*
++ * The responsibility for listening for new requests is hereby
++ * transferred to the new client. Therefore, the old client
++ * should refrain from listening for any more requests.
++ */
++ client->mortal = true;
++
+ return (ISC_R_SUCCESS);
+ }
+
+@@ -3806,7 +3760,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
+
+ static isc_result_t
+ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+- dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
++ dns_dispatch_t *disp, bool tcp)
+ {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc_event_t *ev;
+@@ -3850,15 +3804,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
+ client->dscp = ifp->dscp;
+
+ if (tcp) {
+- client->tcpattached = false;
+- if (oldclient != NULL) {
+- client->tcpattached = oldclient->tcpattached;
+- }
+-
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive++;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = true;
++ mark_tcp_active(client, true);
+
+ client->attributes |= NS_CLIENTATTR_TCP;
+ isc_socket_attach(ifp->tcpsocket,
+@@ -3923,16 +3869,14 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ ns_interface_attach(ifp, &client->interface);
+ client->newstate = client->state = NS_CLIENTSTATE_WORKING;
+ INSIST(client->recursionquota == NULL);
+- client->tcpquota = &ns_g_server->tcpquota;
+- client->tcpattached = oldclient->tcpattached;
+
+ client->dscp = ifp->dscp;
+
+ client->attributes |= NS_CLIENTATTR_TCP;
+ client->mortal = true;
+
+- pipeline_attach(oldclient, client);
+- client->pipelined = true;
++ tcpconn_attach(oldclient, client);
++ mark_tcp_active(client, true);
+
+ isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
+ isc_socket_attach(sock, &client->tcpsocket);
+@@ -3940,11 +3884,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
+ (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
+ client->peeraddr_valid = true;
+
+- LOCK(&client->interface->lock);
+- client->interface->ntcpactive++;
+- UNLOCK(&client->interface->lock);
+- client->tcpactive = true;
+-
+ INSIST(client->tcpmsg_valid == false);
+ dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
+ client->tcpmsg_valid = true;
+@@ -3970,8 +3909,7 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
+ MTRACE("createclients");
+
+ for (disp = 0; disp < n; disp++) {
+- result = get_client(manager, ifp, ifp->udpdispatch[disp],
+- NULL, tcp);
++ result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
+ if (result != ISC_R_SUCCESS)
+ break;
+ }
+diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
+index e2c40acd28..969ee4c08f 100644
+--- a/bin/named/include/named/client.h
++++ b/bin/named/include/named/client.h
+@@ -78,6 +78,13 @@
+ *** Types
+ ***/
+
++/*% reference-counted TCP connection object */
++typedef struct ns_tcpconn {
++ isc_refcount_t refs;
++ isc_quota_t *tcpquota;
++ bool pipelined;
++} ns_tcpconn_t;
++
+ /*% nameserver client structure */
+ struct ns_client {
+ unsigned int magic;
+@@ -131,10 +138,7 @@ struct ns_client {
+ dns_name_t signername; /*%< [T]SIG key name */
+ dns_name_t *signer; /*%< NULL if not valid sig */
+ bool mortal; /*%< Die after handling request */
+- bool pipelined; /*%< TCP queries not in sequence */
+- isc_refcount_t *pipeline_refs;
+- isc_quota_t *tcpquota;
+- bool tcpattached;
++ ns_tcpconn_t *tcpconn;
+ isc_quota_t *recursionquota;
+ ns_interface_t *interface;
+
+diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
+index 61b08826a6..3535ef22a8 100644
+--- a/bin/named/include/named/interfacemgr.h
++++ b/bin/named/include/named/interfacemgr.h
+@@ -9,8 +9,6 @@
+ * information regarding copyright ownership.
+ */
+
+-/* $Id: interfacemgr.h,v 1.35 2011/07/28 23:47:58 tbox Exp $ */
+-
+ #ifndef NAMED_INTERFACEMGR_H
+ #define NAMED_INTERFACEMGR_H 1
+
+@@ -77,11 +75,11 @@ struct ns_interface {
+ /*%< UDP dispatchers. */
+ isc_socket_t * tcpsocket; /*%< TCP socket. */
+ isc_dscp_t dscp; /*%< "listen-on" DSCP value */
+- int ntcpaccepting; /*%< Number of clients
++ int32_t ntcpaccepting; /*%< Number of clients
+ ready to accept new
+ TCP connections on this
+ interface */
+- int ntcpactive; /*%< Number of clients
++ int32_t ntcpactive; /*%< Number of clients
+ servicing TCP queries
+ (whether accepting or
+ connected) */
+diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
+index 955096ef47..d9f6df5802 100644
+--- a/bin/named/interfacemgr.c
++++ b/bin/named/interfacemgr.c
+@@ -388,6 +388,7 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
+ */
+ ifp->ntcpaccepting = 0;
+ ifp->ntcpactive = 0;
++
+ ifp->nudpdispatch = 0;
+
+ ifp->dscp = -1;
+diff --git a/lib/isc/include/isc/quota.h b/lib/isc/include/isc/quota.h
+index b9bf59877a..36c5830242 100644
+--- a/lib/isc/include/isc/quota.h
++++ b/lib/isc/include/isc/quota.h
+@@ -100,6 +100,13 @@ isc_quota_attach(isc_quota_t *quota, isc_quota_t **p);
+ * quota if successful (ISC_R_SUCCESS or ISC_R_SOFTQUOTA).
+ */
+
++isc_result_t
++isc_quota_force(isc_quota_t *quota, isc_quota_t **p);
++/*%<
++ * Like isc_quota_attach, but will attach '*p' to the quota
++ * even if the hard quota has been exceeded.
++ */
++
+ void
+ isc_quota_detach(isc_quota_t **p);
+ /*%<
+diff --git a/lib/isc/quota.c b/lib/isc/quota.c
+index 3ddff0d875..556a61f21d 100644
+--- a/lib/isc/quota.c
++++ b/lib/isc/quota.c
+@@ -74,20 +74,39 @@ isc_quota_release(isc_quota_t *quota) {
+ UNLOCK(&quota->lock);
+ }
+
+-isc_result_t
+-isc_quota_attach(isc_quota_t *quota, isc_quota_t **p)
+-{
++static isc_result_t
++doattach(isc_quota_t *quota, isc_quota_t **p, bool force) {
+ isc_result_t result;
+- INSIST(p != NULL && *p == NULL);
++ REQUIRE(p != NULL && *p == NULL);
++
+ result = isc_quota_reserve(quota);
+- if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA)
++ if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) {
++ *p = quota;
++ } else if (result == ISC_R_QUOTA && force) {
++ /* attach anyway */
++ LOCK(&quota->lock);
++ quota->used++;
++ UNLOCK(&quota->lock);
++
+ *p = quota;
++ result = ISC_R_SUCCESS;
++ }
++
+ return (result);
+ }
+
++isc_result_t
++isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) {
++ return (doattach(quota, p, false));
++}
++
++isc_result_t
++isc_quota_force(isc_quota_t *quota, isc_quota_t **p) {
++ return (doattach(quota, p, true));
++}
++
+ void
+-isc_quota_detach(isc_quota_t **p)
+-{
++isc_quota_detach(isc_quota_t **p) {
+ INSIST(p != NULL && *p != NULL);
+ isc_quota_release(*p);
+ *p = NULL;
+diff --git a/lib/isc/win32/libisc.def.in b/lib/isc/win32/libisc.def.in
+index a82facec0f..7b9f23d776 100644
+--- a/lib/isc/win32/libisc.def.in
++++ b/lib/isc/win32/libisc.def.in
+@@ -519,6 +519,7 @@ isc_portset_removerange
+ isc_quota_attach
+ isc_quota_destroy
+ isc_quota_detach
++isc_quota_force
+ isc_quota_init
+ isc_quota_max
+ isc_quota_release
+--
+2.20.1
+
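The refactor above collapses the separate pipeline_refs, tcpquota and tcpattached fields into one reference-counted ns_tcpconn object per TCP connection, so all clients pipelining on the same socket together hold exactly one tcp-clients slot, released when the last reference drops. A minimal standalone sketch of that ownership model, using C11 atomics and a plain integer in place of BIND's isc_refcount and isc_quota types (tcpconn_t and the helper names are illustrative only, not the real API):

/* Illustrative stand-in, not BIND code. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    atomic_int refs;     /* clients currently sharing this connection */
    int *quota_used;     /* the single quota slot held by the connection */
    bool pipelined;
} tcpconn_t;

static tcpconn_t *
tcpconn_init(int *quota_used) {
    tcpconn_t *t = malloc(sizeof(*t));
    assert(t != NULL);
    atomic_init(&t->refs, 1);
    t->quota_used = quota_used;
    (*quota_used)++;             /* one slot per connection, not per client */
    t->pipelined = false;
    return (t);
}

static tcpconn_t *
tcpconn_attach(tcpconn_t *t) {
    int old = atomic_fetch_add(&t->refs, 1);
    assert(old > 0);
    return (t);
}

static void
tcpconn_detach(tcpconn_t **tp) {
    tcpconn_t *t = *tp;
    *tp = NULL;
    if (atomic_fetch_sub(&t->refs, 1) == 1) {
        (*t->quota_used)--;      /* last client out releases the quota slot */
        free(t);
    }
}

int
main(void) {
    int used = 0;
    tcpconn_t *accepting = tcpconn_init(&used);     /* client that accepted */
    tcpconn_t *reader = tcpconn_attach(accepting);  /* pipelined worker */

    assert(used == 1);           /* still only one tcp-clients slot */
    tcpconn_detach(&accepting);
    assert(used == 1);           /* reader still holds the connection */
    tcpconn_detach(&reader);
    assert(used == 0);           /* slot released with the last reference */
    return (0);
}

Keeping the quota pointer inside the shared object is what makes the earlier per-client tcpattached flag unnecessary.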
diff --git a/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch
new file mode 100644
index 0000000000..3821d18501
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch
@@ -0,0 +1,80 @@
+Backport patch to fix CVE-2018-5743.
+
+Ref:
+https://security-tracker.debian.org/tracker/CVE-2018-5743
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/59434b9]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 59434b987e8eb436b08c24e559ee094c4e939daa Mon Sep 17 00:00:00 2001
+From: Evan Hunt <each@isc.org>
+Date: Fri, 5 Apr 2019 16:26:19 -0700
+Subject: [PATCH 6/6] restore allowance for tcp-clients < interfaces
+
+in the "refactor tcpquota and pipeline refs" commit, the counting
+of active interfaces was tightened in such a way that named could
+fail to listen on an interface if there were more interfaces than
+tcp-clients. when checking the quota to start accepting on an
+interface, if the number of active clients was above zero, then
+it was presumed that some other client was able to handle accepting
+new connections. this, however, ignored the fact that the current client
+could be included in that count, so if the quota was already exceeded
+before all the interfaces were listening, some interfaces would never
+listen.
+
+we now check whether the current client has been marked active; if so,
+then the number of active clients on the interface must be greater
+than 1, not 0.
+
+(cherry picked from commit 0b4e2cd4c3192ba88569dd344f542a8cc43742b5)
+(cherry picked from commit d01023aaac35543daffbdf48464e320150235d41)
+---
+ bin/named/client.c | 8 +++++---
+ doc/arm/Bv9ARM-book.xml | 3 ++-
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index d826ab32bf..845326abc0 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -3464,8 +3464,9 @@ client_accept(ns_client_t *client) {
+ *
+ * So, we check here to see if any other clients are
+ * already servicing TCP queries on this interface (whether
+- * accepting, reading, or processing). If we find at least
+- * one, then it's okay *not* to call accept - we can let this
++ * accepting, reading, or processing). If we find that at
++ * least one client other than this one is active, then
++ * it's okay *not* to call accept - we can let this
+ * client go inactive and another will take over when it's
+ * done.
+ *
+@@ -3479,7 +3480,8 @@ client_accept(ns_client_t *client) {
+ * quota is tcp-clients plus the number of listening
+ * interfaces plus 1.)
+ */
+- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
++ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
++ (client->tcpactive ? 1 : 0));
+ if (exit) {
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
+ (void)exit_check(client);
+diff --git a/doc/arm/Bv9ARM-book.xml b/doc/arm/Bv9ARM-book.xml
+index 381768d540..9c76d3cd6f 100644
+--- a/doc/arm/Bv9ARM-book.xml
++++ b/doc/arm/Bv9ARM-book.xml
+@@ -8493,7 +8493,8 @@ avoid-v6-udp-ports { 40000; range 50000 60000; };
+ <para>
+ The number of file descriptors reserved for TCP, stdio,
+ etc. This needs to be big enough to cover the number of
+- interfaces <command>named</command> listens on, <command>tcp-clients</command> as well as
++ interfaces <command>named</command> listens on plus
++ <command>tcp-clients</command>, as well as
+ to provide room for outgoing TCP queries and incoming zone
+ transfers. The default is <literal>512</literal>.
+ The minimum value is <literal>128</literal> and the
+--
+2.20.1
+
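The two-line change above makes the over-quota test count only other clients: a client that has already marked itself active must see more than one active client on the interface before it may stop listening. A tiny standalone sketch of the adjusted predicate (may_go_inactive() is an illustrative stand-in, not the real client_accept() code):

/* Illustrative stand-in, not BIND code. */
#include <assert.h>
#include <stdbool.h>

static bool
may_go_inactive(int ntcpactive, bool self_is_active) {
    return (ntcpactive > (self_is_active ? 1 : 0));
}

int
main(void) {
    /* Sole active client on the interface: it must keep listening. */
    assert(!may_go_inactive(1, true));
    /* Another client is active too, so this one may bow out. */
    assert(may_go_inactive(2, true));
    /* Not yet counted itself: any other active client is enough. */
    assert(may_go_inactive(1, false));
    return (0);
}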
diff --git a/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch
new file mode 100644
index 0000000000..1a84eca58a
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch
@@ -0,0 +1,140 @@
+Backport commit to fix a compile error on arm caused by the commits that
+fix CVE-2018-5743.
+
+CVE: CVE-2018-5743
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ef49780]
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From ef49780d30d3ddc5735cfc32561b678a634fa72f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@sury.org>
+Date: Wed, 17 Apr 2019 15:22:27 +0200
+Subject: [PATCH] Replace atomic operations in bin/named/client.c with
+ isc_refcount reference counting
+
+---
+ bin/named/client.c | 18 +++++++-----------
+ bin/named/include/named/interfacemgr.h | 5 +++--
+ bin/named/interfacemgr.c | 7 +++++--
+ 3 files changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/bin/named/client.c b/bin/named/client.c
+index 845326abc0..29fecadca8 100644
+--- a/bin/named/client.c
++++ b/bin/named/client.c
+@@ -402,12 +402,10 @@ tcpconn_detach(ns_client_t *client) {
+ static void
+ mark_tcp_active(ns_client_t *client, bool active) {
+ if (active && !client->tcpactive) {
+- isc_atomic_xadd(&client->interface->ntcpactive, 1);
++ isc_refcount_increment0(&client->interface->ntcpactive, NULL);
+ client->tcpactive = active;
+ } else if (!active && client->tcpactive) {
+- uint32_t old =
+- isc_atomic_xadd(&client->interface->ntcpactive, -1);
+- INSIST(old > 0);
++ isc_refcount_decrement(&client->interface->ntcpactive, NULL);
+ client->tcpactive = active;
+ }
+ }
+@@ -554,7 +552,7 @@ exit_check(ns_client_t *client) {
+ if (client->mortal && TCP_CLIENT(client) &&
+ client->newstate != NS_CLIENTSTATE_FREED &&
+ !ns_g_clienttest &&
+- isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
++ isc_refcount_current(&client->interface->ntcpaccepting) == 0)
+ {
+ /* Nobody else is accepting */
+ client->mortal = false;
+@@ -3328,7 +3326,6 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ isc_result_t result;
+ ns_client_t *client = event->ev_arg;
+ isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
+- uint32_t old;
+
+ REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
+ REQUIRE(NS_CLIENT_VALID(client));
+@@ -3348,8 +3345,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
+ INSIST(client->naccepts == 1);
+ client->naccepts--;
+
+- old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
+- INSIST(old > 0);
++ isc_refcount_decrement(&client->interface->ntcpaccepting, NULL);
+
+ /*
+ * We must take ownership of the new socket before the exit
+@@ -3480,8 +3476,8 @@ client_accept(ns_client_t *client) {
+ * quota is tcp-clients plus the number of listening
+ * interfaces plus 1.)
+ */
+- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
+- (client->tcpactive ? 1 : 0));
++ exit = (isc_refcount_current(&client->interface->ntcpactive) >
++ (client->tcpactive ? 1U : 0U));
+ if (exit) {
+ client->newstate = NS_CLIENTSTATE_INACTIVE;
+ (void)exit_check(client);
+@@ -3539,7 +3535,7 @@ client_accept(ns_client_t *client) {
+ * listening for connections itself to prevent the interface
+ * going dead.
+ */
+- isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
++ isc_refcount_increment0(&client->interface->ntcpaccepting, NULL);
+ }
+
+ static void
+diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
+index 3535ef22a8..6e10f210fd 100644
+--- a/bin/named/include/named/interfacemgr.h
++++ b/bin/named/include/named/interfacemgr.h
+@@ -45,6 +45,7 @@
+ #include <isc/magic.h>
+ #include <isc/mem.h>
+ #include <isc/socket.h>
++#include <isc/refcount.h>
+
+ #include <dns/result.h>
+
+@@ -75,11 +76,11 @@ struct ns_interface {
+ /*%< UDP dispatchers. */
+ isc_socket_t * tcpsocket; /*%< TCP socket. */
+ isc_dscp_t dscp; /*%< "listen-on" DSCP value */
+- int32_t ntcpaccepting; /*%< Number of clients
++ isc_refcount_t ntcpaccepting; /*%< Number of clients
+ ready to accept new
+ TCP connections on this
+ interface */
+- int32_t ntcpactive; /*%< Number of clients
++ isc_refcount_t ntcpactive; /*%< Number of clients
+ servicing TCP queries
+ (whether accepting or
+ connected) */
+diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
+index d9f6df5802..135533be6b 100644
+--- a/bin/named/interfacemgr.c
++++ b/bin/named/interfacemgr.c
+@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
+ * connections will be handled in parallel even though there is
+ * only one client initially.
+ */
+- ifp->ntcpaccepting = 0;
+- ifp->ntcpactive = 0;
++ isc_refcount_init(&ifp->ntcpaccepting, 0);
++ isc_refcount_init(&ifp->ntcpactive, 0);
+
+ ifp->nudpdispatch = 0;
+
+@@ -618,6 +618,9 @@ ns_interface_destroy(ns_interface_t *ifp) {
+
+ ns_interfacemgr_detach(&ifp->mgr);
+
++ isc_refcount_destroy(&ifp->ntcpactive);
++ isc_refcount_destroy(&ifp->ntcpaccepting);
++
+ ifp->magic = 0;
+ isc_mem_put(mctx, ifp, sizeof(*ifp));
+ }
+--
+2.20.1
+
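The compile fix above replaces the isc_atomic_xadd() calls, which broke the arm build, with isc_refcount operations; their increment0/decrement pair lets a counter grow from zero while trapping underflow internally. A small standalone C11 sketch of that counter discipline (counter_t and its helpers are illustrative only, not the libisc API):

/* Illustrative stand-in, not libisc. */
#include <assert.h>
#include <stdatomic.h>

typedef struct {
    atomic_uint value;
} counter_t;

/* Growing from zero is allowed, as with isc_refcount_increment0(). */
static void
counter_increment0(counter_t *c) {
    atomic_fetch_add(&c->value, 1);
}

/* Underflow is a hard error, as with isc_refcount_decrement(). */
static void
counter_decrement(counter_t *c) {
    unsigned int old = atomic_fetch_sub(&c->value, 1);
    assert(old > 0);
}

int
main(void) {
    counter_t ntcpaccepting;

    atomic_init(&ntcpaccepting.value, 0);
    counter_increment0(&ntcpaccepting);     /* as in client_accept() */
    counter_decrement(&ntcpaccepting);      /* as in client_newconn() */
    assert(atomic_load(&ntcpaccepting.value) == 0);
    return (0);
}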
diff --git a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb b/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
index 1355841e6b..5d52b696c8 100644
--- a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
+++ b/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
@@ -20,6 +20,14 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \
file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \
file://0001-avoid-start-failure-with-bind-user.patch \
+ file://0001-bind-fix-CVE-2019-6471.patch \
+ file://0001-fix-enforcement-of-tcp-clients-v1.patch \
+ file://0002-tcp-clients-could-still-be-exceeded-v2.patch \
+ file://0003-use-reference-counter-for-pipeline-groups-v3.patch \
+ file://0004-better-tcpquota-accounting-and-client-mortality-chec.patch \
+ file://0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch \
+ file://0006-restore-allowance-for-tcp-clients-interfaces.patch \
+ file://0007-Replace-atomic-operations-in-bin-named-client.c-with.patch \
"
SRC_URI[md5sum] = "8ddab4b61fa4516fe404679c74e37960"
@@ -29,6 +37,10 @@ UPSTREAM_CHECK_URI = "https://ftp.isc.org/isc/bind9/"
UPSTREAM_CHECK_REGEX = "(?P<pver>9(\.\d+)+(-P\d+)*)/"
RECIPE_NO_UPDATE_REASON = "9.11 is LTS 2021"
+# BIND >= 9.11.2 needs dhcpd >= 4.4.0; don't report CVE-2019-6470 here
+# since dhcpd is already recent enough.
+CVE_CHECK_WHITELIST += "CVE-2019-6470"
+
inherit autotools update-rc.d systemd useradd pkgconfig multilib_script
MULTILIB_SCRIPTS = "${PN}:${bindir}/bind9-config ${PN}:${bindir}/isc-config.sh"
diff --git a/meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch b/meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch
deleted file mode 100644
index 01773834c7..0000000000
--- a/meta/recipes-connectivity/libpcap/libpcap/0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From aafa3512b7b742f5e66a5543e41974cc5e7eebfa Mon Sep 17 00:00:00 2001
-From: maxice8 <thinkabit.ukim@gmail.com>
-Date: Sun, 22 Jul 2018 18:54:17 -0300
-Subject: [PATCH] pcap-usb-linux.c: add missing limits.h for musl systems.
-
-fix compilation on musl libc systems like Void Linux and Alpine.
-
-Upstream-Status: Backport [https://github.com/the-tcpdump-group/libpcap/commit/d557c98a16dc254aaff03762b694fe624e180bea]
-
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
----
- pcap-usb-linux.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/pcap-usb-linux.c b/pcap-usb-linux.c
-index 6f8adf65..b92c05ea 100644
---- a/pcap-usb-linux.c
-+++ b/pcap-usb-linux.c
-@@ -50,6 +50,7 @@
- #include <stdlib.h>
- #include <unistd.h>
- #include <fcntl.h>
-+#include <limits.h>
- #include <string.h>
- #include <dirent.h>
- #include <byteswap.h>
---
-2.17.1
-
diff --git a/meta/recipes-connectivity/libpcap/libpcap_1.9.0.bb b/meta/recipes-connectivity/libpcap/libpcap_1.9.1.bb
index 78361561e6..a0f66ac0fa 100644
--- a/meta/recipes-connectivity/libpcap/libpcap_1.9.0.bb
+++ b/meta/recipes-connectivity/libpcap/libpcap_1.9.1.bb
@@ -11,10 +11,9 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=5eb289217c160e2920d2e35bddc36453 \
DEPENDS = "flex-native bison-native"
SRC_URI = "https://www.tcpdump.org/release/${BP}.tar.gz \
- file://0001-pcap-usb-linux.c-add-missing-limits.h-for-musl-syste.patch \
"
-SRC_URI[md5sum] = "dffd65cb14406ab9841f421732eb0f33"
-SRC_URI[sha256sum] = "2edb88808e5913fdaa8e9c1fcaf272e19b2485338742b5074b9fe44d68f37019"
+SRC_URI[md5sum] = "21af603d9a591c7d96a6457021d84e6c"
+SRC_URI[sha256sum] = "635237637c5b619bcceba91900666b64d56ecb7be63f298f601ec786ce087094"
inherit autotools binconfig-disabled pkgconfig bluetooth
diff --git a/meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch b/meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch
new file mode 100644
index 0000000000..e2930c3c7d
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch
@@ -0,0 +1,46 @@
+From 3cccc0a2ab597b8273bddf08e9a3cc5551d7e530 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 3 Jan 2020 03:02:26 +0000
+Subject: [PATCH] upstream: what bozo decided to use 2020 as a future date in a
+ regress
+
+test?
+
+OpenBSD-Regress-ID: 3b953df5a7e14081ff6cf495d4e8d40e153cbc3a
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/ff31f15773ee173502eec4d7861ec56f26bba381]
+
+[Dropped the script version and copyright year change at the top]
+
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ regress/cert-hostkey.sh | 2 +-
+ regress/cert-userkey.sh | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/regress/cert-hostkey.sh b/regress/cert-hostkey.sh
+index 3ce7779..74d5a53 100644
+--- a/regress/cert-hostkey.sh
++++ b/regress/cert-hostkey.sh
+@@ -248,7 +248,7 @@ test_one() {
+ test_one "user-certificate" failure "-n $HOSTS"
+ test_one "empty principals" success "-h"
+ test_one "wrong principals" failure "-h -n foo"
+-test_one "cert not yet valid" failure "-h -V20200101:20300101"
++test_one "cert not yet valid" failure "-h -V20300101:20320101"
+ test_one "cert expired" failure "-h -V19800101:19900101"
+ test_one "cert valid interval" success "-h -V-1w:+2w"
+ test_one "cert has constraints" failure "-h -Oforce-command=false"
+diff --git a/regress/cert-userkey.sh b/regress/cert-userkey.sh
+index 6849e99..de455b8 100644
+--- a/regress/cert-userkey.sh
++++ b/regress/cert-userkey.sh
+@@ -327,7 +327,7 @@ test_one() {
+ test_one "correct principal" success "-n ${USER}"
+ test_one "host-certificate" failure "-n ${USER} -h"
+ test_one "wrong principals" failure "-n foo"
+-test_one "cert not yet valid" failure "-n ${USER} -V20200101:20300101"
++test_one "cert not yet valid" failure "-n ${USER} -V20300101:20320101"
+ test_one "cert expired" failure "-n ${USER} -V19800101:19900101"
+ test_one "cert valid interval" success "-n ${USER} -V-1w:+2w"
+ test_one "wrong source-address" failure "-n ${USER} -Osource-address=10.0.0.0/8"
diff --git a/meta/recipes-connectivity/openssh/openssh_7.9p1.bb b/meta/recipes-connectivity/openssh/openssh_7.9p1.bb
index 6c8f7327a9..4d4f2753fe 100644
--- a/meta/recipes-connectivity/openssh/openssh_7.9p1.bb
+++ b/meta/recipes-connectivity/openssh/openssh_7.9p1.bb
@@ -28,6 +28,7 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
file://CVE-2019-6109.patch \
file://0001-upstream-Have-progressmeter-force-an-update-at-the-b.patch \
file://CVE-2019-6111.patch \
+ file://0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch \
"
SRC_URI[md5sum] = "c6af50b7a474d04726a5aa747a5dce8f"
SRC_URI[sha256sum] = "6b4b3ba2253d84ed3771c8050728d597c91cfce898713beb7b64a305b6f11aad"
diff --git a/meta/recipes-connectivity/openssl/openssl/CVE-2019-1543.patch b/meta/recipes-connectivity/openssl/openssl/CVE-2019-1543.patch
deleted file mode 100644
index 900ef97fce..0000000000
--- a/meta/recipes-connectivity/openssl/openssl/CVE-2019-1543.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-Upstream-Status: Backport [https://github.com/openssl/openssl/commit/f426625b6ae9a7831010750490a5f0ad689c5ba3]
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-From f426625b6ae9a7831010750490a5f0ad689c5ba3 Mon Sep 17 00:00:00 2001
-From: Matt Caswell <matt@openssl.org>
-Date: Tue, 5 Mar 2019 14:39:15 +0000
-Subject: [PATCH] Prevent over long nonces in ChaCha20-Poly1305
-
-ChaCha20-Poly1305 is an AEAD cipher, and requires a unique nonce input for
-every encryption operation. RFC 7539 specifies that the nonce value (IV)
-should be 96 bits (12 bytes). OpenSSL allows a variable nonce length and
-front pads the nonce with 0 bytes if it is less than 12 bytes. However it
-also incorrectly allows a nonce to be set of up to 16 bytes. In this case
-only the last 12 bytes are significant and any additional leading bytes are
-ignored.
-
-It is a requirement of using this cipher that nonce values are unique.
-Messages encrypted using a reused nonce value are susceptible to serious
-confidentiality and integrity attacks. If an application changes the
-default nonce length to be longer than 12 bytes and then makes a change to
-the leading bytes of the nonce expecting the new value to be a new unique
-nonce then such an application could inadvertently encrypt messages with a
-reused nonce.
-
-Additionally the ignored bytes in a long nonce are not covered by the
-integrity guarantee of this cipher. Any application that relies on the
-integrity of these ignored leading bytes of a long nonce may be further
-affected.
-
-Any OpenSSL internal use of this cipher, including in SSL/TLS, is safe
-because no such use sets such a long nonce value. However user
-applications that use this cipher directly and set a non-default nonce
-length to be longer than 12 bytes may be vulnerable.
-
-CVE: CVE-2019-1543
-
-Fixes #8345
-
-Reviewed-by: Paul Dale <paul.dale@oracle.com>
-Reviewed-by: Richard Levitte <levitte@openssl.org>
-(Merged from https://github.com/openssl/openssl/pull/8406)
-
-(cherry picked from commit 2a3d0ee9d59156c48973592331404471aca886d6)
----
- crypto/evp/e_chacha20_poly1305.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/crypto/evp/e_chacha20_poly1305.c b/crypto/evp/e_chacha20_poly1305.c
-index c1917bb86a6..d3e2c622a1b 100644
---- a/crypto/evp/e_chacha20_poly1305.c
-+++ b/crypto/evp/e_chacha20_poly1305.c
-@@ -30,6 +30,8 @@ typedef struct {
-
- #define data(ctx) ((EVP_CHACHA_KEY *)(ctx)->cipher_data)
-
-+#define CHACHA20_POLY1305_MAX_IVLEN 12
-+
- static int chacha_init_key(EVP_CIPHER_CTX *ctx,
- const unsigned char user_key[CHACHA_KEY_SIZE],
- const unsigned char iv[CHACHA_CTR_SIZE], int enc)
-@@ -533,7 +535,7 @@ static int chacha20_poly1305_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
- return 1;
-
- case EVP_CTRL_AEAD_SET_IVLEN:
-- if (arg <= 0 || arg > CHACHA_CTR_SIZE)
-+ if (arg <= 0 || arg > CHACHA20_POLY1305_MAX_IVLEN)
- return 0;
- actx->nonce_len = arg;
- return 1;
diff --git a/meta/recipes-connectivity/openssl/openssl/afalg.patch b/meta/recipes-connectivity/openssl/openssl/afalg.patch
index 7c4b084f3d..b7c0e9697f 100644
--- a/meta/recipes-connectivity/openssl/openssl/afalg.patch
+++ b/meta/recipes-connectivity/openssl/openssl/afalg.patch
@@ -18,14 +18,14 @@ index 3baa8ce..9ef52ed 100755
- ($mi2) = $mi2 =~ /(\d+)/;
- my $ver = $ma*10000 + $mi1*100 + $mi2;
- if ($ver < $minver) {
-- $disabled{afalgeng} = "too-old-kernel";
+- disable('too-old-kernel', 'afalgeng');
- } else {
- push @{$config{engdirs}}, "afalg";
- }
- } else {
-- $disabled{afalgeng} = "cross-compiling";
+- disable('cross-compiling', 'afalgeng');
- }
+ push @{$config{engdirs}}, "afalg";
} else {
- $disabled{afalgeng} = "not-linux";
+ disable('not-linux', 'afalgeng');
}
diff --git a/meta/recipes-connectivity/openssl/openssl/reproducible.patch b/meta/recipes-connectivity/openssl/openssl/reproducible.patch
new file mode 100644
index 0000000000..a24260c95d
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/reproducible.patch
@@ -0,0 +1,32 @@
+The value for perl_archname can vary depending on the host, e.g.
+x86_64-linux-gnu-thread-multi or x86_64-linux-thread-multi, which
+makes the ptest package non-reproducible. It's unused other than in
+these references, so drop it.
+
+RP 2020/2/6
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: openssl-1.1.1d/Configure
+===================================================================
+--- openssl-1.1.1d.orig/Configure
++++ openssl-1.1.1d/Configure
+@@ -286,7 +286,7 @@ if (defined env($local_config_envname))
+ # Save away perl command information
+ $config{perl_cmd} = $^X;
+ $config{perl_version} = $Config{version};
+-$config{perl_archname} = $Config{archname};
++#$config{perl_archname} = $Config{archname};
+
+ $config{prefix}="";
+ $config{openssldir}="";
+@@ -2517,7 +2517,7 @@ _____
+ @{$config{perlargv}}), "\n";
+ print "\nPerl information:\n\n";
+ print ' ',$config{perl_cmd},"\n";
+- print ' ',$config{perl_version},' for ',$config{perl_archname},"\n";
++ print ' ',$config{perl_version},"\n";
+ }
+ if ($dump || $options) {
+ my $longest = 0;
diff --git a/meta/recipes-connectivity/openssl/openssl10_1.0.2r.bb b/meta/recipes-connectivity/openssl/openssl10_1.0.2u.bb
index 87df4f517a..c5a00066ba 100644
--- a/meta/recipes-connectivity/openssl/openssl10_1.0.2r.bb
+++ b/meta/recipes-connectivity/openssl/openssl10_1.0.2u.bb
@@ -53,13 +53,15 @@ SRC_URI_append_class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[md5sum] = "0d2baaf04c56d542f6cc757b9c2a2aac"
-SRC_URI[sha256sum] = "ae51d08bba8a83958e894946f15303ff894d75c2b8bbd44a852b64e3fe11d0d6"
+SRC_URI[md5sum] = "cdc2638f789ecc2db2c91488265686c1"
+SRC_URI[sha256sum] = "ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16"
S = "${WORKDIR}/openssl-${PV}"
UPSTREAM_CHECK_REGEX = "openssl-(?P<pver>1\.0.+)\.tar"
+CVE_PRODUCT = "openssl:openssl"
+
inherit pkgconfig siteinfo multilib_header ptest manpages
PACKAGECONFIG ?= "cryptodev-linux"
diff --git a/meta/recipes-connectivity/openssl/openssl_1.1.1b.bb b/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb
index 9e36df807c..a57e09c802 100644
--- a/meta/recipes-connectivity/openssl/openssl_1.1.1b.bb
+++ b/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb
@@ -16,15 +16,14 @@ SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
file://0001-skip-test_symbol_presence.patch \
file://0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch \
file://afalg.patch \
- file://CVE-2019-1543.patch \
+ file://reproducible.patch \
"
SRC_URI_append_class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[md5sum] = "4532712e7bcc9414f5bce995e4e13930"
-SRC_URI[sha256sum] = "5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b"
+SRC_URI[sha256sum] = "ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46"
inherit lib_package multilib_header ptest
@@ -32,7 +31,7 @@ PACKAGECONFIG ?= ""
PACKAGECONFIG_class-native = ""
PACKAGECONFIG_class-nativesdk = ""
-PACKAGECONFIG[cryptodev-linux] = "enable-devcryptoeng,disable-devcryptoeng,cryptodev-linux"
+PACKAGECONFIG[cryptodev-linux] = "enable-devcryptoeng,disable-devcryptoeng,cryptodev-linux,,cryptodev-module"
B = "${WORKDIR}/build"
do_configure[cleandirs] = "${B}"
@@ -43,10 +42,10 @@ do_configure[cleandirs] = "${B}"
EXTRA_OECONF_append_libc-musl = " no-async"
EXTRA_OECONF_append_libc-musl_powerpc64 = " no-asm"
-# This prevents openssl from using getrandom() which is not available on older glibc versions
+# Adding devrandom prevents openssl from using getrandom(), which is not available on older glibc versions
# (native versions can be built with newer glibc, but then relocated onto a system with older glibc)
-EXTRA_OECONF_class-native = "--with-rand-seed=devrandom"
-EXTRA_OECONF_class-nativesdk = "--with-rand-seed=devrandom"
+EXTRA_OECONF_class-native = "--with-rand-seed=os,devrandom"
+EXTRA_OECONF_class-nativesdk = "--with-rand-seed=os,devrandom"
# Relying on hardcoded built-in paths causes openssl-native to not be relocateable from sstate.
CFLAGS_append_class-native = " -DOPENSSLDIR=/not/builtin -DENGINESDIR=/not/builtin"
@@ -204,3 +203,9 @@ RREPLACES_openssl-conf = "openssl10-conf"
RCONFLICTS_openssl-conf = "openssl10-conf"
BBCLASSEXTEND = "native nativesdk"
+
+CVE_PRODUCT = "openssl:openssl"
+
+# Only affects OpenSSL >= 1.1.1 in combination with Apache < 2.4.37
+# Apache in meta-webserver is already recent enough
+CVE_CHECK_WHITELIST += "CVE-2019-0190"
diff --git a/meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch b/meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch
new file mode 100644
index 0000000000..b7ba7ba643
--- /dev/null
+++ b/meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch
@@ -0,0 +1,47 @@
+From 8d7970b8f3db727fe798b65f3377fe6787575426 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Mon, 3 Feb 2020 15:53:28 +1100
+Subject: [PATCH] pppd: Fix bounds check in EAP code
+
+Given that we have just checked vallen < len, it can never be the case
+that vallen >= len + sizeof(rhostname). This fixes the check so we
+actually avoid overflowing the rhostname array.
+
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+
+Upstream-Status: Backport
+[https://github.com/paulusmack/ppp/commit/8d7970b8f3db727fe798b65f3377fe6787575426]
+
+CVE: CVE-2020-8597
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ pppd/eap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/pppd/eap.c b/pppd/eap.c
+index 94407f5..1b93db0 100644
+--- a/pppd/eap.c
++++ b/pppd/eap.c
+@@ -1420,7 +1420,7 @@ int len;
+ }
+
+ /* Not so likely to happen. */
+- if (vallen >= len + sizeof (rhostname)) {
++ if (len - vallen >= sizeof (rhostname)) {
+ dbglog("EAP: trimming really long peer name down");
+ BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
+ rhostname[sizeof (rhostname) - 1] = '\0';
+@@ -1846,7 +1846,7 @@ int len;
+ }
+
+ /* Not so likely to happen. */
+- if (vallen >= len + sizeof (rhostname)) {
++ if (len - vallen >= sizeof (rhostname)) {
+ dbglog("EAP: trimming really long peer name down");
+ BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
+ rhostname[sizeof (rhostname) - 1] = '\0';
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
index 644cde4562..60c56dd0bd 100644
--- a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
+++ b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
@@ -33,6 +33,7 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/${BP}.tar.gz \
file://0001-pppoe-include-netinet-in.h-before-linux-in.h.patch \
file://0001-ppp-Remove-unneeded-include.patch \
file://ppp-2.4.7-DES-openssl.patch \
+ file://0001-pppd-Fix-bounds-check-in-EAP-code.patch \
"
SRC_URI_append_libc-musl = "\
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch
new file mode 100644
index 0000000000..7b0713cf6d
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-AP-Silently-ignore-management-frame-from-unexpected-.patch
@@ -0,0 +1,82 @@
+hostapd before 2.10 and wpa_supplicant before 2.10 allow an incorrect indication
+of disconnection in certain situations because source address validation is
+mishandled. This is a denial of service that should have been prevented by PMF
+(aka management frame protection). The attacker must send a crafted 802.11 frame
+from a location that is within the 802.11 communications range.
+
+CVE: CVE-2019-16275
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 8c07fa9eda13e835f3f968b2e1c9a8be3a851ff9 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <j@w1.fi>
+Date: Thu, 29 Aug 2019 11:52:04 +0300
+Subject: [PATCH] AP: Silently ignore management frame from unexpected source
+ address
+
+Do not process any received Management frames with unexpected/invalid SA
+so that we do not add any state for unexpected STA addresses or end up
+sending out frames to unexpected destination. This prevents unexpected
+sequences where an unprotected frame might end up causing the AP to send
+out a response to another device and that other device processing the
+unexpected response.
+
+In particular, this prevents some potential denial of service cases
+where the unexpected response frame from the AP might result in a
+connected station dropping its association.
+
+Signed-off-by: Jouni Malinen <j@w1.fi>
+---
+ src/ap/drv_callbacks.c | 13 +++++++++++++
+ src/ap/ieee802_11.c | 12 ++++++++++++
+ 2 files changed, 25 insertions(+)
+
+diff --git a/src/ap/drv_callbacks.c b/src/ap/drv_callbacks.c
+index 31587685fe3b..34ca379edc3d 100644
+--- a/src/ap/drv_callbacks.c
++++ b/src/ap/drv_callbacks.c
+@@ -131,6 +131,19 @@ int hostapd_notif_assoc(struct hostapd_data *hapd, const u8 *addr,
+ "hostapd_notif_assoc: Skip event with no address");
+ return -1;
+ }
++
++ if (is_multicast_ether_addr(addr) ||
++ is_zero_ether_addr(addr) ||
++ os_memcmp(addr, hapd->own_addr, ETH_ALEN) == 0) {
++ /* Do not process any frames with unexpected/invalid SA so that
++ * we do not add any state for unexpected STA addresses or end
++ * up sending out frames to unexpected destination. */
++ wpa_printf(MSG_DEBUG, "%s: Invalid SA=" MACSTR
++ " in received indication - ignore this indication silently",
++ __func__, MAC2STR(addr));
++ return 0;
++ }
++
+ random_add_randomness(addr, ETH_ALEN);
+
+ hostapd_logger(hapd, addr, HOSTAPD_MODULE_IEEE80211,
+diff --git a/src/ap/ieee802_11.c b/src/ap/ieee802_11.c
+index c85a28db44b7..e7065372e158 100644
+--- a/src/ap/ieee802_11.c
++++ b/src/ap/ieee802_11.c
+@@ -4626,6 +4626,18 @@ int ieee802_11_mgmt(struct hostapd_data *hapd, const u8 *buf, size_t len,
+ fc = le_to_host16(mgmt->frame_control);
+ stype = WLAN_FC_GET_STYPE(fc);
+
++ if (is_multicast_ether_addr(mgmt->sa) ||
++ is_zero_ether_addr(mgmt->sa) ||
++ os_memcmp(mgmt->sa, hapd->own_addr, ETH_ALEN) == 0) {
++ /* Do not process any frames with unexpected/invalid SA so that
++ * we do not add any state for unexpected STA addresses or end
++ * up sending out frames to unexpected destination. */
++ wpa_printf(MSG_DEBUG, "MGMT: Invalid SA=" MACSTR
++ " in received frame - ignore this frame silently",
++ MAC2STR(mgmt->sa));
++ return 0;
++ }
++
+ if (stype == WLAN_FC_STYPE_BEACON) {
+ handle_beacon(hapd, mgmt, len, fi);
+ return 1;
+--
+2.20.1
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.7.bb b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.7.bb
index 277bbaec63..542bbf4a9a 100644
--- a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.7.bb
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.7.bb
@@ -41,6 +41,7 @@ SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \
file://0014-EAP-pwd-Check-element-x-y-coordinates-explicitly.patch \
file://0001-EAP-pwd-server-Fix-reassembly-buffer-handling.patch \
file://0003-EAP-pwd-peer-Fix-reassembly-buffer-handling.patch \
+ file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \
"
SRC_URI[md5sum] = "a68538fb62766f40f890125026c42c10"
SRC_URI[sha256sum] = "76ea6b06b7a2ea8e6d9eb1a9166166f1656e6d48c7508914f592100c95c73074"
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0001-gfile-Limit-access-to-files-when-copying.patch b/meta/recipes-core/glib-2.0/glib-2.0/0001-gfile-Limit-access-to-files-when-copying.patch
new file mode 100644
index 0000000000..8fc03d1aed
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0001-gfile-Limit-access-to-files-when-copying.patch
@@ -0,0 +1,57 @@
+From e6b769819d63d2b24b251dbc9f902fe6fd614da3 Mon Sep 17 00:00:00 2001
+From: Ondrej Holy <oholy@redhat.com>
+Date: Thu, 23 May 2019 10:41:53 +0200
+Subject: gfile: Limit access to files when copying
+
+file_copy_fallback creates new files with default permissions and
+sets the correct permissions after the operation is finished. This
+might make the files accessible to more users during the operation
+than expected. Use G_FILE_CREATE_PRIVATE for the new files to limit
+access to those files.
+
+CVE: CVE-2019-12450
+Upstream-Status: Backport
+Signed-off-by: Adrian Bunk <bunk@stusta.de>
+---
+ gio/gfile.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/gio/gfile.c b/gio/gfile.c
+index 1cc69166a..13b435480 100644
+--- a/gio/gfile.c
++++ b/gio/gfile.c
+@@ -3284,12 +3284,12 @@ file_copy_fallback (GFile *source,
+ out = (GOutputStream*)_g_local_file_output_stream_replace (_g_local_file_get_filename (G_LOCAL_FILE (destination)),
+ FALSE, NULL,
+ flags & G_FILE_COPY_BACKUP,
+- G_FILE_CREATE_REPLACE_DESTINATION,
+- info,
++ G_FILE_CREATE_REPLACE_DESTINATION |
++ G_FILE_CREATE_PRIVATE, info,
+ cancellable, error);
+ else
+ out = (GOutputStream*)_g_local_file_output_stream_create (_g_local_file_get_filename (G_LOCAL_FILE (destination)),
+- FALSE, 0, info,
++ FALSE, G_FILE_CREATE_PRIVATE, info,
+ cancellable, error);
+ }
+ else if (flags & G_FILE_COPY_OVERWRITE)
+@@ -3297,12 +3297,13 @@ file_copy_fallback (GFile *source,
+ out = (GOutputStream *)g_file_replace (destination,
+ NULL,
+ flags & G_FILE_COPY_BACKUP,
+- G_FILE_CREATE_REPLACE_DESTINATION,
++ G_FILE_CREATE_REPLACE_DESTINATION |
++ G_FILE_CREATE_PRIVATE,
+ cancellable, error);
+ }
+ else
+ {
+- out = (GOutputStream *)g_file_create (destination, 0, cancellable, error);
++ out = (GOutputStream *)g_file_create (destination, G_FILE_CREATE_PRIVATE, cancellable, error);
+ }
+
+ if (!out)
+--
+2.20.1
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0_2.58.3.bb b/meta/recipes-core/glib-2.0/glib-2.0_2.58.3.bb
index 2286d03148..f151a3358f 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0_2.58.3.bb
+++ b/meta/recipes-core/glib-2.0/glib-2.0_2.58.3.bb
@@ -19,6 +19,7 @@ SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch \
file://glib-meson.cross \
file://CVE-2019-13012.patch \
+ file://0001-gfile-Limit-access-to-files-when-copying.patch \
"
SRC_URI_append_class-native = " file://relocate-modules.patch"
diff --git a/meta/recipes-core/glibc/glibc/CVE-2019-19126.patch b/meta/recipes-core/glibc/glibc/CVE-2019-19126.patch
new file mode 100644
index 0000000000..aead04c485
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2019-19126.patch
@@ -0,0 +1,32 @@
+From 37c90e117310728a4ad1eb998c0bbe7d79c4a398 Mon Sep 17 00:00:00 2001
+From: =?utf8?q?Marcin=20Ko=C5=9Bcielnicki?= <mwk@0x04.net>
+Date: Thu, 21 Nov 2019 00:20:15 +0100
+Subject: [PATCH] rtld: Check __libc_enable_secure before honoring
+ LD_PREFER_MAP_32BIT_EXEC (CVE-2019-19126) [BZ #25204]
+
+The problem was introduced in glibc 2.23, in commit
+b9eb92ab05204df772eb4929eccd018637c9f3e9
+("Add Prefer_MAP_32BIT_EXEC to map executable pages with MAP_32BIT").
+
+(cherry picked from commit d5dfad4326fc683c813df1e37bbf5cf920591c8e)
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commit;h=37c90e117310728a4ad1eb998c0bbe7d79c4a398]
+CVE: CVE-2019-19126
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+diff --git a/sysdeps/unix/sysv/linux/x86_64/64/dl-librecon.h b/sysdeps/unix/sysv/linux/x86_64/64/dl-librecon.h
+index 975cbe2..df2cdfd 100644
+--- a/sysdeps/unix/sysv/linux/x86_64/64/dl-librecon.h
++++ b/sysdeps/unix/sysv/linux/x86_64/64/dl-librecon.h
+@@ -31,7 +31,8 @@
+ environment variable, LD_PREFER_MAP_32BIT_EXEC. */
+ #define EXTRA_LD_ENVVARS \
+ case 21: \
+- if (memcmp (envline, "PREFER_MAP_32BIT_EXEC", 21) == 0) \
++ if (!__libc_enable_secure \
++ && memcmp (envline, "PREFER_MAP_32BIT_EXEC", 21) == 0) \
+ GLRO(dl_x86_cpu_features).feature[index_arch_Prefer_MAP_32BIT_EXEC] \
+ |= bit_arch_Prefer_MAP_32BIT_EXEC; \
+ break;
+--
+2.9.3
diff --git a/meta/recipes-core/glibc/glibc_2.29.bb b/meta/recipes-core/glibc/glibc_2.29.bb
index c6b2caad42..28af4d1ba4 100644
--- a/meta/recipes-core/glibc/glibc_2.29.bb
+++ b/meta/recipes-core/glibc/glibc_2.29.bb
@@ -56,6 +56,7 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
file://0030-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch \
file://0001-x86-64-memcmp-Use-unsigned-Jcc-instructions-on-size-.patch \
file://CVE-2019-9169.patch \
+ file://CVE-2019-19126.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index a17c51e9a7..20a71adb52 100644
--- a/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -22,7 +22,7 @@ IMAGE_FSTYPES = "wic.vmdk"
inherit core-image module-base setuptools3
-SRCREV ?= "d0f73121551dc98f6924cd77952bf9ebf5ef3dd7"
+SRCREV ?= "83e9841bb832c0e68b3b34e64166234ad09155b9"
SRC_URI = "git://git.yoctoproject.org/poky;branch=warrior \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch b/meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch
new file mode 100644
index 0000000000..4ee2d4fe62
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch
@@ -0,0 +1,37 @@
+From 7ffcd44d7e6c46704f8af0321d9314cd26e0e18a Mon Sep 17 00:00:00 2001
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+Date: Tue, 20 Aug 2019 16:33:06 +0800
+Subject: [PATCH] Fix memory leak in xmlSchemaValidateStream
+
+When ctxt->schema is NULL, xmlSchemaSAXPlug->xmlSchemaPreRun
+allocates a new schema for ctxt->schema and sets vctxt->xsiAssemble
+to 1. Then xmlSchemaVStart->xmlSchemaPreRun initializes
+vctxt->xsiAssemble to 0 again, so the allocated schema can never
+be freed.
+
+Found with libFuzzer.
+
+Upstream-Status: Accepted [https://gitlab.gnome.org/GNOME/libxml2/commit/7ffcd44d7e6c46704f8af0321d9314cd26e0e18a]
+CVE: CVE-2019-20388
+
+Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ xmlschemas.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/xmlschemas.c b/xmlschemas.c
+index 301c8449..39d92182 100644
+--- a/xmlschemas.c
++++ b/xmlschemas.c
+@@ -28090,7 +28090,6 @@ xmlSchemaPreRun(xmlSchemaValidCtxtPtr vctxt) {
+ vctxt->nberrors = 0;
+ vctxt->depth = -1;
+ vctxt->skipDepth = -1;
+- vctxt->xsiAssemble = 0;
+ vctxt->hasKeyrefs = 0;
+ #ifdef ENABLE_IDC_NODE_TABLES_TEST
+ vctxt->createIDCNodeTables = 1;
+--
+2.24.1
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch b/meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch
new file mode 100644
index 0000000000..facfefd362
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch
@@ -0,0 +1,36 @@
+From 0e1a49c8907645d2e155f0d89d4d9895ac5112b5 Mon Sep 17 00:00:00 2001
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+Date: Thu, 12 Dec 2019 17:30:55 +0800
+Subject: [PATCH] Fix infinite loop in xmlStringLenDecodeEntities
+
+When ctxt->instate == XML_PARSER_EOF, xmlParseStringEntityRef
+returns NULL, which causes an infinite loop in xmlStringLenDecodeEntities.
+
+Found with libFuzzer.
+
+Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/commit/0e1a49c89076]
+CVE: CVE-2020-7595
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ parser.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index d1c31963..a34bb6cd 100644
+--- a/parser.c
++++ b/parser.c
+@@ -2646,7 +2646,8 @@ xmlStringLenDecodeEntities(xmlParserCtxtPtr ctxt, const xmlChar *str, int len,
+ else
+ c = 0;
+ while ((c != 0) && (c != end) && /* non input consuming loop */
+- (c != end2) && (c != end3)) {
++ (c != end2) && (c != end3) &&
++ (ctxt->instate != XML_PARSER_EOF)) {
+
+ if (c == 0) break;
+ if ((c == '&') && (str[1] == '#')) {
+--
+2.24.1
+
diff --git a/meta/recipes-core/libxml/libxml2/fix-CVE-2019-19956.patch b/meta/recipes-core/libxml/libxml2/fix-CVE-2019-19956.patch
new file mode 100644
index 0000000000..43c3589d2c
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/fix-CVE-2019-19956.patch
@@ -0,0 +1,38 @@
+From 3cd2b25ddb04740be2880cfd78d60038452228b1 Mon Sep 17 00:00:00 2001
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+Date: Wed, 7 Aug 2019 17:39:17 +0800
+Subject: [PATCH] Fix memory leak in xmlParseBalancedChunkMemoryRecover
+
+When doc is NULL, the namespace created in xmlTreeEnsureXMLDecl
+is bound to newDoc->oldNs; in this case, setting newDoc->oldNs to
+NULL and freeing newDoc will cause a memory leak.
+
+Found with libFuzzer.
+
+Closes #82.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/commit/5a02583c7e683896d84878bd90641d8d9b0d0549]
+CVE: CVE-2019-19956
+
+Signed-off-by: Rahul Chauhan <rahulk@mvista.com>
+---
+ parser.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index b7ecd65..491f5c9 100644
+--- a/parser.c
++++ b/parser.c
+@@ -13899,7 +13899,8 @@ xmlParseBalancedChunkMemoryRecover(xmlDocPtr doc, xmlSAXHandlerPtr sax,
+ xmlFreeParserCtxt(ctxt);
+ newDoc->intSubset = NULL;
+ newDoc->extSubset = NULL;
+- newDoc->oldNs = NULL;
++ if(doc != NULL)
++ newDoc->oldNs = NULL;
+ xmlFreeDoc(newDoc);
+
+ return(ret);
+--
+2.7.4
+
diff --git a/meta/recipes-core/libxml/libxml2_2.9.8.bb b/meta/recipes-core/libxml/libxml2_2.9.8.bb
index 62643bc764..ab47a50c56 100644
--- a/meta/recipes-core/libxml/libxml2_2.9.8.bb
+++ b/meta/recipes-core/libxml/libxml2_2.9.8.bb
@@ -23,6 +23,9 @@ SRC_URI = "http://www.xmlsoft.org/sources/libxml2-${PV}.tar.gz;name=libtar \
file://fix-CVE-2017-8872.patch \
file://fix-CVE-2018-14404.patch \
file://0001-Fix-infinite-loop-in-LZMA-decompression.patch \
+ file://fix-CVE-2019-19956.patch \
+ file://CVE-2020-7595.patch \
+ file://CVE-2019-20388.patch \
"
SRC_URI[libtar.md5sum] = "b786e353e2aa1b872d70d5d1ca0c740d"
diff --git a/meta/recipes-core/meta/cve-update-db-native.bb b/meta/recipes-core/meta/cve-update-db-native.bb
index 2c427a5884..1b4f31692b 100644
--- a/meta/recipes-core/meta/cve-update-db-native.bb
+++ b/meta/recipes-core/meta/cve-update-db-native.bb
@@ -21,26 +21,26 @@ python do_populate_cve_db() {
"""
Update NVD database with json data feed
"""
-
+ import bb.utils
import sqlite3, urllib, urllib.parse, shutil, gzip
from datetime import date
- BASE_URL = "https://nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-"
+ bb.utils.export_proxies(d)
+
+ BASE_URL = "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
YEAR_START = 2002
- db_dir = os.path.join(d.getVar("DL_DIR"), 'CVE_CHECK')
- db_file = os.path.join(db_dir, 'nvdcve_1.0.db')
+ db_file = d.getVar("CVE_CHECK_DB_FILE")
+ db_dir = os.path.dirname(db_file)
json_tmpfile = os.path.join(db_dir, 'nvd.json.gz')
- proxy = d.getVar("https_proxy")
- if proxy:
- # instantiate an opener but do not install it as the global
- # opener unless if we're really sure it's applicable for all
- # urllib requests
- proxy_handler = urllib.request.ProxyHandler({'https': proxy})
- proxy_opener = urllib.request.build_opener(proxy_handler)
- else:
- proxy_opener = None
+ # Don't refresh the database more than once an hour
+ try:
+ import time
+ if time.time() - os.path.getmtime(db_file) < (60*60):
+ return
+ except OSError:
+ pass
cve_f = open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a')
@@ -59,15 +59,7 @@ python do_populate_cve_db() {
json_url = year_url + ".json.gz"
# Retrieve meta last modified date
-
- response = None
-
- if proxy_opener:
- response = proxy_opener.open(meta_url)
- else:
- req = urllib.request.Request(meta_url)
- response = urllib.request.urlopen(req)
-
+ response = urllib.request.urlopen(meta_url)
if response:
for l in response.read().decode("utf-8").splitlines():
key, value = l.split(":", 1)
@@ -87,12 +79,7 @@ python do_populate_cve_db() {
# Update db with current year json file
try:
- if proxy_opener:
- response = proxy_opener.open(json_url)
- else:
- req = urllib.request.Request(json_url)
- response = urllib.request.urlopen(req)
-
+ response = urllib.request.urlopen(json_url)
if response:
update_db(c, gzip.decompress(response.read()).decode('utf-8'))
c.execute("insert or replace into META values (?, ?)", [year, last_modified])
@@ -112,11 +99,14 @@ python do_populate_cve_db() {
def initialize_db(c):
c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+
c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+
c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
def parse_node_and_insert(c, node, cveId):
# Parse children node if needed
@@ -132,7 +122,7 @@ def parse_node_and_insert(c, node, cveId):
product = cpe23[4]
version = cpe23[5]
- if version != '*':
+ if version != '*' and version != '-':
# Version is defined, this is a '=' match
yield [cveId, vendor, product, version, '=', '', '']
else:
diff --git a/meta/recipes-core/ncurses/ncurses_6.1+20181013.bb b/meta/recipes-core/ncurses/ncurses_6.1+20181013.bb
index ef6ca9879b..90f6b4695c 100644
--- a/meta/recipes-core/ncurses/ncurses_6.1+20181013.bb
+++ b/meta/recipes-core/ncurses/ncurses_6.1+20181013.bb
@@ -9,3 +9,5 @@ SRCREV = "7a97a7f937762ba342d5b2fd7cd090885a809835"
S = "${WORKDIR}/git"
EXTRA_OECONF += "--with-abi-version=5 --cache-file=${B}/config.cache"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+(\.\d+)+(\+\d+)*)"
+
+CVE_VERSION = "6.1.${@d.getVar("PV").split('+')[1]}"
diff --git a/meta/recipes-core/systemd/systemd.inc b/meta/recipes-core/systemd/systemd.inc
index 2b9c291959..3161a6c543 100644
--- a/meta/recipes-core/systemd/systemd.inc
+++ b/meta/recipes-core/systemd/systemd.inc
@@ -14,7 +14,14 @@ LICENSE = "GPLv2 & LGPLv2.1"
LIC_FILES_CHKSUM = "file://LICENSE.GPL2;md5=751419260aa954499f7abaabaa882bbe \
file://LICENSE.LGPL2.1;md5=4fbd65380cdd255951079008b364516c"
-SRCREV = "511646b8ac5c82f210b16920044465756913d238"
+# DNSOverTLS strict mode was added in 243
+# https://github.com/systemd/systemd/issues/9397
+CVE_CHECK_WHITELIST += "CVE-2018-21029"
+
+# Commit dc903ec516cb on the 241 branch
+CVE_CHECK_WHITELIST += "CVE-2019-3843 CVE-2019-3844"
+
+SRCREV = "d1cc09a6eac5f8d62e630cc31d604095e30d2d84"
SRCBRANCH = "v241-stable"
SRC_URI = "git://github.com/systemd/systemd-stable.git;protocol=git;branch=${SRCBRANCH}"
diff --git a/meta/recipes-core/systemd/systemd/0001-bus_open-leak-sd_event_source-when-udevadm-trigger.patch b/meta/recipes-core/systemd/systemd/0001-bus_open-leak-sd_event_source-when-udevadm-trigger.patch
new file mode 100644
index 0000000000..7864140bd1
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0001-bus_open-leak-sd_event_source-when-udevadm-trigger.patch
@@ -0,0 +1,35 @@
+From 3e9828454dcdaa6cd19ee7ea3e3db30567f22c9f Mon Sep 17 00:00:00 2001
+From: ven <2988994+hexiaowen@users.noreply.github.com>
+Date: Wed, 22 May 2019 14:24:28 +0800
+Subject: =?UTF-8?q?bus=5Fopen=20leak=20sd=5Fevent=5Fsource=20when=20udevad?=
+ =?UTF-8?q?m=20trigger=E3=80=82?=
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+On my host, when executing udevadm trigger, I only receive the change event, which causes a memory leak.
+
+CVE: CVE-2019-20386
+Upstream-Status: Backport
+Signed-off-by: Adrian Bunk <bunk@stusta.de>
+---
+ src/login/logind-button.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/login/logind-button.c b/src/login/logind-button.c
+index daffbf0668..1624a31cc3 100644
+--- a/src/login/logind-button.c
++++ b/src/login/logind-button.c
+@@ -341,7 +341,8 @@ int button_open(Button *b) {
+ }
+
+ (void) button_set_mask(b);
+-
++
++ b->io_event_source = sd_event_source_unref(b->io_event_source);
+ r = sd_event_add_io(b->manager->event, &b->io_event_source, b->fd, EPOLLIN, button_dispatch, b);
+ if (r < 0) {
+ log_error_errno(r, "Failed to add button event: %m");
+--
+2.20.1
+
diff --git a/meta/recipes-core/systemd/systemd_241.bb b/meta/recipes-core/systemd/systemd_241.bb
index eb3242d624..e0dc936294 100644
--- a/meta/recipes-core/systemd/systemd_241.bb
+++ b/meta/recipes-core/systemd/systemd_241.bb
@@ -24,6 +24,7 @@ SRC_URI += "file://touchscreen.rules \
file://0005-rules-watch-metadata-changes-in-ide-devices.patch \
file://0001-meson-declare-version.h-as-dep-for-various-targets-t.patch \
file://0001-meson-declare-version.h-as-dependency-for-systemd.patch \
+ file://0001-bus_open-leak-sd_event_source-when-udevadm-trigger.patch \
"
# patches needed by musl
diff --git a/meta/recipes-devtools/binutils/binutils-2.32.inc b/meta/recipes-devtools/binutils/binutils-2.32.inc
index d3c52936d1..739ba70cf2 100644
--- a/meta/recipes-devtools/binutils/binutils-2.32.inc
+++ b/meta/recipes-devtools/binutils/binutils-2.32.inc
@@ -52,6 +52,8 @@ SRC_URI = "\
file://CVE-2019-12972.patch \
file://CVE-2019-14250.patch \
file://CVE-2019-14444.patch \
+ file://CVE-2019-17450.patch \
+ file://CVE-2019-17451.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2019-17450.patch b/meta/recipes-devtools/binutils/binutils/CVE-2019-17450.patch
new file mode 100644
index 0000000000..a6ce0b9a8a
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2019-17450.patch
@@ -0,0 +1,99 @@
+From 09dd135df9ebc7a4b640537e23e26a03a288a789 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 9 Oct 2019 00:07:29 +1030
+Subject: [PATCH] PR25078, stack overflow in function find_abstract_instance
+
+Selectively backporting fix for bfd/dwarf2.c, but not the ChangeLog
+file. There are newer versions of binutils, but none of them contain the
+commit fixing CVE-2019-17450, so backport it to master and zeus.
+
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=063c511bd79]
+CVE: CVE-2019-17450
+Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
+
+ PR 25078
+ * dwarf2.c (find_abstract_instance): Delete orig_info_ptr, add
+ recur_count. Error on recur_count reaching 100 rather than
+ info_ptr matching orig_info_ptr. Adjust calls.
+
+---
+ bfd/dwarf2.c | 35 +++++++++++++++++------------------
+ 1 file changed, 17 insertions(+), 18 deletions(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 0b4e485582..20ec9e2e56 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -2803,13 +2803,13 @@ lookup_symbol_in_variable_table (struct comp_unit *unit,
+ }
+
+ static bfd_boolean
+-find_abstract_instance (struct comp_unit * unit,
+- bfd_byte * orig_info_ptr,
+- struct attribute * attr_ptr,
+- const char ** pname,
+- bfd_boolean * is_linkage,
+- char ** filename_ptr,
+- int * linenumber_ptr)
++find_abstract_instance (struct comp_unit *unit,
++ struct attribute *attr_ptr,
++ unsigned int recur_count,
++ const char **pname,
++ bfd_boolean *is_linkage,
++ char **filename_ptr,
++ int *linenumber_ptr)
+ {
+ bfd *abfd = unit->abfd;
+ bfd_byte *info_ptr;
+@@ -2820,6 +2820,14 @@ find_abstract_instance (struct comp_unit * unit,
+ struct attribute attr;
+ const char *name = NULL;
+
++ if (recur_count == 100)
++ {
++ _bfd_error_handler
++ (_("DWARF error: abstract instance recursion detected"));
++ bfd_set_error (bfd_error_bad_value);
++ return FALSE;
++ }
++
+ /* DW_FORM_ref_addr can reference an entry in a different CU. It
+ is an offset from the .debug_info section, not the current CU. */
+ if (attr_ptr->form == DW_FORM_ref_addr)
+@@ -2939,15 +2947,6 @@ find_abstract_instance (struct comp_unit * unit,
+ info_ptr, info_ptr_end);
+ if (info_ptr == NULL)
+ break;
+- /* It doesn't ever make sense for DW_AT_specification to
+- refer to the same DIE. Stop simple recursion. */
+- if (info_ptr == orig_info_ptr)
+- {
+- _bfd_error_handler
+- (_("DWARF error: abstract instance recursion detected"));
+- bfd_set_error (bfd_error_bad_value);
+- return FALSE;
+- }
+ switch (attr.name)
+ {
+ case DW_AT_name:
+@@ -2961,7 +2960,7 @@ find_abstract_instance (struct comp_unit * unit,
+ }
+ break;
+ case DW_AT_specification:
+- if (!find_abstract_instance (unit, info_ptr, &attr,
++ if (!find_abstract_instance (unit, &attr, recur_count + 1,
+ &name, is_linkage,
+ filename_ptr, linenumber_ptr))
+ return FALSE;
+@@ -3175,7 +3174,7 @@ scan_unit_for_symbols (struct comp_unit *unit)
+
+ case DW_AT_abstract_origin:
+ case DW_AT_specification:
+- if (!find_abstract_instance (unit, info_ptr, &attr,
++ if (!find_abstract_instance (unit, &attr, 0,
+ &func->name,
+ &func->is_linkage,
+ &func->file,
+--
+2.23.0
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2019-17451.patch b/meta/recipes-devtools/binutils/binutils/CVE-2019-17451.patch
new file mode 100644
index 0000000000..b36a532668
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2019-17451.patch
@@ -0,0 +1,51 @@
+From 0192438051a7e781585647d5581a2a6f62fda362 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Wed, 9 Oct 2019 10:47:13 +1030
+Subject: [PATCH] PR25070, SEGV in function _bfd_dwarf2_find_nearest_line
+
+Selectively backporting fix for bfd/dwarf2.c, but not the ChangeLog
+file. There are newer versions of binutils, but none of them contain the
+commit fixing CVE-2019-17451, so backport it to master and zeus.
+
+Upstream-Status: Backport
+[https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=336bfbeb1848]
+CVE: CVE-2019-17451
+Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
+
+
+An evil testcase with two debug info sections, with sizes of 2aaaabac4ec1
+and ffffd5555453b140, results in a total size of 1. Reading the first
+section of course overflows the buffer and tramples on other memory.
+
+ PR 25070
+ * dwarf2.c (_bfd_dwarf2_slurp_debug_info): Catch overflow of
+ total_size calculation.
+---
+ bfd/dwarf2.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
+index 0b4e485582..a91597b1d0 100644
+--- a/bfd/dwarf2.c
++++ b/bfd/dwarf2.c
+@@ -4426,7 +4426,16 @@ _bfd_dwarf2_slurp_debug_info (bfd *abfd, bfd *debug_bfd,
+ for (total_size = 0;
+ msec;
+ msec = find_debug_info (debug_bfd, debug_sections, msec))
+- total_size += msec->size;
++ {
++ /* Catch PR25070 testcase overflowing size calculation here. */
++ if (total_size + msec->size < total_size
++ || total_size + msec->size < msec->size)
++ {
++ bfd_set_error (bfd_error_no_memory);
++ return FALSE;
++ }
++ total_size += msec->size;
++ }
+
+ stash->info_ptr_memory = (bfd_byte *) bfd_malloc (total_size);
+ if (stash->info_ptr_memory == NULL)
+--
+2.23.0
+
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch
new file mode 100644
index 0000000000..ba4e3a3c97
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch
@@ -0,0 +1,49 @@
+From 71ba13755337e19c9a826dfc874562a36e1b24d3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 19 Dec 2019 19:45:06 -0500
+Subject: [PATCH] e2fsck: don't try to rehash a deleted directory
+
+If a directory has been deleted in pass1[bcd] processing, then we
+shouldn't try to rehash the directory in pass 3a when we try to
+rehash/reoptimize directories.
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=71ba13755337e19c9a826dfc874562a36e1b24d3]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ e2fsck/pass1b.c | 4 ++++
+ e2fsck/rehash.c | 2 ++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/e2fsck/pass1b.c b/e2fsck/pass1b.c
+index 5693b9cf..bca701ca 100644
+--- a/e2fsck/pass1b.c
++++ b/e2fsck/pass1b.c
+@@ -705,6 +705,10 @@ static void delete_file(e2fsck_t ctx, ext2_ino_t ino,
+ fix_problem(ctx, PR_1B_BLOCK_ITERATE, &pctx);
+ if (ctx->inode_bad_map)
+ ext2fs_unmark_inode_bitmap2(ctx->inode_bad_map, ino);
++ if (ctx->inode_reg_map)
++ ext2fs_unmark_inode_bitmap2(ctx->inode_reg_map, ino);
++ ext2fs_unmark_inode_bitmap2(ctx->inode_dir_map, ino);
++ ext2fs_unmark_inode_bitmap2(ctx->inode_used_map, ino);
+ ext2fs_inode_alloc_stats2(fs, ino, -1, LINUX_S_ISDIR(dp->inode.i_mode));
+ quota_data_sub(ctx->qctx, &dp->inode, ino,
+ pb.dup_blocks * fs->blocksize);
+diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
+index 3dd1e941..2c908be0 100644
+--- a/e2fsck/rehash.c
++++ b/e2fsck/rehash.c
+@@ -1028,6 +1028,8 @@ void e2fsck_rehash_directories(e2fsck_t ctx)
+ if (!ext2fs_u32_list_iterate(iter, &ino))
+ break;
+ }
++ if (!ext2fs_test_inode_bitmap2(ctx->inode_dir_map, ino))
++ continue;
+
+ pctx.dir = ino;
+ if (first) {
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch
new file mode 100644
index 0000000000..de4bce0037
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch
@@ -0,0 +1,57 @@
+From 8dd73c149f418238f19791f9d666089ef9734dff Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 19 Dec 2019 19:37:34 -0500
+Subject: [PATCH] e2fsck: abort if there is a corrupted directory block when
+ rehashing
+
+In e2fsck pass 3a, when we are rehashing directories, at least in
+theory, all of the directories should have had corruptions with
+respect to directory entry structure fixed. However, it's possible
+(for example, if the user declined a fix) that we can reach this stage
+of processing with corrupted directory entries.
+
+So check for that case and don't try to process a corrupted directory
+block so we don't run into trouble in mutate_name() if there is a
+zero-length file name.
+
+Addresses: TALOS-2019-0973
+Addresses: CVE-2019-5188
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+CVE: CVE-2019-5188
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=8dd73c149f418238f19791f9d666089ef9734dff]
+---
+ e2fsck/rehash.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
+index a5fc1be1..3dd1e941 100644
+--- a/e2fsck/rehash.c
++++ b/e2fsck/rehash.c
+@@ -160,6 +160,10 @@ static int fill_dir_block(ext2_filsys fs,
+ dir_offset += rec_len;
+ if (dirent->inode == 0)
+ continue;
++ if ((name_len) == 0) {
++ fd->err = EXT2_ET_DIR_CORRUPTED;
++ return BLOCK_ABORT;
++ }
+ if (!fd->compress && (name_len == 1) &&
+ (dirent->name[0] == '.'))
+ continue;
+@@ -401,6 +405,11 @@ static int duplicate_search_and_fix(e2fsck_t ctx, ext2_filsys fs,
+ continue;
+ }
+ new_len = ext2fs_dirent_name_len(ent->dir);
++ if (new_len == 0) {
++ /* should never happen */
++ ext2fs_unmark_valid(fs);
++ continue;
++ }
+ memcpy(new_name, ent->dir->name, new_len);
+ mutate_name(new_name, &new_len);
+ for (j=0; j < fd->num_array; j++) {
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch
new file mode 100644
index 0000000000..342a2b855b
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch
@@ -0,0 +1,76 @@
+From: Wang Shilong <wshilong@ddn.com>
+Date: Mon, 30 Dec 2019 19:52:39 -0500
+Subject: e2fsck: fix use after free in calculate_tree()
+
+The problem is that alloc_blocks() will call get_next_block(), which
+might reallocate outdir->buf, so the memory address could change. To
+fix this, pointers that point into outdir->buf, such as int_limit and
+root, need to be recalculated based on the new starting address of
+outdir->buf.
+
+[ Changed to correctly recalculate int_limit, and to optimize how we
+ reallocate outdir->buf. -TYT ]
+
+Addresses-Debian-Bug: 948517
+Signed-off-by: Wang Shilong <wshilong@ddn.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+(cherry picked from commit 101e73e99ccafa0403fcb27dd7413033b587ca01)
+
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=101e73e99ccafa0403fcb27dd7413033b587ca01]
+---
+ e2fsck/rehash.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
+index 0a5888a9..2574e151 100644
+--- a/e2fsck/rehash.c
++++ b/e2fsck/rehash.c
+@@ -295,7 +295,11 @@ static errcode_t get_next_block(ext2_filsys fs, struct out_dir *outdir,
+ errcode_t retval;
+
+ if (outdir->num >= outdir->max) {
+- retval = alloc_size_dir(fs, outdir, outdir->max + 50);
++ int increment = outdir->max / 10;
++
++ if (increment < 50)
++ increment = 50;
++ retval = alloc_size_dir(fs, outdir, outdir->max + increment);
+ if (retval)
+ return retval;
+ }
+@@ -637,6 +641,9 @@ static int alloc_blocks(ext2_filsys fs,
+ if (retval)
+ return retval;
+
++ /* outdir->buf might be reallocated */
++ *prev_ent = (struct ext2_dx_entry *) (outdir->buf + *prev_offset);
++
+ *next_ent = set_int_node(fs, block_start);
+ *limit = (struct ext2_dx_countlimit *)(*next_ent);
+ if (next_offset)
+@@ -726,6 +733,9 @@ static errcode_t calculate_tree(ext2_filsys fs,
+ return retval;
+ }
+ if (c3 == 0) {
++ int delta1 = (char *)int_limit - outdir->buf;
++ int delta2 = (char *)root - outdir->buf;
++
+ retval = alloc_blocks(fs, &limit, &int_ent,
+ &dx_ent, &int_offset,
+ NULL, outdir, i, &c2,
+@@ -733,6 +743,11 @@ static errcode_t calculate_tree(ext2_filsys fs,
+ if (retval)
+ return retval;
+
++ /* outdir->buf might be reallocated */
++ int_limit = (struct ext2_dx_countlimit *)
++ (outdir->buf + delta1);
++ root = (struct ext2_dx_entry *)
++ (outdir->buf + delta2);
+ }
+ dx_ent->block = ext2fs_cpu_to_le32(i);
+ if (c3 != limit->limit)
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.44.5.bb b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.44.5.bb
index 0695ee9dc3..da06888b34 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.44.5.bb
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.44.5.bb
@@ -7,6 +7,9 @@ SRC_URI += "file://remove.ldconfig.call.patch \
file://mkdir_p.patch \
file://0001-misc-create_inode.c-set-dir-s-mode-correctly.patch \
file://0001-create_inode-fix-copying-large-files.patch \
+ file://CVE-2019-5188.patch \
+ file://0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch \
+ file://e2fsck-fix-use-after-free-in-calculate_tree.patch \
"
SRC_URI_append_class-native = " file://e2fsprogs-fix-missing-check-for-permission-denied.patch \
diff --git a/meta/recipes-devtools/file/file/CVE-2019-18218.patch b/meta/recipes-devtools/file/file/CVE-2019-18218.patch
new file mode 100644
index 0000000000..3d02c5ad4b
--- /dev/null
+++ b/meta/recipes-devtools/file/file/CVE-2019-18218.patch
@@ -0,0 +1,55 @@
+cdf_read_property_info in cdf.c in file through 5.37 does not restrict the
+number of CDF_VECTOR elements, which allows a heap-based buffer overflow (4-byte
+out-of-bounds write).
+
+CVE: CVE-2019-18218
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 46a8443f76cec4b41ec736eca396984c74664f84 Mon Sep 17 00:00:00 2001
+From: Christos Zoulas <christos@zoulas.com>
+Date: Mon, 26 Aug 2019 14:31:39 +0000
+Subject: [PATCH] Limit the number of elements in a vector (found by oss-fuzz)
+
+---
+ src/cdf.c | 9 ++++-----
+ src/cdf.h | 1 +
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/src/cdf.c b/src/cdf.c
+index 9d6396742..bb81d6374 100644
+--- a/src/cdf.c
++++ b/src/cdf.c
+@@ -1016,8 +1016,9 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
+ goto out;
+ }
+ nelements = CDF_GETUINT32(q, 1);
+- if (nelements == 0) {
+- DPRINTF(("CDF_VECTOR with nelements == 0\n"));
++ if (nelements > CDF_ELEMENT_LIMIT || nelements == 0) {
++ DPRINTF(("CDF_VECTOR with nelements == %"
++ SIZE_T_FORMAT "u\n", nelements));
+ goto out;
+ }
+ slen = 2;
+@@ -1060,8 +1061,6 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
+ goto out;
+ inp += nelem;
+ }
+- DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
+- nelements));
+ for (j = 0; j < nelements && i < sh.sh_properties;
+ j++, i++)
+ {
+diff --git a/src/cdf.h b/src/cdf.h
+index 2f7e554b7..05056668f 100644
+--- a/src/cdf.h
++++ b/src/cdf.h
+@@ -48,6 +48,7 @@
+ typedef int32_t cdf_secid_t;
+
+ #define CDF_LOOP_LIMIT 10000
++#define CDF_ELEMENT_LIMIT 100000
+
+ #define CDF_SECID_NULL 0
+ #define CDF_SECID_FREE -1
diff --git a/meta/recipes-devtools/file/file_5.36.bb b/meta/recipes-devtools/file/file_5.36.bb
index 1a81fde259..f169671f10 100644
--- a/meta/recipes-devtools/file/file_5.36.bb
+++ b/meta/recipes-devtools/file/file_5.36.bb
@@ -14,7 +14,8 @@ DEPENDS_class-native = "zlib-native"
# Blacklist a bogus tag in upstream check
UPSTREAM_CHECK_GITTAGREGEX = "FILE(?P<pver>(?!6_23).+)"
-SRC_URI = "git://github.com/file/file.git"
+SRC_URI = "git://github.com/file/file.git \
+ file://CVE-2019-18218.patch"
SRCREV = "f3a4b9ada3ca99e62c62b9aa78eee4935a8094fe"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/flex/flex_2.6.0.bb b/meta/recipes-devtools/flex/flex_2.6.0.bb
index b477cd8c7f..12ce0cb461 100644
--- a/meta/recipes-devtools/flex/flex_2.6.0.bb
+++ b/meta/recipes-devtools/flex/flex_2.6.0.bb
@@ -68,3 +68,6 @@ do_install_ptest() {
-e 's/^builddir = \(.*\)/builddir = ./' -e 's/^top_builddir = \(.*\)/top_builddir = ./' \
-i ${D}${PTEST_PATH}/Makefile
}
+
+# Not Apache Flex, or Adobe Flex, or IBM Flex.
+CVE_PRODUCT = "flex_project:flex"
diff --git a/meta/recipes-devtools/gdb/gdb-8.2.1.inc b/meta/recipes-devtools/gdb/gdb-8.2.1.inc
index f28b57439c..8fa48171f4 100644
--- a/meta/recipes-devtools/gdb/gdb-8.2.1.inc
+++ b/meta/recipes-devtools/gdb/gdb-8.2.1.inc
@@ -19,6 +19,7 @@ SRC_URI = "http://ftp.gnu.org/gnu/gdb/gdb-${PV}.tar.xz \
file://0001-Fix-build-with-latest-GCC-9.0-tree.patch \
file://CVE-2017-9778.patch \
file://0012-AArch64-Fix-the-gdb-build-with-musl-libc.patch \
+ file://CVE-2019-1010180.patch \
"
SRC_URI[md5sum] = "f8b2562e830a4098dd5b5ea9e9296c70"
SRC_URI[sha256sum] = "0a6a432907a03c5c8eaad3c3cffd50c00a40c3a5e3c4039440624bae703f2202"
diff --git a/meta/recipes-devtools/gdb/gdb/CVE-2019-1010180.patch b/meta/recipes-devtools/gdb/gdb/CVE-2019-1010180.patch
new file mode 100644
index 0000000000..46b2b3a713
--- /dev/null
+++ b/meta/recipes-devtools/gdb/gdb/CVE-2019-1010180.patch
@@ -0,0 +1,132 @@
+From 950b74950f6020eda38647f22e9077ac7f68ca49 Mon Sep 17 00:00:00 2001
+From: Keith Seitz <keiths@redhat.com>
+Date: Wed, 16 Oct 2019 11:33:59 -0700
+Subject: [PATCH] DWARF reader: Reject sections with invalid sizes
+
+This is another fuzzer bug, gdb/23567. This time, the fuzzer has
+specifically altered the size of .debug_str:
+
+$ eu-readelf -S objdump
+Section Headers:
+[Nr] Name Type Addr Off Size ES Flags Lk Inf Al
+[31] .debug_str PROGBITS 0000000000000000 0057116d ffffffffffffffff 1 MS 0 0 1
+
+When this file is loaded into GDB, the DWARF reader crashes attempting
+to access the string table (or it may just store a bunch of nonsense):
+
+[gdb-8.3-6-fc30]
+$ gdb -nx -q objdump
+BFD: warning: /path/to/objdump has a corrupt section with a size (ffffffffffffffff) larger than the file size
+Reading symbols from /path/to/objdump...
+Segmentation fault (core dumped)
+
+Nick has already committed a BFD patch to issue the warning seen above.
+
+[gdb master 6acc1a0b]
+$ gdb -BFD: warning: /path/to/objdump has a corrupt section with a size (ffffffffffffffff) larger than the file size
+Reading symbols from /path/to/objdump...
+(gdb) inf func
+All defined functions:
+
+File ./../include/dwarf2.def:
+186: const
+
+ 8 *>(.:
+ ;'@�B);
+747: const
+
+ 8 *�(.:
+ ;'@�B);
+701: const
+
+ 8 *�D �
+ (.:
+ ;'@�B);
+71: const
+
+ 8 *(.:
+ ;'@�B);
+/* and more gibberish */
+
+Consider read_indirect_string_at_offset_from:
+
+static const char *
+read_indirect_string_at_offset_from (struct objfile *objfile,
+ bfd *abfd, LONGEST str_offset,
+ struct dwarf2_section_info *sect,
+ const char *form_name,
+ const char *sect_name)
+{
+ dwarf2_read_section (objfile, sect);
+ if (sect->buffer == NULL)
+ error (_("%s used without %s section [in module %s]"),
+ form_name, sect_name, bfd_get_filename (abfd));
+ if (str_offset >= sect->size)
+ error (_("%s pointing outside of %s section [in module %s]"),
+ form_name, sect_name, bfd_get_filename (abfd));
+ gdb_assert (HOST_CHAR_BIT == 8);
+ if (sect->buffer[str_offset] == '\0')
+ return NULL;
+ return (const char *) (sect->buffer + str_offset);
+}
+
+With sect_size being ginormous, the code attempts to access
+sect->buffer[GINORMOUS], and depending on the layout of memory,
+GDB either stores a bunch of gibberish strings or crashes.
+
+This is an attempt to mitigate this by implementing a similar approach
+used by BFD. In our case, we simply reject the section with the invalid
+length:
+
+$ ./gdb -nx -q objdump
+BFD: warning: /path/to/objdump has a corrupt section with a size (ffffffffffffffff) larger than the file size
+Reading symbols from /path/to/objdump...
+
+warning: Discarding section .debug_str which has a section size (ffffffffffffffff) larger than the file size [in module /path/to/objdump]
+DW_FORM_strp used without .debug_str section [in module /path/to/objdump]
+(No debugging symbols found in /path/to/objdump)
+(gdb)
+
+Unfortunately, I have not found a way to regression test this, since it
+requires poking ELF section headers.
+
+gdb/ChangeLog:
+2019-10-16 Keith Seitz <keiths@redhat.com>
+
+ PR gdb/23567
+ * dwarf2read.c (dwarf2_per_objfile::locate_sections): Discard
+ sections whose size is greater than the file size.
+
+Change-Id: I896ac3b4eb2207c54e8e05c16beab3051d9b4b2f
+
+CVE: CVE-2019-1010180
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=950b74950f6020eda38647f22e9077ac7f68ca49]
+[Removed Changelog entry]
+Signed-off-by: Vinay Kumar <vinay.m.engg@gmail.com>
+---
+ gdb/dwarf2read.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/gdb/dwarf2read.c b/gdb/dwarf2read.c
+index 0443b55..a78f818 100644
+--- a/gdb/dwarf2read.c
++++ b/gdb/dwarf2read.c
+@@ -2338,6 +2338,15 @@ dwarf2_per_objfile::locate_sections (bfd *abfd, asection *sectp,
+ if ((aflag & SEC_HAS_CONTENTS) == 0)
+ {
+ }
++ else if (elf_section_data (sectp)->this_hdr.sh_size
++ > bfd_get_file_size (abfd))
++ {
++ bfd_size_type size = elf_section_data (sectp)->this_hdr.sh_size;
++ warning (_("Discarding section %s which has a section size (%s"
++ ") larger than the file size [in module %s]"),
++ bfd_section_name (abfd, sectp), phex_nz (size, sizeof (size)),
++ bfd_get_filename (abfd));
++ }
+ else if (section_is_p (sectp->name, &names.info))
+ {
+ this->info.s.section = sectp;
+--
+2.7.4
+
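As a side note on the guard added above: the backport works because the corrupted header claims a section size that no containing file could hold, so the size can be sanity-checked against the file size before any offset arithmetic is allowed to trust it. A minimal standalone C sketch of that check follows; it is not gdb code, and every name in it (fake_section, section_size_is_sane) is hypothetical.

    /*
     * Standalone sketch (not gdb code) of the guard the commit message
     * describes: never trust a section's recorded size when it is larger
     * than the file that contains it.  All names here are hypothetical.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_section {
        const char *name;
        uint64_t    recorded_size;   /* size claimed by the section header */
    };

    /* Returns 1 if the section may be used, 0 if it must be discarded. */
    static int section_size_is_sane(const struct fake_section *sec,
                                    uint64_t file_size)
    {
        if (sec->recorded_size > file_size) {
            fprintf(stderr,
                    "Discarding section %s: recorded size 0x%llx exceeds "
                    "file size 0x%llx\n",
                    sec->name,
                    (unsigned long long) sec->recorded_size,
                    (unsigned long long) file_size);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct fake_section bad = { ".debug_str", UINT64_MAX };

        /* A 6 MiB file cannot contain an 0xffffffffffffffff-byte section. */
        if (!section_size_is_sane(&bad, 6 * 1024 * 1024))
            puts("section rejected, later str_offset checks stay meaningful");
        return 0;
    }

Compiled and run, the sketch rejects the oversized .debug_str-style section, which is the same outcome the backport produces for the fuzzed objdump binary.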
diff --git a/meta/recipes-devtools/git/git.inc b/meta/recipes-devtools/git/git.inc
index 26a22ac1e8..6e137432f0 100644
--- a/meta/recipes-devtools/git/git.inc
+++ b/meta/recipes-devtools/git/git.inc
@@ -13,6 +13,8 @@ S = "${WORKDIR}/git-${PV}"
LIC_FILES_CHKSUM = "file://COPYING;md5=7c0d7ef03a7eb04ce795b0f60e68e7e1"
+CVE_PRODUCT = "git-scm:git"
+
PACKAGECONFIG ??= ""
PACKAGECONFIG[cvsserver] = ""
PACKAGECONFIG[svn] = ""
diff --git a/meta/recipes-devtools/git/git_2.20.1.bb b/meta/recipes-devtools/git/git_2.20.1.bb
deleted file mode 100644
index 877fb05e58..0000000000
--- a/meta/recipes-devtools/git/git_2.20.1.bb
+++ /dev/null
@@ -1,11 +0,0 @@
-require git.inc
-
-EXTRA_OECONF += "ac_cv_snprintf_returns_bogus=no \
- ac_cv_fread_reads_directories=${ac_cv_fread_reads_directories=yes} \
- "
-EXTRA_OEMAKE += "NO_GETTEXT=1"
-
-SRC_URI[tarball.md5sum] = "7a7769e5c957364ed0aed89e6e67c254"
-SRC_URI[tarball.sha256sum] = "edc3bc1495b69179ba4e272e97eff93334a20decb1d8db6ec3c19c16417738fd"
-SRC_URI[manpages.md5sum] = "78c6e54a61a167dab5e8ae07036293ab"
-SRC_URI[manpages.sha256sum] = "e9c123463abd05e142defe44a8060ce6e9853dfd8c83b2542e38b7deac4e6d4c"
diff --git a/meta/recipes-devtools/git/git_2.20.4.bb b/meta/recipes-devtools/git/git_2.20.4.bb
new file mode 100644
index 0000000000..e44da452ad
--- /dev/null
+++ b/meta/recipes-devtools/git/git_2.20.4.bb
@@ -0,0 +1,11 @@
+require git.inc
+
+EXTRA_OECONF += "ac_cv_snprintf_returns_bogus=no \
+ ac_cv_fread_reads_directories=${ac_cv_fread_reads_directories=yes} \
+ "
+EXTRA_OEMAKE += "NO_GETTEXT=1"
+
+SRC_URI[tarball.md5sum] = "6f524e37186a79848a716e2a91330868"
+SRC_URI[tarball.sha256sum] = "92719084d7648b69038ea617a3bc45ec74f60ed7eef753ae2ad84b6f0b268e9a"
+SRC_URI[manpages.md5sum] = "dceabcda244042a06ed4cabd754627a5"
+SRC_URI[manpages.sha256sum] = "72fdd1799756b1240921d10eb5c67de9a651b44d429ba7293929c9d5344ad3e0"
diff --git a/meta/recipes-devtools/go/go-1.12.inc b/meta/recipes-devtools/go/go-1.12.inc
index ed14b175e6..6aecaad75d 100644
--- a/meta/recipes-devtools/go/go-1.12.inc
+++ b/meta/recipes-devtools/go/go-1.12.inc
@@ -17,6 +17,7 @@ SRC_URI += "\
file://0007-cmd-go-make-GOROOT-precious-by-default.patch \
file://0008-use-GOBUILDMODE-to-set-buildmode.patch \
file://0001-release-branch.go1.12-security-net-textproto-don-t-n.patch \
+ file://0010-fix-CVE-2019-17596.patch \
"
SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch"
diff --git a/meta/recipes-devtools/go/go-1.12/0010-fix-CVE-2019-17596.patch b/meta/recipes-devtools/go/go-1.12/0010-fix-CVE-2019-17596.patch
new file mode 100644
index 0000000000..134cfab737
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.12/0010-fix-CVE-2019-17596.patch
@@ -0,0 +1,42 @@
+From f1783e1ce44a86c000a7c380a57a805c89c3efbe Mon Sep 17 00:00:00 2001
+From: Katie Hockman <katie@golang.org>
+Date: Mon, 14 Oct 2019 16:42:21 -0400
+Subject: [PATCH] crypto/dsa: prevent bad public keys from causing panic
+
+dsa.Verify might currently use a nil s inverse in a
+multiplication if the public key contains a non-prime Q,
+causing a panic. Change this to check that the mod
+inverse exists before using it.
+
+Fixes CVE-2019-17596
+
+Change-Id: I94d5f3cc38f1b5d52d38dcb1d253c71b7fd1cae7
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/572809
+Reviewed-by: Filippo Valsorda <valsorda@google.com>
+(cherry picked from commit 9119dfb0511326d4485b248b83d4fde19c95d0f7)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/575232
+
+CVE: CVE-2019-17596
+Upstream-Status: Backport [https://github.com/golang/go/commit/2017d88dbc096381d4f348d2fb08bfb3c2b7ed73]
+Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+---
+ src/crypto/dsa/dsa.go | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/crypto/dsa/dsa.go b/src/crypto/dsa/dsa.go
+index 575314b..2fc4f1f 100644
+--- a/src/crypto/dsa/dsa.go
++++ b/src/crypto/dsa/dsa.go
+@@ -279,6 +279,9 @@ func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
+ }
+
+ w := new(big.Int).ModInverse(s, pub.Q)
++ if w == nil {
++ return false
++ }
+
+ n := pub.Q.BitLen()
+ if n&7 != 0 {
+--
+2.23.0
+
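The one-hunk fix above relies on big.Int.ModInverse returning nil when no inverse exists: an inverse of s modulo Q exists only when gcd(s, Q) == 1, and a crafted non-prime Q lets an attacker violate that. The standalone C sketch below (hypothetical helper names, plain 64-bit integers via extended Euclid, not the Go standard library and not part of the patch) shows the same check-before-use pattern.

    /*
     * Sketch of "check that the modular inverse exists before using it".
     * Assumes m > 0 and a >= 0; uses the extended Euclidean algorithm.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 and stores the inverse of a mod m in *inv, or 0 if none exists. */
    static int mod_inverse(int64_t a, int64_t m, int64_t *inv)
    {
        int64_t old_r = a % m, r = m;
        int64_t old_s = 1, s = 0;

        while (r != 0) {
            int64_t q = old_r / r, tmp;
            tmp = old_r - q * r; old_r = r; r = tmp;
            tmp = old_s - q * s; old_s = s; s = tmp;
        }
        if (old_r != 1 && old_r != -1)
            return 0;                      /* gcd != 1: no inverse exists */
        *inv = ((old_s % m) + m) % m;
        return 1;
    }

    int main(void)
    {
        int64_t w;

        /* 14 has no inverse mod 21 (shared factor 7), mimicking a bad Q. */
        if (!mod_inverse(14, 21, &w))
            puts("verification rejected: no modular inverse");

        /* 7 mod 40 is fine: the inverse is 23, since 7 * 23 = 161 = 4*40 + 1. */
        if (mod_inverse(7, 40, &w))
            printf("inverse of 7 mod 40 is %lld\n", (long long) w);
        return 0;
    }

The point of the sketch is only the failure path: a caller that skips the "no inverse" check ends up reading an unset value, which corresponds to the nil-pointer panic the CVE describes when ModInverse returns nil.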
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch b/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch
new file mode 100644
index 0000000000..6e3f909d0f
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch
@@ -0,0 +1,116 @@
+From 3079f7966dbed4497e36d5067cbfd896a90358cb Mon Sep 17 00:00:00 2001
+From: Cyrill Gorcunov <gorcunov@gmail.com>
+Date: Wed, 14 Nov 2018 10:03:42 +0300
+Subject: [PATCH] preproc: Fix malformed parameter count
+
+readnum returns a 64-bit number which may become
+a negative integer upon conversion, which in
+turn leads to out-of-bounds array access.
+
+Fix it by an explicit conversion with a bounds check
+
+ | POC6:2: error: parameter count `2222222222' is out of bounds [0; 2147483647]
+
+https://bugzilla.nasm.us/show_bug.cgi?id=3392528
+
+Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2018-19755
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ asm/preproc.c | 43 +++++++++++++++++++++----------------------
+ 1 file changed, 21 insertions(+), 22 deletions(-)
+
+diff --git a/asm/preproc.c b/asm/preproc.c
+index b6afee3..e5ad05a 100644
+--- a/asm/preproc.c
++++ b/asm/preproc.c
+@@ -1650,6 +1650,23 @@ smacro_defined(Context * ctx, const char *name, int nparam, SMacro ** defn,
+ return false;
+ }
+
++/* param should be a natural number [0; INT_MAX] */
++static int read_param_count(const char *str)
++{
++ int result;
++ bool err;
++
++ result = readnum(str, &err);
++ if (result < 0 || result > INT_MAX) {
++ result = 0;
++ nasm_error(ERR_NONFATAL, "parameter count `%s' is out of bounds [%d; %d]",
++ str, 0, INT_MAX);
++ } else if (err) {
++ nasm_error(ERR_NONFATAL, "unable to parse parameter count `%s'", str);
++ }
++ return result;
++}
++
+ /*
+ * Count and mark off the parameters in a multi-line macro call.
+ * This is called both from within the multi-line macro expansion
+@@ -1871,11 +1888,7 @@ static bool if_condition(Token * tline, enum preproc_token ct)
+ pp_directives[ct]);
+ } else {
+ searching.nparam_min = searching.nparam_max =
+- readnum(tline->text, &j);
+- if (j)
+- nasm_error(ERR_NONFATAL,
+- "unable to parse parameter count `%s'",
+- tline->text);
++ read_param_count(tline->text);
+ }
+ if (tline && tok_is_(tline->next, "-")) {
+ tline = tline->next->next;
+@@ -1886,11 +1899,7 @@ static bool if_condition(Token * tline, enum preproc_token ct)
+ "`%s' expects a parameter count after `-'",
+ pp_directives[ct]);
+ else {
+- searching.nparam_max = readnum(tline->text, &j);
+- if (j)
+- nasm_error(ERR_NONFATAL,
+- "unable to parse parameter count `%s'",
+- tline->text);
++ searching.nparam_max = read_param_count(tline->text);
+ if (searching.nparam_min > searching.nparam_max) {
+ nasm_error(ERR_NONFATAL,
+ "minimum parameter count exceeds maximum");
+@@ -2079,8 +2088,6 @@ static void undef_smacro(Context *ctx, const char *mname)
+ */
+ static bool parse_mmacro_spec(Token *tline, MMacro *def, const char *directive)
+ {
+- bool err;
+-
+ tline = tline->next;
+ skip_white_(tline);
+ tline = expand_id(tline);
+@@ -2103,11 +2110,7 @@ static bool parse_mmacro_spec(Token *tline, MMacro *def, const char *directive)
+ if (!tok_type_(tline, TOK_NUMBER)) {
+ nasm_error(ERR_NONFATAL, "`%s' expects a parameter count", directive);
+ } else {
+- def->nparam_min = def->nparam_max =
+- readnum(tline->text, &err);
+- if (err)
+- nasm_error(ERR_NONFATAL,
+- "unable to parse parameter count `%s'", tline->text);
++ def->nparam_min = def->nparam_max = read_param_count(tline->text);
+ }
+ if (tline && tok_is_(tline->next, "-")) {
+ tline = tline->next->next;
+@@ -2117,11 +2120,7 @@ static bool parse_mmacro_spec(Token *tline, MMacro *def, const char *directive)
+ nasm_error(ERR_NONFATAL,
+ "`%s' expects a parameter count after `-'", directive);
+ } else {
+- def->nparam_max = readnum(tline->text, &err);
+- if (err) {
+- nasm_error(ERR_NONFATAL, "unable to parse parameter count `%s'",
+- tline->text);
+- }
++ def->nparam_max = read_param_count(tline->text);
+ if (def->nparam_min > def->nparam_max) {
+ nasm_error(ERR_NONFATAL, "minimum parameter count exceeds maximum");
+ def->nparam_max = def->nparam_min;
+--
+2.10.5.GIT
+
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch b/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch
new file mode 100644
index 0000000000..d45d2cb465
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch
@@ -0,0 +1,43 @@
+From 93d41d82963b2cfd0b24c906f5a8daf53281b559 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin (Intel)" <hpa@zytor.com>
+Date: Fri, 16 Aug 2019 01:12:54 -0700
+Subject: [PATCH] BR 3392576: don't segfault on a bad %pragma limit
+
+Don't segfault on a bad %pragma limit. Instead treat a NULL pointer as
+an empty string.
+
+Reported-by: Ren Kimura <rkx1209dev@gmail.com>
+Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
+
+CVE: CVE-2019-14248
+Upstream-Status: Backport [https://repo.or.cz/nasm.git/commit/93d41d82963b2cfd0b24c906f5a8daf53281b559]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ asm/nasm.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/asm/nasm.c b/asm/nasm.c
+index c84d675..65116ab 100644
+--- a/asm/nasm.c
++++ b/asm/nasm.c
+@@ -212,6 +212,11 @@ nasm_set_limit(const char *limit, const char *valstr)
+ bool rn_error;
+ int errlevel;
+
++ if (!limit)
++ limit = "";
++ if (!valstr)
++ valstr = "";
++
+ for (i = 0; i <= LIMIT_MAX; i++) {
+ if (!nasm_stricmp(limit, limit_info[i].name))
+ break;
+@@ -204,7 +209,7 @@ nasm_set_limit(const char *limit, const char *valstr)
+ errlevel = ERR_WARNING|ERR_NOFILE|ERR_USAGE;
+ else
+ errlevel = ERR_WARNING|ERR_PASS1|WARN_UNKNOWN_PRAGMA;
+- nasm_error(errlevel, "unknown limit: `%s'", limit);
++ nasm_error(errlevel, "invalid limit value: `%s'", valstr);
+ return DIRR_ERROR;
+ }
+
diff --git a/meta/recipes-devtools/nasm/nasm_2.14.02.bb b/meta/recipes-devtools/nasm/nasm_2.14.02.bb
index ecec78d8ec..bd4ecea8b6 100644
--- a/meta/recipes-devtools/nasm/nasm_2.14.02.bb
+++ b/meta/recipes-devtools/nasm/nasm_2.14.02.bb
@@ -3,7 +3,10 @@ SECTION = "devel"
LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe"
-SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2"
+SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \
+ file://CVE-2018-19755.patch \
+ file://CVE-2019-14248.patch \
+ "
SRC_URI[md5sum] = "3f489aa48ad2aa1f967dc5e293bbd06f"
SRC_URI[sha256sum] = "34fd26c70a277a9fdd54cb5ecf389badedaf48047b269d1008fbc819b24e80bc"
diff --git a/meta/recipes-devtools/python/python-native/0001-python-native-fix-one-do_populate_sysroot-warning.patch b/meta/recipes-devtools/python/python-native/0001-python-native-fix-one-do_populate_sysroot-warning.patch
index 989818927d..707ee596fa 100644
--- a/meta/recipes-devtools/python/python-native/0001-python-native-fix-one-do_populate_sysroot-warning.patch
+++ b/meta/recipes-devtools/python/python-native/0001-python-native-fix-one-do_populate_sysroot-warning.patch
@@ -1,4 +1,4 @@
-From 12292444e1b3662b994bc223d92b8338fb0895ff Mon Sep 17 00:00:00 2001
+From 6cbb7529cf7ff0da3ca649fb3486facd9620d625 Mon Sep 17 00:00:00 2001
From: Changqing Li <changqing.li@windriver.com>
Date: Thu, 25 Oct 2018 07:32:14 +0000
Subject: [PATCH] python-native: fix one do_populate_sysroot warning
@@ -17,23 +17,24 @@ when do_populate_sysroot. use append to fix it.
Upstream-Status: Inappropriate [oe-specific]
Signed-off-by: Changqing Li <changqing.li@windriver.com>
+
---
setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.py b/setup.py
-index 7bf13ed..6c0f29b 100644
+index a2c8127..22f9e23 100644
--- a/setup.py
+++ b/setup.py
-@@ -40,7 +40,7 @@ def add_dir_to_list(dirlist, dir):
- 1) 'dir' is not already in 'dirlist'
- 2) 'dir' actually exists, and is a directory."""
- if dir is not None and os.path.isdir(dir) and dir not in dirlist:
-- dirlist.insert(0, dir)
-+ dirlist.append(dir)
-
- def macosx_sdk_root():
- """
+@@ -47,7 +47,7 @@ def add_dir_to_list(dirlist, dir):
+ else:
+ dir_exists = os.path.isdir(dir)
+ if dir_exists:
+- dirlist.insert(0, dir)
++ dirlist.append(dir)
+
+ MACOS_SDK_ROOT = None
+
--
-2.18.0
+2.17.1
diff --git a/meta/recipes-devtools/python/python-native_2.7.16.bb b/meta/recipes-devtools/python/python-native_2.7.18.bb
index b7442800d9..335318bab8 100644
--- a/meta/recipes-devtools/python/python-native_2.7.16.bb
+++ b/meta/recipes-devtools/python/python-native_2.7.18.bb
@@ -12,7 +12,7 @@ SRC_URI += "\
file://nohostlibs.patch \
file://multilib.patch \
file://add-md5module-support.patch \
- file://builddir.patch \
+ file://0001-python-Resolve-intermediate-staging-issues.patch \
file://parallel-makeinst-create-bindir.patch \
file://revert_use_of_sysconfigdata.patch \
file://0001-python-native-fix-one-do_populate_sysroot-warning.patch \
diff --git a/meta/recipes-devtools/python/python.inc b/meta/recipes-devtools/python/python.inc
index 8d0e90862c..bd214e8f8b 100644
--- a/meta/recipes-devtools/python/python.inc
+++ b/meta/recipes-devtools/python/python.inc
@@ -5,18 +5,13 @@ SECTION = "devel/python"
# bump this on every change in contrib/python/generate-manifest-2.7.py
INC_PR = "r1"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=e466242989bd33c1bd2b6a526a742498"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=203a6dbc802ee896020a47161e759642"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
- file://bpo-35907-cve-2019-9948.patch \
- file://bpo-35907-cve-2019-9948-fix.patch \
- file://bpo-36216-cve-2019-9636.patch \
- file://bpo-36216-cve-2019-9636-fix.patch \
- file://CVE-2019-9740.patch \
"
-SRC_URI[md5sum] = "30157d85a2c0479c09ea2cbe61f2aaf5"
-SRC_URI[sha256sum] = "f222ef602647eecb6853681156d32de4450a2c39f4de93bd5b20235f2e660ed7"
+SRC_URI[md5sum] = "fd6cc8ec0a78c44036f825e739f36e5a"
+SRC_URI[sha256sum] = "b62c0e7937551d0cc02b8fd5cb0f544f9405bafc9a54d3808ed4594812edef43"
# python recipe is actually python 2.x
# also, exclude pre-releases for both python 2.x and 3.x
@@ -24,6 +19,19 @@ UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P<pver>2(\.\d+)+).tar"
CVE_PRODUCT = "python"
+# Upstream agreement is that these are not security issues:
+# https://bugs.python.org/issue32367
+CVE_CHECK_WHITELIST += "CVE-2017-17522"
+# https://bugs.python.org/issue32056
+CVE_CHECK_WHITELIST += "CVE-2017-18207"
+
+# Windows-only, "It was determined that this is a longtime behavior
+# of Python that cannot really be altered at this point."
+CVE_CHECK_WHITELIST += "CVE-2015-5652"
+
+# This is not exploitable when glibc has CVE-2016-10739 fixed.
+CVE_CHECK_WHITELIST += "CVE-2019-18348"
+
PYTHON_MAJMIN = "2.7"
inherit autotools pkgconfig
diff --git a/meta/recipes-devtools/python/python/0001-2.7-bpo-34155-Dont-parse-domains-containing-GH-13079.patch b/meta/recipes-devtools/python/python/0001-2.7-bpo-34155-Dont-parse-domains-containing-GH-13079.patch
deleted file mode 100644
index 5415472a35..0000000000
--- a/meta/recipes-devtools/python/python/0001-2.7-bpo-34155-Dont-parse-domains-containing-GH-13079.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From 532ed09c5454bb789a301bb6f1339a0818255610 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Roberto=20C=2E=20S=C3=A1nchez?= <roberto@connexer.com>
-Date: Sat, 14 Sep 2019 13:26:38 -0400
-Subject: [PATCH] [2.7] bpo-34155: Dont parse domains containing @ (GH-13079)
- (GH-16006)
-
-This change skips parsing of email addresses where domains include a "@" character, which can be maliciously used since the local part is returned as a complete address.
-
-(cherry picked from commit 8cb65d1381b027f0b09ee36bfed7f35bb4dec9a9)
-
-Excludes changes to Lib/email/_header_value_parser.py, which did not
-exist in 2.7.
-
-Co-authored-by: jpic <jpic@users.noreply.github.com>
-
-https://bugs.python.org/issue34155
-
-Upstream-Status: Backport [https://github.com/python/cpython/commit/8cb65d1381b027f0b09ee36bfed7f35bb4dec9a9]
-
-CVE: CVE-2019-16056
-
-Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
----
- Lib/email/_parseaddr.py | 11 ++++++++++-
- Lib/email/test/test_email.py | 14 ++++++++++++++
- .../2019-05-04-13-33-37.bpo-34155.MJll68.rst | 1 +
- 3 files changed, 25 insertions(+), 1 deletion(-)
- create mode 100644 Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
-
-diff --git a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
-index 690db2c22d..dc49d2e45a 100644
---- a/Lib/email/_parseaddr.py
-+++ b/Lib/email/_parseaddr.py
-@@ -336,7 +336,12 @@ class AddrlistClass:
- aslist.append('@')
- self.pos += 1
- self.gotonext()
-- return EMPTYSTRING.join(aslist) + self.getdomain()
-+ domain = self.getdomain()
-+ if not domain:
-+ # Invalid domain, return an empty address instead of returning a
-+ # local part to denote failed parsing.
-+ return EMPTYSTRING
-+ return EMPTYSTRING.join(aslist) + domain
-
- def getdomain(self):
- """Get the complete domain name from an address."""
-@@ -351,6 +356,10 @@ class AddrlistClass:
- elif self.field[self.pos] == '.':
- self.pos += 1
- sdlist.append('.')
-+ elif self.field[self.pos] == '@':
-+ # bpo-34155: Don't parse domains with two `@` like
-+ # `a@malicious.org@important.com`.
-+ return EMPTYSTRING
- elif self.field[self.pos] in self.atomends:
- break
- else:
-diff --git a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
-index 4b4dee3d34..2efe44ac5a 100644
---- a/Lib/email/test/test_email.py
-+++ b/Lib/email/test/test_email.py
-@@ -2306,6 +2306,20 @@ class TestMiscellaneous(TestEmailBase):
- self.assertEqual(Utils.parseaddr('<>'), ('', ''))
- self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
-
-+ def test_parseaddr_multiple_domains(self):
-+ self.assertEqual(
-+ Utils.parseaddr('a@b@c'),
-+ ('', '')
-+ )
-+ self.assertEqual(
-+ Utils.parseaddr('a@b.c@c'),
-+ ('', '')
-+ )
-+ self.assertEqual(
-+ Utils.parseaddr('a@172.17.0.1@c'),
-+ ('', '')
-+ )
-+
- def test_noquote_dump(self):
- self.assertEqual(
- Utils.formataddr(('A Silly Person', 'person@dom.ain')),
-diff --git a/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
-new file mode 100644
-index 0000000000..50292e29ed
---- /dev/null
-+++ b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
-@@ -0,0 +1 @@
-+Fix parsing of invalid email addresses with more than one ``@`` (e.g. a@b@c.com.) to not return the part before 2nd ``@`` as valid email address. Patch by maxking & jpic.
diff --git a/meta/recipes-devtools/python/python/builddir.patch b/meta/recipes-devtools/python/python/0001-python-Resolve-intermediate-staging-issues.patch
index ad629a022e..2ff2ccc43d 100644
--- a/meta/recipes-devtools/python/python/builddir.patch
+++ b/meta/recipes-devtools/python/python/0001-python-Resolve-intermediate-staging-issues.patch
@@ -1,5 +1,10 @@
-When cross compiling python, we used to need to install the Makefile, pyconfig.h
-and the python library to their final location before being able to compile the
+From 77bcb3238b2853d511714544e0f84a37be6c79bf Mon Sep 17 00:00:00 2001
+From: Richard Purdie <richard.purdie@linuxfoundation.org>
+Date: Wed, 14 Nov 2012 14:31:24 +0000
+Subject: [PATCH] python: Resolve intermediate staging issues
+
+When cross compiling python, we used to need to install the Makefile, pyconfig.h
+and the python library to their final location before being able to compile the
rest of python. This change allows us to point python at its own source when
building, avoiding a variety of sysroot staging issues and simplifying the main
python recipe.
@@ -7,10 +12,29 @@ python recipe.
Upstream-Status: Inappropriate
RP 2012/11/13
-Index: Python-2.7.9/Lib/sysconfig.py
-===================================================================
---- Python-2.7.9.orig/Lib/sysconfig.py
-+++ Python-2.7.9/Lib/sysconfig.py
+---
+ Lib/distutils/sysconfig.py | 3 +++
+ Lib/sysconfig.py | 5 ++++-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
+index 2f4b8ca..15bceb5 100644
+--- a/Lib/distutils/sysconfig.py
++++ b/Lib/distutils/sysconfig.py
+@@ -31,6 +31,9 @@ else:
+ # sys.executable can be empty if argv[0] has been changed and Python is
+ # unable to retrieve the real program name
+ project_base = os.getcwd()
++_PYTHONBUILDDIR = os.environ.get("PYTHONBUILDDIR", None)
++if _PYTHONBUILDDIR:
++ project_base = _PYTHONBUILDDIR
+ if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
+ project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
+ # PC/VS7.1
+diff --git a/Lib/sysconfig.py b/Lib/sysconfig.py
+index 9c8350d..bddbe2e 100644
+--- a/Lib/sysconfig.py
++++ b/Lib/sysconfig.py
@@ -93,6 +93,7 @@ _PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
@@ -30,17 +54,6 @@ Index: Python-2.7.9/Lib/sysconfig.py
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
-Index: Python-2.7.9/Lib/distutils/sysconfig.py
-===================================================================
---- Python-2.7.9.orig/Lib/distutils/sysconfig.py
-+++ Python-2.7.9/Lib/distutils/sysconfig.py
-@@ -26,6 +26,9 @@ EXEC_PREFIX = os.path.normpath(sys.exec_
- # live in project/PCBuild9. If we're dealing with an x64 Windows build,
- # it'll live in project/PCbuild/amd64.
- project_base = os.path.dirname(os.path.abspath(sys.executable))
-+_PYTHONBUILDDIR = os.environ.get("PYTHONBUILDDIR", None)
-+if _PYTHONBUILDDIR:
-+ project_base = _PYTHONBUILDDIR
- if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
- project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
- # PC/VS7.1
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/python/python/CVE-2019-9740.patch b/meta/recipes-devtools/python/python/CVE-2019-9740.patch
deleted file mode 100644
index 066ac68290..0000000000
--- a/meta/recipes-devtools/python/python/CVE-2019-9740.patch
+++ /dev/null
@@ -1,215 +0,0 @@
-From bb8071a4cae5ab3fe321481dd3d73662ffb26052 Mon Sep 17 00:00:00 2001
-From: Victor Stinner <victor.stinner@gmail.com>
-Date: Tue, 21 May 2019 15:12:33 +0200
-Subject: [PATCH] bpo-30458: Disallow control chars in http URLs (GH-12755)
- (GH-13154) (GH-13315)
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Disallow control chars in http URLs in urllib2.urlopen. This
-addresses a potential security problem for applications that do not
-sanity check their URLs where http request headers could be injected.
-
-Disable https related urllib tests on a build without ssl (GH-13032)
-These tests require an SSL enabled build. Skip these tests when
-python is built without SSL to fix test failures.
-
-Use httplib.InvalidURL instead of ValueError as the new error case's
-exception. (GH-13044)
-
-Backport Co-Authored-By: Miro Hrončok <miro@hroncok.cz>
-
-(cherry picked from commit 7e200e0763f5b71c199aaf98bd5588f291585619)
-
-Notes on backport to Python 2.7:
-
-* test_urllib tests urllib.urlopen() which quotes the URL and so is
- not vulerable to HTTP Header Injection.
-* Add tests to test_urllib2 on urllib2.urlopen().
-* Reject non-ASCII characters: range 0x80-0xff.
-
-Upstream-Status: Backport
-CVE: CVE-2019-9740
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
----
- Lib/httplib.py | 16 ++++++
- Lib/test/test_urllib.py | 25 +++++++++
- Lib/test/test_urllib2.py | 51 ++++++++++++++++++-
- Lib/test/test_xmlrpc.py | 8 ++-
- .../2019-04-10-08-53-30.bpo-30458.51E-DA.rst | 1 +
- 5 files changed, 99 insertions(+), 2 deletions(-)
- create mode 100644 Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
-
-diff --git a/Lib/httplib.py b/Lib/httplib.py
-index 60a8fb4e355f..1b41c346e090 100644
---- a/Lib/httplib.py
-+++ b/Lib/httplib.py
-@@ -247,6 +247,16 @@
- _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
- _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
-
-+# These characters are not allowed within HTTP URL paths.
-+# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
-+# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
-+# Prevents CVE-2019-9740. Includes control characters such as \r\n.
-+# Restrict non-ASCII characters above \x7f (0x80-0xff).
-+_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
-+# Arguably only these _should_ allowed:
-+# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
-+# We are more lenient for assumed real world compatibility purposes.
-+
- # We always set the Content-Length header for these methods because some
- # servers will otherwise respond with a 411
- _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
-@@ -927,6 +937,12 @@ def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
- self._method = method
- if not url:
- url = '/'
-+ # Prevent CVE-2019-9740.
-+ match = _contains_disallowed_url_pchar_re.search(url)
-+ if match:
-+ raise InvalidURL("URL can't contain control characters. %r "
-+ "(found at least %r)"
-+ % (url, match.group()))
- hdr = '%s %s %s' % (method, url, self._http_vsn_str)
-
- self._output(hdr)
-diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
-index 1ce9201c0693..d7778d4194f3 100644
---- a/Lib/test/test_urllib.py
-+++ b/Lib/test/test_urllib.py
-@@ -257,6 +257,31 @@ def test_url_fragment(self):
- finally:
- self.unfakehttp()
-
-+ def test_url_with_control_char_rejected(self):
-+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
-+ char = chr(char_no)
-+ schemeless_url = "//localhost:7777/test%s/" % char
-+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
-+ try:
-+ # urllib quotes the URL so there is no injection.
-+ resp = urllib.urlopen("http:" + schemeless_url)
-+ self.assertNotIn(char, resp.geturl())
-+ finally:
-+ self.unfakehttp()
-+
-+ def test_url_with_newline_header_injection_rejected(self):
-+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
-+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
-+ schemeless_url = "//" + host + ":8080/test/?test=a"
-+ try:
-+ # urllib quotes the URL so there is no injection.
-+ resp = urllib.urlopen("http:" + schemeless_url)
-+ self.assertNotIn(' ', resp.geturl())
-+ self.assertNotIn('\r', resp.geturl())
-+ self.assertNotIn('\n', resp.geturl())
-+ finally:
-+ self.unfakehttp()
-+
- def test_read_bogus(self):
- # urlopen() should raise IOError for many error codes.
- self.fakehttp('''HTTP/1.1 401 Authentication Required
-diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
-index 6d24d5ddf83c..9531818e16b2 100644
---- a/Lib/test/test_urllib2.py
-+++ b/Lib/test/test_urllib2.py
-@@ -15,6 +15,9 @@
- except ImportError:
- ssl = None
-
-+from test.test_urllib import FakeHTTPMixin
-+
-+
- # XXX
- # Request
- # CacheFTPHandler (hard to write)
-@@ -1262,7 +1265,7 @@ def _test_basic_auth(self, opener, auth_handler, auth_header,
- self.assertEqual(len(http_handler.requests), 1)
- self.assertFalse(http_handler.requests[0].has_header(auth_header))
-
--class MiscTests(unittest.TestCase):
-+class MiscTests(unittest.TestCase, FakeHTTPMixin):
-
- def test_build_opener(self):
- class MyHTTPHandler(urllib2.HTTPHandler): pass
-@@ -1317,6 +1320,52 @@ def test_unsupported_algorithm(self):
- "Unsupported digest authentication algorithm 'invalid'"
- )
-
-+ @unittest.skipUnless(ssl, "ssl module required")
-+ def test_url_with_control_char_rejected(self):
-+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
-+ char = chr(char_no)
-+ schemeless_url = "//localhost:7777/test%s/" % char
-+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
-+ try:
-+ # We explicitly test urllib.request.urlopen() instead of the top
-+ # level 'def urlopen()' function defined in this... (quite ugly)
-+ # test suite. They use different url opening codepaths. Plain
-+ # urlopen uses FancyURLOpener which goes via a codepath that
-+ # calls urllib.parse.quote() on the URL which makes all of the
-+ # above attempts at injection within the url _path_ safe.
-+ escaped_char_repr = repr(char).replace('\\', r'\\')
-+ InvalidURL = httplib.InvalidURL
-+ with self.assertRaisesRegexp(
-+ InvalidURL, "contain control.*" + escaped_char_repr):
-+ urllib2.urlopen("http:" + schemeless_url)
-+ with self.assertRaisesRegexp(
-+ InvalidURL, "contain control.*" + escaped_char_repr):
-+ urllib2.urlopen("https:" + schemeless_url)
-+ finally:
-+ self.unfakehttp()
-+
-+ @unittest.skipUnless(ssl, "ssl module required")
-+ def test_url_with_newline_header_injection_rejected(self):
-+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
-+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
-+ schemeless_url = "//" + host + ":8080/test/?test=a"
-+ try:
-+ # We explicitly test urllib2.urlopen() instead of the top
-+ # level 'def urlopen()' function defined in this... (quite ugly)
-+ # test suite. They use different url opening codepaths. Plain
-+ # urlopen uses FancyURLOpener which goes via a codepath that
-+ # calls urllib.parse.quote() on the URL which makes all of the
-+ # above attempts at injection within the url _path_ safe.
-+ InvalidURL = httplib.InvalidURL
-+ with self.assertRaisesRegexp(
-+ InvalidURL, r"contain control.*\\r.*(found at least . .)"):
-+ urllib2.urlopen("http:" + schemeless_url)
-+ with self.assertRaisesRegexp(InvalidURL, r"contain control.*\\n"):
-+ urllib2.urlopen("https:" + schemeless_url)
-+ finally:
-+ self.unfakehttp()
-+
-+
-
- class RequestTests(unittest.TestCase):
-
-diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
-index 36b3be67fd6b..90ccb30716ff 100644
---- a/Lib/test/test_xmlrpc.py
-+++ b/Lib/test/test_xmlrpc.py
-@@ -659,7 +659,13 @@ def test_dotted_attribute(self):
- def test_partial_post(self):
- # Check that a partial POST doesn't make the server loop: issue #14001.
- conn = httplib.HTTPConnection(ADDR, PORT)
-- conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
-+ conn.send('POST /RPC2 HTTP/1.0\r\n'
-+ 'Content-Length: 100\r\n\r\n'
-+ 'bye HTTP/1.1\r\n'
-+ 'Host: %s:%s\r\n'
-+ 'Accept-Encoding: identity\r\n'
-+ 'Content-Length: 0\r\n\r\n'
-+ % (ADDR, PORT))
- conn.close()
-
- class SimpleServerEncodingTestCase(BaseServerTestCase):
-diff --git a/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst b/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
-new file mode 100644
-index 000000000000..47cb899df1af
---- /dev/null
-+++ b/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
-@@ -0,0 +1 @@
-+Address CVE-2019-9740 by disallowing URL paths with embedded whitespace or control characters through into the underlying http client request. Such potentially malicious header injection URLs now cause an httplib.InvalidURL exception to be raised.
diff --git a/meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948-fix.patch b/meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948-fix.patch
deleted file mode 100644
index b267237018..0000000000
--- a/meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948-fix.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 179a5f75f1121dab271fe8f90eb35145f9dcbbda Mon Sep 17 00:00:00 2001
-From: Sihoon Lee <push0ebp@gmail.com>
-Date: Fri, 17 May 2019 02:41:06 +0900
-Subject: [PATCH] Update test_urllib.py and urllib.py\nchange assertEqual into
- assertRasies in DummyURLopener test, and simplify mitigation
-
-Upstream-Status: Submitted https://github.com/python/cpython/pull/11842
-
-CVE: CVE-2019-9948
-
-Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
----
- Lib/test/test_urllib.py | 11 +++--------
- Lib/urllib.py | 4 ++--
- 2 files changed, 5 insertions(+), 10 deletions(-)
-
-diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
-index e5f210e62a18..1e23dfb0bb16 100644
---- a/Lib/test/test_urllib.py
-+++ b/Lib/test/test_urllib.py
-@@ -1027,14 +1027,9 @@ def test_local_file_open(self):
- class DummyURLopener(urllib.URLopener):
- def open_local_file(self, url):
- return url
-- self.assertEqual(DummyURLopener().open(
-- 'local-file://example'), '//example')
-- self.assertEqual(DummyURLopener().open(
-- 'local_file://example'), '//example')
-- self.assertRaises(IOError, urllib.urlopen,
-- 'local-file://example')
-- self.assertRaises(IOError, urllib.urlopen,
-- 'local_file://example')
-+ for url in ('local_file://example', 'local-file://example'):
-+ self.assertRaises(IOError, DummyURLopener().open, url)
-+ self.assertRaises(IOError, urllib.urlopen, url)
-
- # Just commented them out.
- # Can't really tell why keep failing in windows and sparc.
-diff --git a/Lib/urllib.py b/Lib/urllib.py
-index a24e9a5c68fb..39b834054e9e 100644
---- a/Lib/urllib.py
-+++ b/Lib/urllib.py
-@@ -203,10 +203,10 @@ def open(self, fullurl, data=None):
- name = 'open_' + urltype
- self.type = urltype
- name = name.replace('-', '_')
--
-+
- # bpo-35907: # disallow the file reading with the type not allowed
- if not hasattr(self, name) or \
-- (self == _urlopener and name == 'open_local_file'):
-+ getattr(self, name) == self.open_local_file:
- if proxy:
- return self.open_unknown_proxy(proxy, fullurl, data)
- else:
diff --git a/meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948.patch b/meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948.patch
deleted file mode 100644
index f4c225d2fc..0000000000
--- a/meta/recipes-devtools/python/python/bpo-35907-cve-2019-9948.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 8f99cc799e4393bf1112b9395b2342f81b3f45ef Mon Sep 17 00:00:00 2001
-From: push0ebp <push0ebp@shl-MacBook-Pro.local>
-Date: Thu, 14 Feb 2019 02:05:46 +0900
-Subject: [PATCH] bpo-35907: Avoid file reading as disallowing the unnecessary
- URL scheme in urllib
-
-Upstream-Status: Submitted https://github.com/python/cpython/pull/11842
-
-CVE: CVE-2019-9948
-
-Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
----
- Lib/test/test_urllib.py | 12 ++++++++++++
- Lib/urllib.py | 5 ++++-
- 2 files changed, 16 insertions(+), 1 deletion(-)
-
-diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
-index 1ce9201c0693..e5f210e62a18 100644
---- a/Lib/test/test_urllib.py
-+++ b/Lib/test/test_urllib.py
-@@ -1023,6 +1023,18 @@ def open_spam(self, url):
- "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
- "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
-
-+ def test_local_file_open(self):
-+ class DummyURLopener(urllib.URLopener):
-+ def open_local_file(self, url):
-+ return url
-+ self.assertEqual(DummyURLopener().open(
-+ 'local-file://example'), '//example')
-+ self.assertEqual(DummyURLopener().open(
-+ 'local_file://example'), '//example')
-+ self.assertRaises(IOError, urllib.urlopen,
-+ 'local-file://example')
-+ self.assertRaises(IOError, urllib.urlopen,
-+ 'local_file://example')
-
- # Just commented them out.
- # Can't really tell why keep failing in windows and sparc.
-diff --git a/Lib/urllib.py b/Lib/urllib.py
-index d85504a5cb7e..a24e9a5c68fb 100644
---- a/Lib/urllib.py
-+++ b/Lib/urllib.py
-@@ -203,7 +203,10 @@ def open(self, fullurl, data=None):
- name = 'open_' + urltype
- self.type = urltype
- name = name.replace('-', '_')
-- if not hasattr(self, name):
-+
-+ # bpo-35907: # disallow the file reading with the type not allowed
-+ if not hasattr(self, name) or \
-+ (self == _urlopener and name == 'open_local_file'):
- if proxy:
- return self.open_unknown_proxy(proxy, fullurl, data)
- else:
diff --git a/meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636-fix.patch b/meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636-fix.patch
deleted file mode 100644
index 2ce4d2cde7..0000000000
--- a/meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636-fix.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 06b5ee585d6e76bdbb4002f642d864d860cbbd2b Mon Sep 17 00:00:00 2001
-From: Steve Dower <steve.dower@python.org>
-Date: Tue, 12 Mar 2019 08:23:33 -0700
-Subject: [PATCH] bpo-36216: Only print test messages when verbose
-
-CVE: CVE-2019-9636
-
-Upstream-Status: Backport https://github.com/python/cpython/pull/12291/commits/06b5ee585d6e76bdbb4002f642d864d860cbbd2b
-
-Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
----
- Lib/test/test_urlparse.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
-index 73b0228ea8e3..1830d0b28688 100644
---- a/Lib/test/test_urlparse.py
-+++ b/Lib/test/test_urlparse.py
-@@ -644,7 +644,8 @@ def test_urlsplit_normalization(self):
- for scheme in [u"http", u"https", u"ftp"]:
- for c in denorm_chars:
- url = u"{}://netloc{}false.netloc/path".format(scheme, c)
-- print "Checking %r" % url
-+ if test_support.verbose:
-+ print "Checking %r" % url
- with self.assertRaises(ValueError):
- urlparse.urlsplit(url)
-
diff --git a/meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636.patch b/meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636.patch
deleted file mode 100644
index 352b13ba9b..0000000000
--- a/meta/recipes-devtools/python/python/bpo-36216-cve-2019-9636.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From 3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e Mon Sep 17 00:00:00 2001
-From: Steve Dower <steve.dower@python.org>
-Date: Thu, 7 Mar 2019 08:25:22 -0800
-Subject: [PATCH] bpo-36216: Add check for characters in netloc that normalize
- to separators (GH-12201)
-
-CVE: CVE-2019-9636
-
-Upstream-Status: Backport https://github.com/python/cpython/pull/12216/commits/3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e
-
-Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
----
- Doc/library/urlparse.rst | 20 ++++++++++++++++
- Lib/test/test_urlparse.py | 24 +++++++++++++++++++
- Lib/urlparse.py | 17 +++++++++++++
- .../2019-03-06-09-38-40.bpo-36216.6q1m4a.rst | 3 +++
- 4 files changed, 64 insertions(+)
- create mode 100644 Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
-
-diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
-index 4e1ded73c266..73b0228ea8e3 100644
---- a/Lib/test/test_urlparse.py
-+++ b/Lib/test/test_urlparse.py
-@@ -1,4 +1,6 @@
- from test import test_support
-+import sys
-+import unicodedata
- import unittest
- import urlparse
-
-@@ -624,6 +626,28 @@ def test_portseparator(self):
- self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
- ('http','www.python.org:80','','','',''))
-
-+ def test_urlsplit_normalization(self):
-+ # Certain characters should never occur in the netloc,
-+ # including under normalization.
-+ # Ensure that ALL of them are detected and cause an error
-+ illegal_chars = u'/:#?@'
-+ hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
-+ denorm_chars = [
-+ c for c in map(unichr, range(128, sys.maxunicode))
-+ if (hex_chars & set(unicodedata.decomposition(c).split()))
-+ and c not in illegal_chars
-+ ]
-+ # Sanity check that we found at least one such character
-+ self.assertIn(u'\u2100', denorm_chars)
-+ self.assertIn(u'\uFF03', denorm_chars)
-+
-+ for scheme in [u"http", u"https", u"ftp"]:
-+ for c in denorm_chars:
-+ url = u"{}://netloc{}false.netloc/path".format(scheme, c)
-+ print "Checking %r" % url
-+ with self.assertRaises(ValueError):
-+ urlparse.urlsplit(url)
-+
- def test_main():
- test_support.run_unittest(UrlParseTestCase)
-
-diff --git a/Lib/urlparse.py b/Lib/urlparse.py
-index f7c2b032b097..54eda08651ab 100644
---- a/Lib/urlparse.py
-+++ b/Lib/urlparse.py
-@@ -165,6 +165,21 @@ def _splitnetloc(url, start=0):
- delim = min(delim, wdelim) # use earliest delim position
- return url[start:delim], url[delim:] # return (domain, rest)
-
-+def _checknetloc(netloc):
-+ if not netloc or not isinstance(netloc, unicode):
-+ return
-+ # looking for characters like \u2100 that expand to 'a/c'
-+ # IDNA uses NFKC equivalence, so normalize for this check
-+ import unicodedata
-+ netloc2 = unicodedata.normalize('NFKC', netloc)
-+ if netloc == netloc2:
-+ return
-+ _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
-+ for c in '/?#@:':
-+ if c in netloc2:
-+ raise ValueError("netloc '" + netloc2 + "' contains invalid " +
-+ "characters under NFKC normalization")
-+
- def urlsplit(url, scheme='', allow_fragments=True):
- """Parse a URL into 5 components:
- <scheme>://<netloc>/<path>?<query>#<fragment>
-@@ -193,6 +208,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
- url, fragment = url.split('#', 1)
- if '?' in url:
- url, query = url.split('?', 1)
-+ _checknetloc(netloc)
- v = SplitResult(scheme, netloc, url, query, fragment)
- _parse_cache[key] = v
- return v
-@@ -216,6 +232,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
- url, fragment = url.split('#', 1)
- if '?' in url:
- url, query = url.split('?', 1)
-+ _checknetloc(netloc)
- v = SplitResult(scheme, netloc, url, query, fragment)
- _parse_cache[key] = v
- return v
-diff --git a/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
-new file mode 100644
-index 000000000000..1e1ad92c6feb
---- /dev/null
-+++ b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
-@@ -0,0 +1,3 @@
-+Changes urlsplit() to raise ValueError when the URL contains characters that
-+decompose under IDNA encoding (NFKC-normalization) into characters that
-+affect how the URL is parsed.
-\ No newline at end of file
diff --git a/meta/recipes-devtools/python/python/bpo-36742-cve-2019-10160.patch b/meta/recipes-devtools/python/python/bpo-36742-cve-2019-10160.patch
deleted file mode 100644
index 1b6cb8cf3e..0000000000
--- a/meta/recipes-devtools/python/python/bpo-36742-cve-2019-10160.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 5a1033fe5be764a135adcfff2fdc14edc3e5f327 Mon Sep 17 00:00:00 2001
-From: Changqing Li <changqing.li@windriver.com>
-Date: Thu, 10 Oct 2019 16:32:19 +0800
-Subject: [PATCH] bpo-36742: Fixes handling of pre-normalization characters in
- urlsplit() bpo-36742: Corrects fix to handle decomposition in usernames
-
-Upstream-Status: Backport
-
-https://github.com/python/cpython/commit/98a4dcefbbc3bce5ab07e7c0830a183157250259
-https://github.com/python/cpython/commit/f61599b050c621386a3fc6bc480359e2d3bb93de#diff-b577545d73dd0cdb2c337a4c5f89e1d7
-
-CVE: CVE-2019-10160
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
----
- Lib/test/test_urlparse.py | 19 +++++++++++++------
- Lib/urlparse.py | 14 +++++++++-----
- 2 files changed, 22 insertions(+), 11 deletions(-)
-
-diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
-index 1830d0b..857ed96 100644
---- a/Lib/test/test_urlparse.py
-+++ b/Lib/test/test_urlparse.py
-@@ -641,13 +641,20 @@ class UrlParseTestCase(unittest.TestCase):
- self.assertIn(u'\u2100', denorm_chars)
- self.assertIn(u'\uFF03', denorm_chars)
-
-+ # bpo-36742: Verify port separators are ignored when they
-+ # existed prior to decomposition
-+ urlparse.urlsplit(u'http://\u30d5\u309a:80')
-+ with self.assertRaises(ValueError):
-+ urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
-+
- for scheme in [u"http", u"https", u"ftp"]:
-- for c in denorm_chars:
-- url = u"{}://netloc{}false.netloc/path".format(scheme, c)
-- if test_support.verbose:
-- print "Checking %r" % url
-- with self.assertRaises(ValueError):
-- urlparse.urlsplit(url)
-+ for netloc in [u"netloc{}false.netloc", u"n{}user@netloc"]:
-+ for c in denorm_chars:
-+ url = u"{}://{}/path".format(scheme, netloc.format(c))
-+ if test_support.verbose:
-+ print "Checking %r" % url
-+ with self.assertRaises(ValueError):
-+ urlparse.urlsplit(url)
-
- def test_main():
- test_support.run_unittest(UrlParseTestCase)
-diff --git a/Lib/urlparse.py b/Lib/urlparse.py
-index 54eda08..e34b368 100644
---- a/Lib/urlparse.py
-+++ b/Lib/urlparse.py
-@@ -171,14 +171,18 @@ def _checknetloc(netloc):
- # looking for characters like \u2100 that expand to 'a/c'
- # IDNA uses NFKC equivalence, so normalize for this check
- import unicodedata
-- netloc2 = unicodedata.normalize('NFKC', netloc)
-- if netloc == netloc2:
-+ n = netloc.replace(u'@', u'') # ignore characters already included
-+ n = n.replace(u':', u'') # but not the surrounding text
-+ n = n.replace(u'#', u'')
-+ n = n.replace(u'?', u'')
-+
-+ netloc2 = unicodedata.normalize('NFKC', n)
-+ if n == netloc2:
- return
-- _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
- for c in '/?#@:':
- if c in netloc2:
-- raise ValueError("netloc '" + netloc2 + "' contains invalid " +
-- "characters under NFKC normalization")
-+ raise ValueError(u"netloc '" + netloc + u"' contains invalid " +
-+ u"characters under NFKC normalization")
-
- def urlsplit(url, scheme='', allow_fragments=True):
- """Parse a URL into 5 components:
---
-2.7.4
-
diff --git a/meta/recipes-devtools/python/python/python2-manifest.json b/meta/recipes-devtools/python/python/python2-manifest.json
index eb52e862ab..fd98774d00 100644
--- a/meta/recipes-devtools/python/python/python2-manifest.json
+++ b/meta/recipes-devtools/python/python/python2-manifest.json
@@ -267,6 +267,7 @@
"${libdir}/python2.7/lib-dynload/xreadlines.so",
"${libdir}/python2.7/linecache.py",
"${libdir}/python2.7/new.py",
+ "${libdir}/python2.7/ntpath.py",
"${libdir}/python2.7/os.py",
"${libdir}/python2.7/platform.py",
"${libdir}/python2.7/posixpath.py",
diff --git a/meta/recipes-devtools/python/python3_3.7.5.bb b/meta/recipes-devtools/python/python3_3.7.7.bb
index c560c4a29d..114cf2fe09 100644
--- a/meta/recipes-devtools/python/python3_3.7.5.bb
+++ b/meta/recipes-devtools/python/python3_3.7.7.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "http://www.python.org"
LICENSE = "PSFv2"
SECTION = "devel/python"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=e466242989bd33c1bd2b6a526a742498"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=203a6dbc802ee896020a47161e759642"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://run-ptest \
@@ -38,14 +38,17 @@ SRC_URI_append_class-nativesdk = " \
file://0001-main.c-if-OEPYTHON3HOME-is-set-use-instead-of-PYTHON.patch \
"
-SRC_URI[md5sum] = "08ed8030b1183107c48f2092e79a87e2"
-SRC_URI[sha256sum] = "e85a76ea9f3d6c485ec1780fca4e500725a4a7bbc63c78ebc44170de9b619d94"
+SRC_URI[md5sum] = "172c650156f7bea68ce31b2fd01fa766"
+SRC_URI[sha256sum] = "06a0a9f1bf0d8cd1e4121194d666c4e28ddae4dd54346de6c343206599f02136"
# exclude pre-releases for both python 2.x and 3.x
UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P<pver>\d+(\.\d+)+).tar"
CVE_PRODUCT = "python"
+# This is not exploitable when glibc has CVE-2016-10739 fixed.
+CVE_CHECK_WHITELIST += "CVE-2019-18348"
+
PYTHON_MAJMIN = "3.7"
PYTHON_BINABI = "${PYTHON_MAJMIN}m"
@@ -305,6 +308,8 @@ FILES_${PN}-misc = "${libdir}/python${PYTHON_MAJMIN} ${libdir}/python${PYTHON_MA
PACKAGES += "${PN}-man"
FILES_${PN}-man = "${datadir}/man"
+# See https://bugs.python.org/issue18748 and https://bugs.python.org/issue37395
+RDEPENDS_libpython3_append_libc-glibc = " libgcc"
RDEPENDS_${PN}-ptest = "${PN}-modules ${PN}-tests unzip bzip2 libgcc tzdata-europe coreutils sed"
RDEPENDS_${PN}-ptest_append_libc-glibc = " locale-base-tr-tr.iso-8859-9"
RDEPENDS_${PN}-tkinter += "${@bb.utils.contains('PACKAGECONFIG', 'tk', 'tk tk-lib', '', d)}"
diff --git a/meta/recipes-devtools/python/python_2.7.16.bb b/meta/recipes-devtools/python/python_2.7.18.bb
index 1c7c58199f..ec724c3918 100644
--- a/meta/recipes-devtools/python/python_2.7.16.bb
+++ b/meta/recipes-devtools/python/python_2.7.18.bb
@@ -30,8 +30,6 @@ SRC_URI += " \
file://support_SOURCE_DATE_EPOCH_in_py_compile_2.7.patch \
file://float-endian.patch \
file://0001-python2-use-cc_basename-to-replace-CC-for-checking-c.patch \
- file://0001-2.7-bpo-34155-Dont-parse-domains-containing-GH-13079.patch \
- file://bpo-36742-cve-2019-10160.patch \
"
S = "${WORKDIR}/Python-${PV}"
diff --git a/meta/recipes-devtools/rsync/rsync_3.1.3.bb b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
index ffb1d061c0..152ff02a25 100644
--- a/meta/recipes-devtools/rsync/rsync_3.1.3.bb
+++ b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
@@ -20,6 +20,9 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \
SRC_URI[md5sum] = "1581a588fde9d89f6bc6201e8129afaf"
SRC_URI[sha256sum] = "55cc554efec5fdaad70de921cd5a5eeb6c29a95524c715f3bbf849235b0800c0"
+# -16548 required for v3.1.3pre1. Already in v3.1.3.
+CVE_CHECK_WHITELIST += " CVE-2017-16548 "
+
inherit autotools
PACKAGECONFIG ??= "acl attr \
diff --git a/meta/recipes-devtools/subversion/subversion_1.11.1.bb b/meta/recipes-devtools/subversion/subversion_1.11.1.bb
index 8abac7408e..9909461bf7 100644
--- a/meta/recipes-devtools/subversion/subversion_1.11.1.bb
+++ b/meta/recipes-devtools/subversion/subversion_1.11.1.bb
@@ -18,6 +18,8 @@ SRC_URI[sha256sum] = "9efd2750ca4d72ec903431a24b9c732b6cbb84aad9b7563f59dd96dea5
inherit autotools pkgconfig gettext
+CVE_PRODUCT = "apache:subversion"
+
PACKAGECONFIG ?= ""
PACKAGECONFIG[sasl] = "--with-sasl,--without-sasl,cyrus-sasl"
diff --git a/meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch b/meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch
new file mode 100644
index 0000000000..5d587fc832
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch
@@ -0,0 +1,316 @@
+CVE: CVE-2019-14866
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/cpio.git/commit/?id=7554e3e42cd72f6f8304410c47fe6f8918e9bfd7]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+
+From a052401293e45a13cded5959b258204dae6d0af5 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sun, 3 Nov 2019 23:59:39 +0200
+Subject: [PATCH] Fix CVE-2019-14866
+
+* src/copyout.c (to_ascii): Additional argument nul controls whether
+to add the terminating nul character.
+(field_width_error): Improve diagnostics: print the actual and the
+maximum allowed field value.
+* src/extern.h (to_ascii, field_width_error): New prototypes.
+* src/tar.c (to_oct): Remove.
+(to_oct_or_error): New function.
+(TO_OCT): New macro.
+(write_out_tar_header): Use TO_OCT and to_ascii. Return 0 on
+success, 1 on error.
+---
+ src/copyout.c | 49 ++++++++++++++++++++++--------------
+ src/extern.h | 15 +++++++++--
+ src/tar.c | 69 ++++++++++++++++++++++++---------------------------
+ 3 files changed, 75 insertions(+), 58 deletions(-)
+
+diff --git a/src/copyout.c b/src/copyout.c
+index 1f0987a..1ae5477 100644
+--- a/src/copyout.c
++++ b/src/copyout.c
+@@ -269,26 +269,32 @@ writeout_final_defers (int out_des)
+ so it should be moved to paxutils too.
+ Allowed values for logbase are: 1 (binary), 2, 3 (octal), 4 (hex) */
+ int
+-to_ascii (char *where, uintmax_t v, size_t digits, unsigned logbase)
++to_ascii (char *where, uintmax_t v, size_t digits, unsigned logbase, bool nul)
+ {
+ static char codetab[] = "0123456789ABCDEF";
+- int i = digits;
+-
+- do
++
++ if (nul)
++ where[--digits] = 0;
++ while (digits > 0)
+ {
+- where[--i] = codetab[(v & ((1 << logbase) - 1))];
++ where[--digits] = codetab[(v & ((1 << logbase) - 1))];
+ v >>= logbase;
+ }
+- while (i);
+
+ return v != 0;
+ }
+
+-static void
+-field_width_error (const char *filename, const char *fieldname)
++void
++field_width_error (const char *filename, const char *fieldname,
++ uintmax_t value, size_t width, bool nul)
+ {
+- error (0, 0, _("%s: field width not sufficient for storing %s"),
+- filename, fieldname);
++ char valbuf[UINTMAX_STRSIZE_BOUND + 1];
++ char maxbuf[UINTMAX_STRSIZE_BOUND + 1];
++ error (0, 0, _("%s: value %s %s out of allowed range 0..%s"),
++ filename, fieldname,
++ STRINGIFY_BIGINT (value, valbuf),
++ STRINGIFY_BIGINT (MAX_VAL_WITH_DIGITS (width - nul, LG_8),
++ maxbuf));
+ }
+
+ static void
+@@ -303,7 +309,7 @@ to_ascii_or_warn (char *where, uintmax_t n, size_t digits,
+ unsigned logbase,
+ const char *filename, const char *fieldname)
+ {
+- if (to_ascii (where, n, digits, logbase))
++ if (to_ascii (where, n, digits, logbase, false))
+ field_width_warning (filename, fieldname);
+ }
+
+@@ -312,9 +318,9 @@ to_ascii_or_error (char *where, uintmax_t n, size_t digits,
+ unsigned logbase,
+ const char *filename, const char *fieldname)
+ {
+- if (to_ascii (where, n, digits, logbase))
++ if (to_ascii (where, n, digits, logbase, false))
+ {
+- field_width_error (filename, fieldname);
++ field_width_error (filename, fieldname, n, digits, false);
+ return 1;
+ }
+ return 0;
+@@ -371,7 +377,7 @@ write_out_new_ascii_header (const char *magic_string,
+ _("name size")))
+ return 1;
+ p += 8;
+- to_ascii (p, file_hdr->c_chksum & 0xffffffff, 8, LG_16);
++ to_ascii (p, file_hdr->c_chksum & 0xffffffff, 8, LG_16, false);
+
+ tape_buffered_write (ascii_header, out_des, sizeof ascii_header);
+
+@@ -388,7 +394,7 @@ write_out_old_ascii_header (dev_t dev, dev_t rdev,
+ char ascii_header[76];
+ char *p = ascii_header;
+
+- to_ascii (p, file_hdr->c_magic, 6, LG_8);
++ to_ascii (p, file_hdr->c_magic, 6, LG_8, false);
+ p += 6;
+ to_ascii_or_warn (p, dev, 6, LG_8, file_hdr->c_name, _("device number"));
+ p += 6;
+@@ -492,7 +498,10 @@ write_out_binary_header (dev_t rdev,
+ short_hdr.c_namesize = file_hdr->c_namesize & 0xFFFF;
+ if (short_hdr.c_namesize != file_hdr->c_namesize)
+ {
+- field_width_error (file_hdr->c_name, _("name size"));
++ char maxbuf[UINTMAX_STRSIZE_BOUND + 1];
++ error (0, 0, _("%s: value %s %s out of allowed range 0..%u"),
++ file_hdr->c_name, _("name size"),
++ STRINGIFY_BIGINT (file_hdr->c_namesize, maxbuf), 0xFFFFu);
+ return 1;
+ }
+
+@@ -502,7 +511,10 @@ write_out_binary_header (dev_t rdev,
+ if (((off_t)short_hdr.c_filesizes[0] << 16) + short_hdr.c_filesizes[1]
+ != file_hdr->c_filesize)
+ {
+- field_width_error (file_hdr->c_name, _("file size"));
++ char maxbuf[UINTMAX_STRSIZE_BOUND + 1];
++ error (0, 0, _("%s: value %s %s out of allowed range 0..%lu"),
++ file_hdr->c_name, _("file size"),
++ STRINGIFY_BIGINT (file_hdr->c_namesize, maxbuf), 0xFFFFFFFFlu);
+ return 1;
+ }
+
+@@ -552,8 +564,7 @@ write_out_header (struct cpio_file_stat *file_hdr, int out_des)
+ error (0, 0, _("%s: file name too long"), file_hdr->c_name);
+ return 1;
+ }
+- write_out_tar_header (file_hdr, out_des); /* FIXME: No error checking */
+- return 0;
++ return write_out_tar_header (file_hdr, out_des);
+
+ case arf_binary:
+ return write_out_binary_header (makedev (file_hdr->c_rdev_maj,
+diff --git a/src/extern.h b/src/extern.h
+index e27d662..f9ef56a 100644
+--- a/src/extern.h
++++ b/src/extern.h
+@@ -117,6 +117,10 @@ void print_name_with_quoting (char *p);
+ /* copyout.c */
+ int write_out_header (struct cpio_file_stat *file_hdr, int out_des);
+ void process_copy_out (void);
++int to_ascii (char *where, uintmax_t v, size_t digits, unsigned logbase,
++ bool nul);
++void field_width_error (const char *filename, const char *fieldname,
++ uintmax_t value, size_t width, bool nul);
+
+ /* copypass.c */
+ void process_copy_pass (void);
+@@ -145,7 +149,7 @@ int make_path (char *argpath, uid_t owner, gid_t group,
+ const char *verbose_fmt_string);
+
+ /* tar.c */
+-void write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des);
++int write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des);
+ int null_block (long *block, int size);
+ void read_in_tar_header (struct cpio_file_stat *file_hdr, int in_des);
+ int otoa (char *s, unsigned long *n);
+@@ -204,9 +208,16 @@ void cpio_safer_name_suffix (char *name, bool link_target,
+ int cpio_create_dir (struct cpio_file_stat *file_hdr, int existing_dir);
+ void change_dir (void);
+
+-/* FIXME: These two defines should be defined in paxutils */
++/* FIXME: The following three should be defined in paxutils */
+ #define LG_8 3
+ #define LG_16 4
++/* The maximum uintmax_t value that can be represented with DIGITS digits,
++ assuming that each digit is BITS_PER_DIGIT wide. */
++#define MAX_VAL_WITH_DIGITS(digits, bits_per_digit) \
++ ((digits) * (bits_per_digit) < sizeof (uintmax_t) * CHAR_BIT \
++ ? ((uintmax_t) 1 << ((digits) * (bits_per_digit))) - 1 \
++ : (uintmax_t) -1)
++
+
+ uintmax_t from_ascii (char const *where, size_t digs, unsigned logbase);
+
+diff --git a/src/tar.c b/src/tar.c
+index a2ce171..ef58027 100644
+--- a/src/tar.c
++++ b/src/tar.c
+@@ -79,36 +79,17 @@ stash_tar_filename (char *prefix, char *filename)
+ return hold_tar_filename;
+ }
+
+-/* Convert a number into a string of octal digits.
+- Convert long VALUE into a DIGITS-digit field at WHERE,
+- including a trailing space and room for a NUL. DIGITS==3 means
+- 1 digit, a space, and room for a NUL.
+-
+- We assume the trailing NUL is already there and don't fill it in.
+- This fact is used by start_header and finish_header, so don't change it!
+-
+- This is be equivalent to:
+- sprintf (where, "%*lo ", digits - 2, value);
+- except that sprintf fills in the trailing NUL and we don't. */
+-
+-static void
+-to_oct (register long value, register int digits, register char *where)
++static int
++to_oct_or_error (uintmax_t value, size_t digits, char *where, char const *field,
++ char const *file)
+ {
+- --digits; /* Leave the trailing NUL slot alone. */
+-
+- /* Produce the digits -- at least one. */
+- do
++ if (to_ascii (where, value, digits, LG_8, true))
+ {
+- where[--digits] = '0' + (char) (value & 7); /* One octal digit. */
+- value >>= 3;
++ field_width_error (file, field, value, digits, true);
++ return 1;
+ }
+- while (digits > 0 && value != 0);
+-
+- /* Add leading zeroes, if necessary. */
+- while (digits > 0)
+- where[--digits] = '0';
++ return 0;
+ }
+-
+
+
+ /* Compute and return a checksum for TAR_HDR,
+@@ -134,10 +115,22 @@ tar_checksum (struct tar_header *tar_hdr)
+ return sum;
+ }
+
++#define TO_OCT(file_hdr, c_fld, digits, tar_hdr, tar_field) \
++ do \
++ { \
++ if (to_oct_or_error (file_hdr -> c_fld, \
++ digits, \
++ tar_hdr -> tar_field, \
++ #tar_field, \
++ file_hdr->c_name)) \
++ return 1; \
++ } \
++ while (0)
++
+ /* Write out header FILE_HDR, including the file name, to file
+ descriptor OUT_DES. */
+
+-void
++int
+ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ {
+ int name_len;
+@@ -166,11 +159,11 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+
+ /* Ustar standard (POSIX.1-1988) requires the mode to contain only 3 octal
+ digits */
+- to_oct (file_hdr->c_mode & MODE_ALL, 8, tar_hdr->mode);
+- to_oct (file_hdr->c_uid, 8, tar_hdr->uid);
+- to_oct (file_hdr->c_gid, 8, tar_hdr->gid);
+- to_oct (file_hdr->c_filesize, 12, tar_hdr->size);
+- to_oct (file_hdr->c_mtime, 12, tar_hdr->mtime);
++ TO_OCT (file_hdr, c_mode & MODE_ALL, 8, tar_hdr, mode);
++ TO_OCT (file_hdr, c_uid, 8, tar_hdr, uid);
++ TO_OCT (file_hdr, c_gid, 8, tar_hdr, gid);
++ TO_OCT (file_hdr, c_filesize, 12, tar_hdr, size);
++ TO_OCT (file_hdr, c_mtime, 12, tar_hdr, mtime);
+
+ switch (file_hdr->c_mode & CP_IFMT)
+ {
+@@ -182,7 +175,7 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ strncpy (tar_hdr->linkname, file_hdr->c_tar_linkname,
+ TARLINKNAMESIZE);
+ tar_hdr->typeflag = LNKTYPE;
+- to_oct (0, 12, tar_hdr->size);
++ to_ascii (tar_hdr->size, 0, 12, LG_8, true);
+ }
+ else
+ tar_hdr->typeflag = REGTYPE;
+@@ -208,7 +201,7 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ than TARLINKNAMESIZE. */
+ strncpy (tar_hdr->linkname, file_hdr->c_tar_linkname,
+ TARLINKNAMESIZE);
+- to_oct (0, 12, tar_hdr->size);
++ to_ascii (tar_hdr->size, 0, 12, LG_8, true);
+ break;
+ #endif /* CP_IFLNK */
+ }
+@@ -227,13 +220,15 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ if (name)
+ strcpy (tar_hdr->gname, name);
+
+- to_oct (file_hdr->c_rdev_maj, 8, tar_hdr->devmajor);
+- to_oct (file_hdr->c_rdev_min, 8, tar_hdr->devminor);
++ TO_OCT (file_hdr, c_rdev_maj, 8, tar_hdr, devmajor);
++ TO_OCT (file_hdr, c_rdev_min, 8, tar_hdr, devminor);
+ }
+
+- to_oct (tar_checksum (tar_hdr), 8, tar_hdr->chksum);
++ to_ascii (tar_hdr->chksum, tar_checksum (tar_hdr), 8, LG_8, true);
+
+ tape_buffered_write ((char *) &tar_rec, out_des, TARRECORDSIZE);
++
++ return 0;
+ }
+
+ /* Return nonzero iff all the bytes in BLOCK are NUL.
+--
+2.24.1
+
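
The cpio hunks above replace silent truncation with explicit range checks: to_ascii() gains a NUL-terminator flag and keeps reporting whether the value fit, MAX_VAL_WITH_DIGITS() gives the largest value a fixed-width field can hold, and write_out_tar_header() now fails through the TO_OCT() macro instead of emitting a corrupt header (CVE-2019-14866). Below is a minimal standalone sketch of that encoder; the names to_octal_field and the demo main are invented for illustration and the behaviour is simplified, so it is not cpio's exact code.

    /* Sketch only: mirrors the patched to_ascii()/MAX_VAL_WITH_DIGITS() logic. */
    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LG_8 3  /* bits per octal digit */

    /* Largest value representable in DIGITS digits of BITS_PER_DIGIT bits each. */
    #define MAX_VAL_WITH_DIGITS(digits, bits_per_digit) \
      ((digits) * (bits_per_digit) < sizeof (uintmax_t) * CHAR_BIT \
       ? ((uintmax_t) 1 << ((digits) * (bits_per_digit))) - 1 \
       : (uintmax_t) -1)

    /* Write V right-justified and zero-padded into a DIGITS-wide octal field.
       If NUL is true, the last byte becomes '\0' (tar-style numeric fields).
       Returns nonzero when V does not fit, instead of silently truncating.  */
    static int
    to_octal_field (char *where, uintmax_t v, size_t digits, bool nul)
    {
      static const char codetab[] = "0123456789ABCDEF";

      if (nul)
        where[--digits] = 0;
      while (digits > 0)
        {
          where[--digits] = codetab[v & ((1 << LG_8) - 1)];
          v >>= LG_8;
        }
      return v != 0;
    }

    int
    main (void)
    {
      char mode[8];  /* tar "mode" field: 7 octal digits plus a NUL */

      printf ("max storable value: %ju\n",
              MAX_VAL_WITH_DIGITS (sizeof mode - 1, LG_8));   /* 2097151 */
      if (to_octal_field (mode, (uintmax_t) 1 << 21, sizeof mode, true))
        puts ("value out of allowed range");   /* old to_oct() truncated here */
      return 0;
    }

The write_out_tar_header() changes then simply wrap this check in TO_OCT(), so an overflowing field aborts the header with a diagnostic rather than writing garbage.
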
diff --git a/meta/recipes-extended/cpio/cpio_2.12.bb b/meta/recipes-extended/cpio/cpio_2.12.bb
index 3713bf0b1f..5abe494ebc 100644
--- a/meta/recipes-extended/cpio/cpio_2.12.bb
+++ b/meta/recipes-extended/cpio/cpio_2.12.bb
@@ -11,6 +11,7 @@ SRC_URI = "${GNU_MIRROR}/cpio/cpio-${PV}.tar.gz \
file://0001-Fix-CVE-2015-1197.patch \
file://0001-CVE-2016-2037-1-byte-out-of-bounds-write.patch \
file://0001-Fix-segfault-with-append.patch \
+ file://CVE-2019-14866.patch \
"
SRC_URI[md5sum] = "fc207561a86b63862eea4b8300313e86"
diff --git a/meta/recipes-extended/ed/ed_1.15.bb b/meta/recipes-extended/ed/ed_1.15.bb
index c79310325f..886c3ddcab 100644
--- a/meta/recipes-extended/ed/ed_1.15.bb
+++ b/meta/recipes-extended/ed/ed_1.15.bb
@@ -9,6 +9,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=0c7051aef9219dc7237f206c5c4179a7 \
SECTION = "base"
+CVE_PRODUCT = "gnu:ed"
+
# LSB states that ed should be in /bin/
bindir = "${base_bindir}"
diff --git a/meta/recipes-extended/iputils/iputils_s20180629.bb b/meta/recipes-extended/iputils/iputils_s20180629.bb
index eff44be1bd..5d11ced96d 100644
--- a/meta/recipes-extended/iputils/iputils_s20180629.bb
+++ b/meta/recipes-extended/iputils/iputils_s20180629.bb
@@ -21,6 +21,10 @@ S = "${WORKDIR}/git"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>s\d+)"
+# Fixed in the 2000-10-10 release, but the date-based versioning of iputils
+# breaks the version ordering used by the CVE check.
+CVE_CHECK_WHITELIST += "CVE-2000-1213 CVE-2000-1214"
+
EXTRA_OEMAKE = "-e MAKEFLAGS="
PACKAGECONFIG ??= ""
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2019-19221.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2019-19221.patch
new file mode 100644
index 0000000000..b57e87874f
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2019-19221.patch
@@ -0,0 +1,101 @@
+From 22b1db9d46654afc6f0c28f90af8cdc84a199f41 Mon Sep 17 00:00:00 2001
+From: Martin Matuska <martin@matuska.org>
+Date: Thu, 21 Nov 2019 03:08:40 +0100
+Subject: [PATCH] Bugfix and optimize archive_wstring_append_from_mbs()
+
+The call to mbrtowc() or mbtowc() should read up to mbs_length
+bytes and not wcs_length. This avoids out-of-bounds reads.
+
+mbrtowc() and mbtowc() return (size_t)-1 with errno EILSEQ when
+they encounter an invalid multibyte character and (size_t)-2 when
+they encounter an incomplete multibyte character. As we return
+failure and all our callers error out it makes no sense to continue
+parsing mbs.
+
+As we allocate `len` wchars at the beginning and each wchar has
+at least one byte, there will never be a need to grow the buffer,
+so the code can be left out. On the other hand, we are always
+allocating more memory than we need.
+
+As long as wcs_length == mbs_length == len we can omit wcs_length.
+We keep the old code commented out in case we decide to save memory and
+use an autoexpanding wcs_length in the future.
+
+Fixes #1276
+
+Upstream-Status: Backport [https://github.com/libarchive/libarchive/commit/22b1db9d46654afc6f0c28f90af8cdc84a199f41]
+CVE: CVE-2019-19221
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ libarchive/archive_string.c | 28 +++++++++++++++++-----------
+ 1 file changed, 17 insertions(+), 11 deletions(-)
+
+diff --git a/libarchive/archive_string.c b/libarchive/archive_string.c
+index 979a418b6..bd39c96f1 100644
+--- a/libarchive/archive_string.c
++++ b/libarchive/archive_string.c
+@@ -591,7 +591,7 @@ archive_wstring_append_from_mbs(struct archive_wstring *dest,
+ * No single byte will be more than one wide character,
+ * so this length estimate will always be big enough.
+ */
+- size_t wcs_length = len;
++ // size_t wcs_length = len;
+ size_t mbs_length = len;
+ const char *mbs = p;
+ wchar_t *wcs;
+@@ -600,7 +600,11 @@ archive_wstring_append_from_mbs(struct archive_wstring *dest,
+
+ memset(&shift_state, 0, sizeof(shift_state));
+ #endif
+- if (NULL == archive_wstring_ensure(dest, dest->length + wcs_length + 1))
++ /*
++ * As we decided to have wcs_length == mbs_length == len
++ * we can use len here instead of wcs_length
++ */
++ if (NULL == archive_wstring_ensure(dest, dest->length + len + 1))
+ return (-1);
+ wcs = dest->s + dest->length;
+ /*
+@@ -609,6 +613,12 @@ archive_wstring_append_from_mbs(struct archive_wstring *dest,
+ * multi bytes.
+ */
+ while (*mbs && mbs_length > 0) {
++ /*
++ * The buffer we allocated is always big enough.
++ * Keep this code path in a comment if we decide to choose
++ * smaller wcs_length in the future
++ */
++/*
+ if (wcs_length == 0) {
+ dest->length = wcs - dest->s;
+ dest->s[dest->length] = L'\0';
+@@ -618,24 +628,20 @@ archive_wstring_append_from_mbs(struct archive_wstring *dest,
+ return (-1);
+ wcs = dest->s + dest->length;
+ }
++*/
+ #if HAVE_MBRTOWC
+- r = mbrtowc(wcs, mbs, wcs_length, &shift_state);
++ r = mbrtowc(wcs, mbs, mbs_length, &shift_state);
+ #else
+- r = mbtowc(wcs, mbs, wcs_length);
++ r = mbtowc(wcs, mbs, mbs_length);
+ #endif
+ if (r == (size_t)-1 || r == (size_t)-2) {
+ ret_val = -1;
+- if (errno == EILSEQ) {
+- ++mbs;
+- --mbs_length;
+- continue;
+- } else
+- break;
++ break;
+ }
+ if (r == 0 || r > mbs_length)
+ break;
+ wcs++;
+- wcs_length--;
++ // wcs_length--;
+ mbs += r;
+ mbs_length -= r;
+ }
diff --git a/meta/recipes-extended/libarchive/libarchive_3.3.3.bb b/meta/recipes-extended/libarchive/libarchive_3.3.3.bb
index af5ca65297..36d5bffe09 100644
--- a/meta/recipes-extended/libarchive/libarchive_3.3.3.bb
+++ b/meta/recipes-extended/libarchive/libarchive_3.3.3.bb
@@ -40,6 +40,7 @@ SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz \
file://CVE-2018-1000880.patch \
file://CVE-2019-1000019.patch \
file://CVE-2019-1000020.patch \
+ file://CVE-2019-19221.patch \
"
SRC_URI[md5sum] = "4038e366ca5b659dae3efcc744e72120"
diff --git a/meta/recipes-extended/lighttpd/lighttpd/0001-core-fix-abort-in-http-parseopts-fixes-2945.patch b/meta/recipes-extended/lighttpd/lighttpd/0001-core-fix-abort-in-http-parseopts-fixes-2945.patch
new file mode 100644
index 0000000000..123bb94c60
--- /dev/null
+++ b/meta/recipes-extended/lighttpd/lighttpd/0001-core-fix-abort-in-http-parseopts-fixes-2945.patch
@@ -0,0 +1,54 @@
+From 32120d5b8b3203fc21ccb9eafb0eaf824bb59354 Mon Sep 17 00:00:00 2001
+From: Glenn Strauss <gstrauss@gluelogic.com>
+Date: Wed, 10 Apr 2019 11:28:10 -0400
+Subject: [core] fix abort in http-parseopts (fixes #2945)
+
+fix abort in server.http-parseopts with url-path-2f-decode enabled
+
+(thx stze)
+
+x-ref:
+ "Security - SIGABRT during GET request handling with url-path-2f-decode enabled"
+ https://redmine.lighttpd.net/issues/2945
+
+CVE: CVE-2019-11072
+Upstream-Status: Backport
+Signed-off-by: Adrian Bunk <bunk@stusta.de>
+---
+ src/burl.c | 6 ++++--
+ src/t/test_burl.c | 2 ++
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/src/burl.c b/src/burl.c
+index 51182628..c4b928fd 100644
+--- a/src/burl.c
++++ b/src/burl.c
+@@ -252,8 +252,10 @@ static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
+ }
+ }
+ if (qs >= 0) {
+- memmove(s+j, s+qs, blen - qs);
+- j += blen - qs;
++ const int qslen = blen - qs;
++ memmove(s+j, s+qs, (size_t)qslen);
++ qs = j;
++ j += qslen;
+ }
+ buffer_string_set_length(b, j);
+ return qs;
+diff --git a/src/t/test_burl.c b/src/t/test_burl.c
+index 7be9be50..f7a16815 100644
+--- a/src/t/test_burl.c
++++ b/src/t/test_burl.c
+@@ -97,6 +97,8 @@ static void test_burl_normalize (void) {
+ flags |= HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE;
+ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("/a/b?c=/"), CONST_STR_LEN("/a/b?c=/"));
+ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("/a/b?c=%2f"), CONST_STR_LEN("/a/b?c=/"));
++ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("%2f?"), CONST_STR_LEN("/?"));
++ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("/%2f?"), CONST_STR_LEN("//?"));
+ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("/a%2fb"), CONST_STR_LEN("/a/b"));
+ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("/a%2Fb"), CONST_STR_LEN("/a/b"));
+ run_burl_normalize(psrc, ptmp, flags, __LINE__, CONST_STR_LEN("/a%2fb?c=/"), CONST_STR_LEN("/a/b?c=/"));
+--
+2.17.1
+
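
The burl.c hunk above does two things: it computes the tail length (blen - qs) before anything is overwritten and casts it to size_t for memmove(), and it resets qs to j so the caller receives the query string's new offset once the path has been compacted in place. A simplified sketch of that pattern, using an invented function name rather than lighttpd's buffer API (it assumes s has room for blen + 1 bytes):

    #include <string.h>

    /* After the path bytes [0, qs) of the BLEN-byte string S have been
     * rewritten in place down to J bytes, slide the "?query" tail that
     * starts at offset QS down to follow the new path and return its
     * new offset (or -1 if there was no query string). */
    static int compact_after_path_rewrite(char *s, int blen, int j, int qs)
    {
        if (qs >= 0) {
            const int qslen = blen - qs;    /* tail length, computed first */
            memmove(s + j, s + qs, (size_t)qslen);
            qs = j;                         /* query now starts where the path ends */
            j += qslen;
        }
        s[j] = '\0';                        /* new total length is j */
        return qs;
    }
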
diff --git a/meta/recipes-extended/lighttpd/lighttpd_1.4.53.bb b/meta/recipes-extended/lighttpd/lighttpd_1.4.53.bb
index a0b350f358..1259e63bfe 100644
--- a/meta/recipes-extended/lighttpd/lighttpd_1.4.53.bb
+++ b/meta/recipes-extended/lighttpd/lighttpd_1.4.53.bb
@@ -17,6 +17,7 @@ SRC_URI = "http://download.lighttpd.net/lighttpd/releases-1.4.x/lighttpd-${PV}.t
file://lighttpd.conf \
file://lighttpd \
file://0001-Use-pkg-config-for-pcre-dependency-instead-of-config.patch \
+ file://0001-core-fix-abort-in-http-parseopts-fixes-2945.patch \
"
SRC_URI[md5sum] = "f93436d8d400b2b0e26ee4bcc60b9ac7"
diff --git a/meta/recipes-extended/pam/libpam_1.3.0.bb b/meta/recipes-extended/pam/libpam_1.3.0.bb
index c124e3bb02..ad6ac4b701 100644
--- a/meta/recipes-extended/pam/libpam_1.3.0.bb
+++ b/meta/recipes-extended/pam/libpam_1.3.0.bb
@@ -164,3 +164,5 @@ CONFFILES_${PN}-runtime += "${sysconfdir}/pam.d/common-password"
CONFFILES_${PN}-runtime += "${sysconfdir}/pam.d/common-session-noninteractive"
CONFFILES_${PN}-runtime += "${sysconfdir}/pam.d/common-account"
CONFFILES_${PN}-runtime += "${sysconfdir}/security/limits.conf"
+
+CVE_PRODUCT = "linux-pam"
diff --git a/meta/recipes-extended/procps/procps_3.3.15.bb b/meta/recipes-extended/procps/procps_3.3.15.bb
index 9756db0e7b..a20917b223 100644
--- a/meta/recipes-extended/procps/procps_3.3.15.bb
+++ b/meta/recipes-extended/procps/procps_3.3.15.bb
@@ -64,3 +64,6 @@ python __anonymous() {
d.setVarFlag('ALTERNATIVE_LINK_NAME', prog, '%s/%s' % (d.getVar('base_sbindir'), prog))
}
+# 'ps' isn't suitable for use as a security tool so whitelist this CVE.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1575473#c3
+CVE_CHECK_WHITELIST += "CVE-2018-1121"
diff --git a/meta/recipes-extended/stress/stress_1.0.4.bb b/meta/recipes-extended/stress/stress_1.0.4.bb
index e9179d3e19..42046d5bf4 100644
--- a/meta/recipes-extended/stress/stress_1.0.4.bb
+++ b/meta/recipes-extended/stress/stress_1.0.4.bb
@@ -5,7 +5,7 @@ HOMEPAGE = "http://people.seas.harvard.edu/~apw/stress/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "http://people.seas.harvard.edu/~apw/${BPN}/${BP}.tar.gz \
+SRC_URI = "https://fossies.org/linux/privat/${BP}.tar.gz \
file://texinfo.patch \
"
diff --git a/meta/recipes-extended/sudo/sudo_1.8.27.bb b/meta/recipes-extended/sudo/sudo_1.8.27.bb
index 7460a5bfed..43c27bf37f 100644
--- a/meta/recipes-extended/sudo/sudo_1.8.27.bb
+++ b/meta/recipes-extended/sudo/sudo_1.8.27.bb
@@ -1,6 +1,6 @@
require sudo.inc
-SRC_URI = "http://ftp.sudo.ws/sudo/dist/sudo-${PV}.tar.gz \
+SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
file://0001-Include-sys-types.h-for-id_t-definition.patch \
file://CVE-2019-14287-1.patch \
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2019-19725.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2019-19725.patch
new file mode 100644
index 0000000000..2aa12724f8
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2019-19725.patch
@@ -0,0 +1,28 @@
+From a5c8abd4a481ee6e27a3acf00e6d9b0f023e20ed Mon Sep 17 00:00:00 2001
+From: Sebastien GODARD <sysstat@users.noreply.github.com>
+Date: Mon, 9 Dec 2019 17:54:07 +0100
+Subject: [PATCH] Fix #242: Double free in check_file_actlst()
+
+Avoid freeing buffer() twice.
+
+Signed-off-by: Sebastien GODARD <sysstat@users.noreply.github.com>
+
+Upstream-Status: Backport [https://github.com/sysstat/sysstat/commit/a5c8abd4a481ee6e27a3acf00e6d9b0f023e20ed]
+CVE: CVE-2019-19725
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ sa_common.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sa_common.c b/sa_common.c
+index cf52aefe..856a3715 100644
+--- a/sa_common.c
++++ b/sa_common.c
+@@ -2153,6 +2153,7 @@ void check_file_actlst(int *ifd, char *dfile, struct activity *act[], uint64_t f
+ }
+
+ free(buffer);
++ buffer = NULL;
+
+ /* Check that at least one activity selected by the user is available in file */
+ for (i = 0; i < NR_ACT; i++) {
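
The sysstat change is the classic free-then-NULL idiom: once check_file_actlst() has released buffer, any later cleanup path that frees it again performs free(NULL), which is defined to do nothing, instead of a double free. A generic illustration of the idiom, not sysstat code:

    #include <stdlib.h>

    /* Release *BUFP exactly once; repeated calls become harmless no-ops
     * because free(NULL) does nothing. */
    static void release_buffer(void **bufp)
    {
        free(*bufp);
        *bufp = NULL;
    }
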
diff --git a/meta/recipes-extended/sysstat/sysstat_12.1.3.bb b/meta/recipes-extended/sysstat/sysstat_12.1.3.bb
index 5daf3f45f5..1b552166b0 100644
--- a/meta/recipes-extended/sysstat/sysstat_12.1.3.bb
+++ b/meta/recipes-extended/sysstat/sysstat_12.1.3.bb
@@ -2,7 +2,9 @@ require sysstat.inc
LIC_FILES_CHKSUM = "file://COPYING;md5=a23a74b3f4caf9616230789d94217acb"
-SRC_URI += "file://0001-Include-needed-headers-explicitly.patch"
+SRC_URI += "file://0001-Include-needed-headers-explicitly.patch \
+ file://CVE-2019-19725.patch \
+"
SRC_URI[md5sum] = "0f9b73f60aba6fd49de346bc384902c3"
SRC_URI[sha256sum] = "55498bf82755ba9fed3e7df61fd26f8f50dd3e7b3b229c731029a4c8ab51a1aa"
diff --git a/meta/recipes-extended/timezone/timezone.inc b/meta/recipes-extended/timezone/timezone.inc
index 1ade0075e6..f6bab1acb4 100644
--- a/meta/recipes-extended/timezone/timezone.inc
+++ b/meta/recipes-extended/timezone/timezone.inc
@@ -4,7 +4,7 @@ SECTION = "base"
LICENSE = "PD & BSD & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c679c9d6b02bc2757b3eaf8f53c43fba"
-PV = "2019a"
+PV = "2019c"
SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz;name=tzcode \
http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata \
@@ -12,7 +12,7 @@ SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz
UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones"
-SRC_URI[tzcode.md5sum] = "27585a20bc5401324f42c8deb6e4677f"
-SRC_URI[tzcode.sha256sum] = "8739f162bc30cdfb482435697f969253abea49595541a0afd5f443fbae433ff5"
-SRC_URI[tzdata.md5sum] = "288f7b1e43018c633da108f13b27cf91"
-SRC_URI[tzdata.sha256sum] = "90366ddf4aa03e37a16cd49255af77f801822310b213f195e2206ead48c59772"
+SRC_URI[tzcode.md5sum] = "195a17454c5db05cab96595380650391"
+SRC_URI[tzcode.sha256sum] = "f6ebd3668e02d5ed223d3b7b1947561bf2d2da2f4bd1db61efefd9e06c167ed4"
+SRC_URI[tzdata.md5sum] = "f6987e6dfdb2eb83a1b5076a50b80894"
+SRC_URI[tzdata.sha256sum] = "79c7806dab09072308da0e3d22c37d3b245015a591891ea147d3b133b60ffc7c"
diff --git a/meta/recipes-graphics/xorg-lib/libxfont2_2.0.3.bb b/meta/recipes-graphics/xorg-lib/libxfont2_2.0.3.bb
index 2b72d6750c..6994d79e89 100644
--- a/meta/recipes-graphics/xorg-lib/libxfont2_2.0.3.bb
+++ b/meta/recipes-graphics/xorg-lib/libxfont2_2.0.3.bb
@@ -20,3 +20,5 @@ SRC_URI[sha256sum] = "0e8ab7fd737ccdfe87e1f02b55f221f0bd4503a1c5f28be4ed6a54586b
PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
+
+CVE_PRODUCT = "libxfont libxfont2"
diff --git a/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb b/meta/recipes-kernel/linux-firmware/linux-firmware_20190815.bb
index 8b6ad96dba..d83000b64f 100644
--- a/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb
+++ b/meta/recipes-kernel/linux-firmware/linux-firmware_20190815.bb
@@ -92,7 +92,7 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \
file://LICENCE.iwlwifi_firmware;md5=3fd842911ea93c29cd32679aa23e1c88 \
file://LICENCE.kaweth;md5=b1d876e562f4b3b8d391ad8395dfe03f \
- file://LICENCE.Marvell;md5=9ddea1734a4baf3c78d845151f42a37a \
+ file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \
file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \
file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \
file://LICENCE.myri10ge_firmware;md5=42e32fb89f6b959ca222e25ac8df8fed \
@@ -123,7 +123,7 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \
file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \
file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \
- file://WHENCE;md5=b6e44adf71bc37e5f26ebfe5a08b5490 \
+ file://WHENCE;md5=37a01e379219d1e06dbccfa90a8fc0ad \
"
# These are not common licenses, set NO_GENERIC_LICENSE for them
@@ -190,9 +190,9 @@ NO_GENERIC_LICENSE[Firmware-xc5000] = "LICENCE.xc5000"
NO_GENERIC_LICENSE[Firmware-xc5000c] = "LICENCE.xc5000c"
NO_GENERIC_LICENSE[WHENCE] = "WHENCE"
-SRCREV = "711d3297bac870af42088a467459a0634c1970ca"
PE = "1"
-PV = "0.0+git${SRCPV}"
+
+SRCREV = "07b925b450bfb4cf3e141c612ec5b104658cd020"
SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
@@ -273,6 +273,7 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-bcm4358 \
${PN}-bcm43602 \
${PN}-bcm4366b \
+ ${PN}-bcm4366c \
${PN}-bcm4371 \
${PN}-bcm4373 \
${PN}-bcm43xx \
@@ -571,7 +572,7 @@ FILES_${PN}-bcm43xx-hdr = "${nonarch_base_libdir}/firmware/brcm/bcm43xx_hdr-0.fw
FILES_${PN}-bcm4329-fullmac = "${nonarch_base_libdir}/firmware/brcm/bcm4329-fullmac-4.bin"
FILES_${PN}-bcm43236b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43236b.bin"
FILES_${PN}-bcm4329 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4329-sdio.bin"
-FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.bin"
+FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.*"
FILES_${PN}-bcm4334 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4334-sdio.bin"
FILES_${PN}-bcm4335 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4335-sdio.bin"
FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin"
@@ -582,8 +583,8 @@ FILES_${PN}-bcm43242a = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43242a.bin
FILES_${PN}-bcm43143 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43143.bin \
${nonarch_base_libdir}/firmware/brcm/brcmfmac43143-sdio.bin \
"
-FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.bin"
-FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.bin"
+FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.*"
+FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.*"
FILES_${PN}-bcm4350c2 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350c2-pcie.bin"
FILES_${PN}-bcm4350 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350-pcie.bin"
FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.bin"
@@ -594,6 +595,7 @@ FILES_${PN}-bcm43602 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.
${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.ap.bin \
"
FILES_${PN}-bcm4366b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366b-pcie.bin"
+FILES_${PN}-bcm4366c = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366c-pcie.bin"
FILES_${PN}-bcm4371 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4371-pcie.bin"
# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e "LICENSE_\${PN}-$pkg = \"Firmware-broadcom_bcm43xx\"\nRDEPENDS_\${PN}-$pkg += \"\${PN}-broadcom-license\""; done
@@ -647,6 +649,8 @@ LICENSE_${PN}-bcm43602 = "Firmware-broadcom_bcm43xx"
RDEPENDS_${PN}-bcm43602 += "${PN}-broadcom-license"
LICENSE_${PN}-bcm4366b = "Firmware-broadcom_bcm43xx"
RDEPENDS_${PN}-bcm4366b += "${PN}-broadcom-license"
+LICENSE_${PN}-bcm4366c = "Firmware-broadcom_bcm43xx"
+RDEPENDS_${PN}-bcm4366c += "${PN}-broadcom-license"
LICENSE_${PN}-bcm4371 = "Firmware-broadcom_bcm43xx"
RDEPENDS_${PN}-bcm4371 += "${PN}-broadcom-license"
@@ -656,11 +660,11 @@ LICENSE_${PN}-cypress-license = "Firmware-cypress"
FILES_${PN}-cypress-license = "${nonarch_base_libdir}/firmware/LICENCE.cypress"
FILES_${PN}-bcm-0bb4-0306 = "${nonarch_base_libdir}/firmware/brcm/BCM-0bb4-0306.hcd"
-FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.bin"
-FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.bin"
-FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.bin"
+FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.*"
+FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.*"
+FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.*"
FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin"
-FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.bin"
+FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.*"
FILES_${PN}-bcm4373 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.bin \
${nonarch_base_libdir}/firmware/brcm/brcmfmac4373.bin \
"
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb b/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb
index da87d476f9..7b89a24223 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "ca2e3322f4c5678eaef6434c808d0842c805d74d"
-SRCREV_meta ?= "20a6158aa35dbf11819382ef1eeb28915afea765"
+SRCREV_machine ?= "d3fb163023de0a5a57a021e58b55976f68142fc0"
+SRCREV_meta ?= "4f5d761316a9cf14605e5d0cc91b53c1b2e9dc6a"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.19;destsuffix=${KMETA}"
-LINUX_VERSION ?= "4.19.61"
+LINUX_VERSION ?= "4.19.87"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb
index cadf1a7402..9cde2c2342 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb
@@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "4.19.61"
+LINUX_VERSION ?= "4.19.87"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine_qemuarm ?= "b5a2efa31290f31384971494031285d394635938"
-SRCREV_machine ?= "4ec6f255163da37a4c83528e5835b6b9baccee63"
-SRCREV_meta ?= "20a6158aa35dbf11819382ef1eeb28915afea765"
+SRCREV_machine_qemuarm ?= "6e7574e9a9b41143e0c42fcb31b13802529602f0"
+SRCREV_machine ?= "2ba1fa4d5068982e785527ef8ad1a8b658f0add1"
+SRCREV_meta ?= "4f5d761316a9cf14605e5d0cc91b53c1b2e9dc6a"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/meta/recipes-kernel/linux/linux-yocto_4.19.bb b/meta/recipes-kernel/linux/linux-yocto_4.19.bb
index 5edb97fed1..aab30a981e 100644
--- a/meta/recipes-kernel/linux/linux-yocto_4.19.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_4.19.bb
@@ -11,22 +11,22 @@ KBRANCH_qemux86 ?= "v4.19/standard/base"
KBRANCH_qemux86-64 ?= "v4.19/standard/base"
KBRANCH_qemumips64 ?= "v4.19/standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "ca3cb923f8d7962c6d47a8d29923e52da1818854"
-SRCREV_machine_qemuarm64 ?= "4ec6f255163da37a4c83528e5835b6b9baccee63"
-SRCREV_machine_qemumips ?= "f624314048dfac57e47ac91d89ca3dc8395ca47a"
-SRCREV_machine_qemuppc ?= "4ec6f255163da37a4c83528e5835b6b9baccee63"
-SRCREV_machine_qemux86 ?= "4ec6f255163da37a4c83528e5835b6b9baccee63"
-SRCREV_machine_qemux86-64 ?= "4ec6f255163da37a4c83528e5835b6b9baccee63"
-SRCREV_machine_qemumips64 ?= "ca47368b698795cd5cada84dbfcceda1f47da1aa"
-SRCREV_machine ?= "4ec6f255163da37a4c83528e5835b6b9baccee63"
-SRCREV_meta ?= "20a6158aa35dbf11819382ef1eeb28915afea765"
+SRCREV_machine_qemuarm ?= "4d9d89763cde098e4a8b879e8c831e35a5f39ae6"
+SRCREV_machine_qemuarm64 ?= "2ba1fa4d5068982e785527ef8ad1a8b658f0add1"
+SRCREV_machine_qemumips ?= "1f0855745653c8cb7d72ea2d03197889cc82a410"
+SRCREV_machine_qemuppc ?= "2ba1fa4d5068982e785527ef8ad1a8b658f0add1"
+SRCREV_machine_qemux86 ?= "2ba1fa4d5068982e785527ef8ad1a8b658f0add1"
+SRCREV_machine_qemux86-64 ?= "2ba1fa4d5068982e785527ef8ad1a8b658f0add1"
+SRCREV_machine_qemumips64 ?= "5ed73d6e6dc7d4ad8184de8eb6641bdb265ae2ea"
+SRCREV_machine ?= "2ba1fa4d5068982e785527ef8ad1a8b658f0add1"
+SRCREV_meta ?= "4f5d761316a9cf14605e5d0cc91b53c1b2e9dc6a"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRANCH}; \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.19;destsuffix=${KMETA} \
"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
-LINUX_VERSION ?= "4.19.61"
+LINUX_VERSION ?= "4.19.87"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
index 77393db847..6044bf09c7 100644
--- a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
@@ -38,3 +38,6 @@ do_configure_prepend_arm() {
export ac_cv_sys_file_offset_bits=64
}
+# This can't be replicated and is just a memory leak.
+# https://github.com/erikd/libsndfile/issues/398
+CVE_CHECK_WHITELIST += "CVE-2018-13419"
diff --git a/meta/recipes-sato/webkit/webkitgtk_2.22.7.bb b/meta/recipes-sato/webkit/webkitgtk_2.22.7.bb
index 301bf10cea..26e673cbcb 100644
--- a/meta/recipes-sato/webkit/webkitgtk_2.22.7.bb
+++ b/meta/recipes-sato/webkit/webkitgtk_2.22.7.bb
@@ -30,6 +30,8 @@ inherit cmake pkgconfig gobject-introspection perlnative distro_features_check u
REQUIRED_DISTRO_FEATURES = "x11 opengl"
+CVE_PRODUCT = "webkitgtk webkitgtk\+"
+
DEPENDS = "zlib libsoup-2.4 curl libxml2 cairo libxslt libxt libidn libgcrypt \
gtk+3 gstreamer1.0 gstreamer1.0-plugins-base flex-native gperf-native sqlite3 \
pango icu bison-native gawk intltool-native libwebp \
diff --git a/meta/recipes-support/boost/boost.inc b/meta/recipes-support/boost/boost.inc
index c2e2cbb352..f385541653 100644
--- a/meta/recipes-support/boost/boost.inc
+++ b/meta/recipes-support/boost/boost.inc
@@ -2,6 +2,8 @@ SUMMARY = "Free peer-reviewed portable C++ source libraries"
SECTION = "libs"
DEPENDS = "bjam-native zlib bzip2"
+CVE_PRODUCT = "boost:boost"
+
ARM_INSTRUCTION_SET_armv4 = "arm"
ARM_INSTRUCTION_SET_armv5 = "arm"
diff --git a/meta/recipes-support/boost/boost/0001-dont-setup-compiler-flags-m32-m64.patch b/meta/recipes-support/boost/boost/0001-dont-setup-compiler-flags-m32-m64.patch
new file mode 100644
index 0000000000..78b19225d4
--- /dev/null
+++ b/meta/recipes-support/boost/boost/0001-dont-setup-compiler-flags-m32-m64.patch
@@ -0,0 +1,42 @@
+From 59402e3a61d14eb7ce8c2019ea1a87ad4bd28605 Mon Sep 17 00:00:00 2001
+From: Anuj Mittal <anuj.mittal@intel.com>
+Date: Thu, 14 Nov 2019 10:13:53 +0800
+Subject: [PATCH] dont setup compiler flags -m32/-m64
+
+We don't want these to be set up by boost as we pass our own flags.
+
+Upstream-Status: Inappropriate [OE-specific]
+
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ tools/build/src/tools/gcc.jam | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/tools/build/src/tools/gcc.jam b/tools/build/src/tools/gcc.jam
+index c7e3cf3..24486e0 100644
+--- a/tools/build/src/tools/gcc.jam
++++ b/tools/build/src/tools/gcc.jam
+@@ -430,20 +430,6 @@ local rule compile-link-flags ( * )
+ }
+
+ {
+- # Handle address-model
+- compile-link-flags <target-os>aix/<address-model>32 : -maix32 ;
+- compile-link-flags <target-os>aix/<address-model>64 : -maix64 ;
+-
+- compile-link-flags <target-os>hpux/<address-model>32 : -milp32 ;
+- compile-link-flags <target-os>hpux/<address-model>64 : -mlp64 ;
+-
+- local generic-os = [ set.difference $(all-os) : aix hpux ] ;
+- local arch = power sparc x86 ;
+- compile-link-flags <target-os>$(generic-os)/<architecture>$(arch)/<address-model>32 : -m32 ;
+- compile-link-flags <target-os>$(generic-os)/<architecture>$(arch)/<address-model>64 : -m64 ;
+-}
+-
+-{
+ # Handle threading
+ local rule threading-flags ( * )
+ {
+--
+2.7.4
+
diff --git a/meta/recipes-support/boost/boost_1.69.0.bb b/meta/recipes-support/boost/boost_1.69.0.bb
index 324b46f168..5e9e0d87d7 100644
--- a/meta/recipes-support/boost/boost_1.69.0.bb
+++ b/meta/recipes-support/boost/boost_1.69.0.bb
@@ -6,4 +6,5 @@ SRC_URI += "file://arm-intrinsics.patch \
file://boost-math-disable-pch-for-gcc.patch \
file://0001-Apply-boost-1.62.0-no-forced-flags.patch.patch \
file://0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch \
+ file://0001-dont-setup-compiler-flags-m32-m64.patch \
"
diff --git a/meta/recipes-support/gnupg/gnupg/0001-Woverride-init-is-not-needed-with-gcc-9.patch b/meta/recipes-support/gnupg/gnupg/0001-Woverride-init-is-not-needed-with-gcc-9.patch
index 4a280f9d5c..83195b5bd4 100644
--- a/meta/recipes-support/gnupg/gnupg/0001-Woverride-init-is-not-needed-with-gcc-9.patch
+++ b/meta/recipes-support/gnupg/gnupg/0001-Woverride-init-is-not-needed-with-gcc-9.patch
@@ -1,4 +1,4 @@
-From 0df5800cc2e720aad883a517f7d24a9722fe5845 Mon Sep 17 00:00:00 2001
+From e3adc816d2d56dd929016073937ba24e01e03cb8 Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Thu, 20 Dec 2018 17:37:48 -0800
Subject: [PATCH] Woverride-init is not needed with gcc 9
@@ -17,15 +17,18 @@ Signed-off-by: Khem Raj <raj.khem@gmail.com>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dirmngr/dns.h b/dirmngr/dns.h
-index 30d0b45..98fe412 100644
+index 024d6dcc8..c6e141e16 100644
--- a/dirmngr/dns.h
+++ b/dirmngr/dns.h
-@@ -154,7 +154,7 @@ DNS_PUBLIC int *dns_debug_p(void);
+@@ -139,7 +139,7 @@ DNS_PUBLIC int *dns_debug_p(void);
+ #define DNS_PRAGMA_QUIET _Pragma("clang diagnostic ignored \"-Winitializer-overrides\"")
+ #define DNS_PRAGMA_POP _Pragma("clang diagnostic pop")
- #define dns_quietinit(...) \
- DNS_PRAGMA_PUSH DNS_PRAGMA_QUIET __VA_ARGS__ DNS_PRAGMA_POP
-#elif (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || __GNUC__ > 4
+#elif (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4 && __GNUC__ < 9)
#define DNS_PRAGMA_PUSH _Pragma("GCC diagnostic push")
#define DNS_PRAGMA_QUIET _Pragma("GCC diagnostic ignored \"-Woverride-init\"")
#define DNS_PRAGMA_POP _Pragma("GCC diagnostic pop")
+--
+2.17.1
+
diff --git a/meta/recipes-support/gnupg/gnupg_2.2.13.bb b/meta/recipes-support/gnupg/gnupg_2.2.17.bb
index 3ce2a38c0e..e5456dd9b9 100644
--- a/meta/recipes-support/gnupg/gnupg_2.2.13.bb
+++ b/meta/recipes-support/gnupg/gnupg_2.2.17.bb
@@ -19,9 +19,8 @@ SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
SRC_URI_append_class-native = " file://0001-configure.ac-use-a-custom-value-for-the-location-of-.patch \
file://relocate.patch"
-
-SRC_URI[md5sum] = "563b959d0c3856e34526e9ca51c80d7b"
-SRC_URI[sha256sum] = "76c787a955f9e6e0ead47c9be700bfb9d454f955a7b7c7e697aa719bac7b11d8"
+SRC_URI[md5sum] = "1ba2d9b70c377f8e967742064c27a19c"
+SRC_URI[sha256sum] = "afa262868e39b651a2db4c071fba90415154243e83a830ca00516f9a807fd514"
EXTRA_OECONF = "--disable-ldap \
--disable-ccid-driver \
diff --git a/meta/recipes-support/libsoup/libsoup-2.4_2.64.2.bb b/meta/recipes-support/libsoup/libsoup-2.4_2.64.2.bb
index b095397ec2..b33a47be6a 100644
--- a/meta/recipes-support/libsoup/libsoup-2.4_2.64.2.bb
+++ b/meta/recipes-support/libsoup/libsoup-2.4_2.64.2.bb
@@ -15,6 +15,8 @@ SRC_URI = "${GNOME_MIRROR}/libsoup/${SHRT_VER}/libsoup-${PV}.tar.xz \
SRC_URI[md5sum] = "cac755dc6c6acd6e0c70007f547548f5"
SRC_URI[sha256sum] = "75ddc194a5b1d6f25033bb9d355f04bfe5c03e0e1c71ed0774104457b3a786c6"
+CVE_PRODUCT = "libsoup"
+
S = "${WORKDIR}/libsoup-${PV}"
inherit meson gettext pkgconfig upstream-version-is-even gobject-introspection gtk-doc
diff --git a/meta/recipes-support/lz4/lz4_1.8.3.bb b/meta/recipes-support/lz4/lz4_1.8.3.bb
index 125836f7bf..605e148d81 100644
--- a/meta/recipes-support/lz4/lz4_1.8.3.bb
+++ b/meta/recipes-support/lz4/lz4_1.8.3.bb
@@ -18,6 +18,9 @@ UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>.*)"
S = "${WORKDIR}/git"
+# Fixed in r118, whose version string compares as higher than the current version.
+CVE_CHECK_WHITELIST += "CVE-2014-4715"
+
EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir}"
do_install() {
diff --git a/meta/recipes-support/popt/popt_1.16.bb b/meta/recipes-support/popt/popt_1.16.bb
index 478288f9bf..27e49c2ca2 100644
--- a/meta/recipes-support/popt/popt_1.16.bb
+++ b/meta/recipes-support/popt/popt_1.16.bb
@@ -8,7 +8,7 @@ PR = "r3"
DEPENDS = "virtual/libiconv"
-SRC_URI = "http://rpm5.org/files/popt/popt-${PV}.tar.gz \
+SRC_URI = "http://anduin.linuxfromscratch.org/BLFS/popt/popt-${PV}.tar.gz \
file://pkgconfig_fix.patch \
file://popt_fix_for_automake-1.12.patch \
file://disable_tests.patch \
diff --git a/meta/recipes-support/sqlite/files/0001-Fix-CVE-2019-16168.patch b/meta/recipes-support/sqlite/files/0001-Fix-CVE-2019-16168.patch
new file mode 100644
index 0000000000..7c4a65b3cd
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/0001-Fix-CVE-2019-16168.patch
@@ -0,0 +1,40 @@
+From fcf06b0b426e6c243d6ca2d6c6a02830717ab6a3 Mon Sep 17 00:00:00 2001
+From: Chen Qi <Qi.Chen@windriver.com>
+Date: Tue, 15 Oct 2019 13:22:52 +0800
+Subject: [PATCH] Fix CVE-2019-16168
+
+CVE: CVE-2019-16168
+
+Upstream-Status: Backport [https://www.sqlite.org/src/vpatch?from=4f5b2d938194fab7&to=98357d8c1263920b]
+
+Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
+---
+ sqlite3.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 61bfdeb..b3e6ae2 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -105933,7 +105933,9 @@ static void decodeIntArray(
+ if( sqlite3_strglob("unordered*", z)==0 ){
+ pIndex->bUnordered = 1;
+ }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){
+- pIndex->szIdxRow = sqlite3LogEst(sqlite3Atoi(z+3));
++ int sz = sqlite3Atoi(z+3);
++ if( sz<2 ) sz = 2;
++ pIndex->szIdxRow = sqlite3LogEst(sz);
+ }else if( sqlite3_strglob("noskipscan*", z)==0 ){
+ pIndex->noSkipScan = 1;
+ }
+@@ -143260,6 +143262,7 @@ static int whereLoopAddBtreeIndex(
+ ** it to pNew->rRun, which is currently set to the cost of the index
+ ** seek only. Then, if this is a non-covering index, add the cost of
+ ** visiting the rows in the main table. */
++ assert( pSrc->pTab->szTabRow>0 );
+ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow;
+ pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx);
+ if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){
+--
+2.17.1
+
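
The first hunk above clamps the "sz=N" value parsed from a sqlite_stat1 row to at least 2 before it becomes a row-size estimate, and the second asserts that the estimate is positive because whereLoopAddBtreeIndex() divides by it. A simplified sketch of the clamp with stand-in names (SQLite actually stores the logarithmic estimate sqlite3LogEst(sz), which is omitted here):

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse the digits following "sz=" in a stat entry; never return a
     * value below 2 so later cost formulas cannot divide by zero. */
    static int parse_stat_sz(const char *z)
    {
        int sz = atoi(z);
        if (sz < 2)
            sz = 2;                 /* the upstream clamp */
        return sz;
    }

    int main(void)
    {
        int szTabRow = parse_stat_sz("0");        /* hostile "sz=0" entry */
        int rCostIdx = 1 + (15 * 10) / szTabRow;  /* divisor is at least 2 */
        printf("szTabRow=%d rCostIdx=%d\n", szTabRow, rCostIdx);
        return 0;
    }
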
diff --git a/meta/recipes-support/sqlite/sqlite3_3.27.2.bb b/meta/recipes-support/sqlite/sqlite3_3.27.2.bb
index 4bdb04f4d1..2888a56ee9 100644
--- a/meta/recipes-support/sqlite/sqlite3_3.27.2.bb
+++ b/meta/recipes-support/sqlite/sqlite3_3.27.2.bb
@@ -7,6 +7,7 @@ SRC_URI = "\
http://www.sqlite.org/2019/sqlite-autoconf-${SQLITE_PV}.tar.gz \
file://CVE-2019-9936.patch \
file://CVE-2019-9937.patch \
+ file://0001-Fix-CVE-2019-16168.patch \
"
SRC_URI[md5sum] = "1f72631ce6e8efa5b4a6e55a43b3bdc0"