Diffstat (limited to 'meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch')
-rw-r--r--  meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch  193
1 file changed, 193 insertions, 0 deletions
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch b/meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch
new file mode 100644
index 0000000000..01c0328362
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch
@@ -0,0 +1,193 @@
+From 79a4fa341b8a89cb03f84564fd72abaa1a2db394 Mon Sep 17 00:00:00 2001
+From: Evgeny Eremin <e.eremin@omprussia.ru>
+Date: Wed, 8 Jul 2020 14:18:19 +0200
+Subject: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative
+ length [BZ #25620]
+
+Unsigned branch instructions could be used for r2 to fix the wrong
+behavior when a negative length is passed to memcpy and memmove.
+This commit fixes the generic arm implementation of memcpy and memmove.
+
+CVE: CVE-2020-6096
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ sysdeps/arm/memcpy.S | 24 ++++++++++--------------
+ sysdeps/arm/memmove.S | 24 ++++++++++--------------
+ 2 files changed, 20 insertions(+), 28 deletions(-)
+
+diff --git a/sysdeps/arm/memcpy.S b/sysdeps/arm/memcpy.S
+index 510e8adaf2..bcfbc51d99 100644
+--- a/sysdeps/arm/memcpy.S
++++ b/sysdeps/arm/memcpy.S
+@@ -68,7 +68,7 @@ ENTRY(memcpy)
+ cfi_remember_state
+
+ subs r2, r2, #4
+- blt 8f
++ blo 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #0] )
+ bne 9f
+@@ -82,7 +82,7 @@ ENTRY(memcpy)
+ cfi_rel_offset (r6, 4)
+ cfi_rel_offset (r7, 8)
+ cfi_rel_offset (r8, 12)
+- blt 5f
++ blo 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb r3, ip, #32 )
+@@ -98,9 +98,9 @@ ENTRY(memcpy)
+ #endif
+
+ PLD( pld [r1, #0] )
+-2: PLD( subs r2, r2, #96 )
++2: PLD( cmp r2, #96 )
+ PLD( pld [r1, #28] )
+- PLD( blt 4f )
++ PLD( blo 4f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+@@ -108,9 +108,7 @@ ENTRY(memcpy)
+ 4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+- bge 3b
+- PLD( cmn r2, #96 )
+- PLD( bge 4b )
++ bhs 3b
+
+ 5: ands ip, r2, #28
+ rsb ip, ip, #32
+@@ -222,7 +220,7 @@ ENTRY(memcpy)
+ strbge r4, [r0], #1
+ subs r2, r2, ip
+ strb lr, [r0], #1
+- blt 8b
++ blo 8b
+ ands ip, r1, #3
+ beq 1b
+
+@@ -236,7 +234,7 @@ ENTRY(memcpy)
+ .macro forward_copy_shift pull push
+
+ subs r2, r2, #28
+- blt 14f
++ blo 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+@@ -253,9 +251,9 @@ ENTRY(memcpy)
+ cfi_rel_offset (r10, 16)
+
+ PLD( pld [r1, #0] )
+- PLD( subs r2, r2, #96 )
++ PLD( cmp r2, #96 )
+ PLD( pld [r1, #28] )
+- PLD( blt 13f )
++ PLD( blo 13f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+@@ -280,9 +278,7 @@ ENTRY(memcpy)
+ mov ip, ip, PULL #\pull
+ orr ip, ip, lr, PUSH #\push
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r10, ip}
+- bge 12b
+- PLD( cmn r2, #96 )
+- PLD( bge 13b )
++ bhs 12b
+
+ pop {r5 - r8, r10}
+ cfi_adjust_cfa_offset (-20)
+diff --git a/sysdeps/arm/memmove.S b/sysdeps/arm/memmove.S
+index 954037ef3a..0d07b76ee6 100644
+--- a/sysdeps/arm/memmove.S
++++ b/sysdeps/arm/memmove.S
+@@ -85,7 +85,7 @@ ENTRY(memmove)
+ add r1, r1, r2
+ add r0, r0, r2
+ subs r2, r2, #4
+- blt 8f
++ blo 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #-4] )
+ bne 9f
+@@ -99,7 +99,7 @@ ENTRY(memmove)
+ cfi_rel_offset (r6, 4)
+ cfi_rel_offset (r7, 8)
+ cfi_rel_offset (r8, 12)
+- blt 5f
++ blo 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( sbcsne r4, ip, r2 ) @ C is always set here
+@@ -114,9 +114,9 @@ ENTRY(memmove)
+ #endif
+
+ PLD( pld [r1, #-4] )
+-2: PLD( subs r2, r2, #96 )
++2: PLD( cmp r2, #96 )
+ PLD( pld [r1, #-32] )
+- PLD( blt 4f )
++ PLD( blo 4f )
+ PLD( pld [r1, #-64] )
+ PLD( pld [r1, #-96] )
+
+@@ -124,9 +124,7 @@ ENTRY(memmove)
+ 4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+- bge 3b
+- PLD( cmn r2, #96 )
+- PLD( bge 4b )
++ bhs 3b
+
+ 5: ands ip, r2, #28
+ rsb ip, ip, #32
+@@ -237,7 +235,7 @@ ENTRY(memmove)
+ strbge r4, [r0, #-1]!
+ subs r2, r2, ip
+ strb lr, [r0, #-1]!
+- blt 8b
++ blo 8b
+ ands ip, r1, #3
+ beq 1b
+
+@@ -251,7 +249,7 @@ ENTRY(memmove)
+ .macro backward_copy_shift push pull
+
+ subs r2, r2, #28
+- blt 14f
++ blo 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+@@ -268,9 +266,9 @@ ENTRY(memmove)
+ cfi_rel_offset (r10, 16)
+
+ PLD( pld [r1, #-4] )
+- PLD( subs r2, r2, #96 )
++ PLD( cmp r2, #96 )
+ PLD( pld [r1, #-32] )
+- PLD( blt 13f )
++ PLD( blo 13f )
+ PLD( pld [r1, #-64] )
+ PLD( pld [r1, #-96] )
+
+@@ -295,9 +293,7 @@ ENTRY(memmove)
+ mov r4, r4, PUSH #\push
+ orr r4, r4, r3, PULL #\pull
+ stmdb r0!, {r4 - r8, r10, ip, lr}
+- bge 12b
+- PLD( cmn r2, #96 )
+- PLD( bge 13b )
++ bhs 12b
+
+ pop {r5 - r8, r10}
+ cfi_adjust_cfa_offset (-20)
+--
+2.17.0
+
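The patch replaces signed branches (blt, bge) on the length register r2 with unsigned ones (blo, bhs), so a length with the top bit set, typically the product of an integer underflow in the caller, is no longer treated as negative and diverted to the wrong copy path. The minimal C sketch below mirrors that signed-versus-unsigned test; the underflowed length value and the caller scenario described in the comments are illustrative assumptions, not taken from the patch.

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    /* Hypothetical length from an integer underflow in the caller, e.g.
       "body_len = msg_len - header_len" with header_len > msg_len.  As a
       32-bit size_t it is huge; reinterpreted as signed it is negative. */
    uint32_t len = (uint32_t)-8;              /* 0xFFFFFFF8 */

    /* Old entry check: "subs r2, r2, #4" followed by "blt" branches on the
       signed sign bit, so the huge length looks negative and is diverted
       to the short byte-copy tail instead of the bulk-copy loop. */
    if ((int32_t)(len - 4) < 0)
        printf("signed test (blt): 0x%08" PRIx32 " misread as a tiny copy\n", len);

    /* Patched entry check: "blo" branches only when the subtraction
       borrows, i.e. when len < 4 as an unsigned value, so the same length
       stays on the bulk-copy path like any other oversized request. */
    if (!(len < 4))
        printf("unsigned test (blo): 0x%08" PRIx32 " stays on the bulk-copy path\n", len);

    return 0;
}

The related PLD hunks appear to follow the same logic: using "cmp r2, #96" instead of "subs r2, r2, #96" (and dropping the "cmn r2, #96" re-entry) keeps r2 unbiased, so the new unsigned branches always see the true remaining length.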