Diffstat (limited to 'meta/recipes-core/glibc/glibc/CVE-2017-18269.patch')
-rw-r--r--  meta/recipes-core/glibc/glibc/CVE-2017-18269.patch | 178
1 file changed, 178 insertions(+), 0 deletions(-)
diff --git a/meta/recipes-core/glibc/glibc/CVE-2017-18269.patch b/meta/recipes-core/glibc/glibc/CVE-2017-18269.patch
new file mode 100644
index 0000000000..d873c51e60
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2017-18269.patch
@@ -0,0 +1,178 @@
+From cd66c0e584c6d692bc8347b5e72723d02b8a8ada Mon Sep 17 00:00:00 2001
+From: Andrew Senkevich <andrew.n.senkevich@gmail.com>
+Date: Fri, 23 Mar 2018 16:19:45 +0100
+Subject: [PATCH] Fix i386 memmove issue (bug 22644).
+
+ [BZ #22644]
+ * sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
+ branch conditions.
+ * string/test-memmove.c (do_test2): New testcase.
+
+Upstream-Status: Backport
+CVE: CVE-2017-18269
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ ChangeLog | 8 +++
+ string/test-memmove.c | 58 ++++++++++++++++++++++
+ .../i386/i686/multiarch/memcpy-sse2-unaligned.S | 12 ++---
+ 3 files changed, 72 insertions(+), 6 deletions(-)
+
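The root cause behind CVE-2017-18269: the memmove paths in memcpy-sse2-unaligned.S compared addresses and lengths with signed conditional branches (jg, jle). In a 32-bit process, any pointer at or above 0x80000000 reads as a negative signed integer, so the overlap check picks the wrong copy direction and an overlapping memmove corrupts memory. A minimal standalone C illustration of the signed/unsigned difference (the addresses below are hypothetical, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t src = 0x7ffffff0u;   /* just below the 2 GiB boundary */
  uint32_t dst = 0x80000010u;   /* just above it */

  /* `ja` after `cmp`: the unsigned relation the code needs.  */
  printf ("unsigned: dst > src -> %d\n", dst > src);  /* prints 1 */

  /* `jg` after `cmp`: the signed relation the buggy code tested;
     0x80000010 reinterpreted as int32_t is negative.  */
  printf ("signed:   dst > src -> %d\n",
          (int32_t) dst > (int32_t) src);             /* prints 0 */
  return 0;
}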
+diff --git a/ChangeLog b/ChangeLog
+index 18ed09e..afdb766 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -1,3 +1,11 @@
++2018-03-23 Andrew Senkevich <andrew.senkevich@intel.com>
++ Max Horn <max@quendi.de>
++
++ [BZ #22644]
++ * sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
++ branch conditions.
++ * string/test-memmove.c (do_test2): New testcase.
++
+ 2018-02-22 Andrew Waterman <andrew@sifive.com>
+
+ [BZ # 22884]
+diff --git a/string/test-memmove.c b/string/test-memmove.c
+index edc7a4c..64e3651 100644
+--- a/string/test-memmove.c
++++ b/string/test-memmove.c
+@@ -24,6 +24,7 @@
+ # define TEST_NAME "memmove"
+ #endif
+ #include "test-string.h"
++#include <support/test-driver.h>
+
+ char *simple_memmove (char *, const char *, size_t);
+
+@@ -245,6 +246,60 @@ do_random_tests (void)
+ }
+ }
+
++static void
++do_test2 (void)
++{
++ size_t size = 0x20000000;
++ uint32_t * large_buf;
++
++ large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
++ MAP_PRIVATE | MAP_ANON, -1, 0);
++
++ if (large_buf == MAP_FAILED)
++ error (EXIT_UNSUPPORTED, errno, "Large mmap failed");
++
++ if ((uintptr_t) large_buf > 0x80000000 - 128
++ || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
++ {
++ error (0, 0, "Large mmap allocated improperly");
++ ret = EXIT_UNSUPPORTED;
++ munmap ((void *) large_buf, size);
++ return;
++ }
++
++ size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
++ size_t arr_size = bytes_move / sizeof (uint32_t);
++ size_t i;
++
++ FOR_EACH_IMPL (impl, 0)
++ {
++ for (i = 0; i < arr_size; i++)
++ large_buf[i] = (uint32_t) i;
++
++ uint32_t * dst = &large_buf[33];
++
++#ifdef TEST_BCOPY
++ CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
++#else
++ CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
++#endif
++
++ for (i = 0; i < arr_size; i++)
++ {
++ if (dst[i] != (uint32_t) i)
++ {
++ error (0, 0,
++ "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
++ impl->name, dst, large_buf, i);
++ ret = 1;
++ break;
++ }
++ }
++ }
++
++ munmap ((void *) large_buf, size);
++}
++
+ int
+ test_main (void)
+ {
+@@ -284,6 +339,9 @@ test_main (void)
+ }
+
+ do_random_tests ();
++
++ do_test2 ();
++
+ return ret;
+ }
+
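The new do_test2 case exercises exactly that boundary: it maps 0x20000000 bytes (512 MiB) at the fixed hint 0x70000000, so a copy of bytes_move = 0x80000000 - large_buf bytes ends exactly at the 2 GiB mark and the destination window (offset by 33 words) straddles it; when the kernel does not honour the hint, the test exits with EXIT_UNSUPPORTED rather than failing. A standalone sketch of the address geometry the test sets up, assuming the mmap hint is honoured (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t large_buf = 0x70000000u;                    /* mmap hint */
  uintptr_t bytes_move = 0x80000000u - large_buf;       /* 0x10000000 */
  uintptr_t dst = large_buf + 33 * sizeof (uint32_t);   /* 132-byte overlap */

  printf ("src: %#lx .. %#lx\n", (unsigned long) large_buf,
          (unsigned long) (large_buf + bytes_move));    /* ends at 0x80000000 */
  printf ("dst: %#lx .. %#lx\n", (unsigned long) dst,
          (unsigned long) (dst + bytes_move));          /* crosses 0x80000000 */
  return 0;
}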
+diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+index 9c3bbe7..9aa17de 100644
+--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
++++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
+ cmp %edx, %eax
+
+ # ifdef USE_AS_MEMMOVE
+- jg L(check_forward)
++ ja L(check_forward)
+
+ L(mm_len_0_or_more_backward):
+ /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
+@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
+ jbe L(mm_len_0_16_bytes_backward)
+
+ cmpl $32, %ecx
+- jg L(mm_len_32_or_more_backward)
++ ja L(mm_len_32_or_more_backward)
+
+ /* Copy [0..32] and return. */
+ movdqu (%eax), %xmm0
+@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
+
+ L(mm_len_32_or_more_backward):
+ cmpl $64, %ecx
+- jg L(mm_len_64_or_more_backward)
++ ja L(mm_len_64_or_more_backward)
+
+ /* Copy [0..64] and return. */
+ movdqu (%eax), %xmm0
+@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
+
+ L(mm_len_64_or_more_backward):
+ cmpl $128, %ecx
+- jg L(mm_len_128_or_more_backward)
++ ja L(mm_len_128_or_more_backward)
+
+ /* Copy [0..128] and return. */
+ movdqu (%eax), %xmm0
+@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
+ add %ecx, %eax
+ cmp %edx, %eax
+ movl SRC(%esp), %eax
+- jle L(forward)
++ jbe L(forward)
+ PUSH (%esi)
+ PUSH (%edi)
+ PUSH (%ebx)
+@@ -269,7 +269,7 @@ L(check_forward):
+ add %edx, %ecx
+ cmp %eax, %ecx
+ movl LEN(%esp), %ecx
+- jle L(forward)
++ jbe L(forward)
+
+ /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+ separately. */
+--
+2.9.3
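Taken together, the jg -> ja and jle -> jbe changes make every address and length comparison unsigned, which is the relation a flat 32-bit address space requires. A reference model in C of the direction choice those branches implement (a sketch under that flat-address assumption, not glibc's actual code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *
ref_memmove (void *dst, const void *src, size_t len)
{
  uintptr_t d = (uintptr_t) dst, s = (uintptr_t) src;

  /* Unsigned compares, as `ja`/`jbe` perform: a forward copy is safe
     when the destination starts at or before the source, or beyond its
     end.  A signed compare misorders d and s as soon as one of them
     has the top bit set.  */
  if (d <= s || d >= s + len)
    {
      unsigned char *dp = dst;
      const unsigned char *sp = src;
      while (len--)
        *dp++ = *sp++;          /* forward: low address first */
    }
  else
    {
      unsigned char *dp = (unsigned char *) dst + len;
      const unsigned char *sp = (const unsigned char *) src + len;
      while (len--)
        *--dp = *--sp;          /* backward: high address first */
    }
  return dst;
}

int
main (void)
{
  char buf[16] = "0123456789";
  ref_memmove (buf + 2, buf, 8);             /* overlapping, dst > src */
  return memcmp (buf, "0101234567", 10) != 0;
}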