path: root/meta-oe/recipes-extended/mozjs/mozjs/Manually_mmap_heap_memory_esr17.patch
Diffstat (limited to 'meta-oe/recipes-extended/mozjs/mozjs/Manually_mmap_heap_memory_esr17.patch')
-rw-r--r--  meta-oe/recipes-extended/mozjs/mozjs/Manually_mmap_heap_memory_esr17.patch | 112
1 file changed, 0 insertions(+), 112 deletions(-)
diff --git a/meta-oe/recipes-extended/mozjs/mozjs/Manually_mmap_heap_memory_esr17.patch b/meta-oe/recipes-extended/mozjs/mozjs/Manually_mmap_heap_memory_esr17.patch
deleted file mode 100644
index 83f4543193..0000000000
--- a/meta-oe/recipes-extended/mozjs/mozjs/Manually_mmap_heap_memory_esr17.patch
+++ /dev/null
@@ -1,112 +0,0 @@
-From 1d1fa95c8ff7697e46343385a79a8f7e5c514a87 Mon Sep 17 00:00:00 2001
-From: Zheng Xu <zheng.xu@linaro.org>
-Date: Fri, 2 Sep 2016 17:40:05 +0800
-Subject: [PATCH] Bug 1143022 - Manually mmap on arm64 to ensure high 17 bits
- are clear. r=ehoogeveen
-
-There might be 48-bit VA on arm64 depending on kernel configuration.
-Manually mmap heap memory to align with the assumption made by JS engine.
-
-Change-Id: Ic5d2b2fe4b758b3c87cc0688348af7e71a991146
-
-Upstream-status: Backport
-
----
- js/src/gc/Memory.cpp | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 71 insertions(+), 2 deletions(-)
-
-diff --git a/js/src/gc/Memory.cpp b/js/src/gc/Memory.cpp
-index e5ad018..4149adf 100644
---- a/js/src/gc/Memory.cpp
-+++ b/js/src/gc/Memory.cpp
-@@ -309,6 +309,75 @@ InitMemorySubsystem()
- #endif
- }
-
-+static inline void *
-+MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
-+{
-+#if defined(__ia64__)
-+ /*
-+ * The JS engine assumes that all allocated pointers have their high 17 bits clear,
-+ * which ia64's mmap doesn't support directly. However, we can emulate it by passing
-+ * mmap an "addr" parameter with those bits clear. The mmap will return that address,
-+ * or the nearest available memory above that address, providing a near-guarantee
-+ * that those bits are clear. If they are not, we return NULL below to indicate
-+ * out-of-memory.
-+ *
-+ * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
-+ * address space.
-+ *
-+ * See Bug 589735 for more information.
-+ */
-+ void *region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
-+ if (region == MAP_FAILED)
-+ return MAP_FAILED;
-+ /*
-+ * If the allocated memory doesn't have its upper 17 bits clear, consider it
-+ * as out of memory.
-+ */
-+ if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-+ JS_ALWAYS_TRUE(0 == munmap(region, length));
-+ return MAP_FAILED;
-+ }
-+ return region;
-+#elif defined(__aarch64__)
-+ /*
-+ * There might be a similar virtual-address issue on arm64, depending on the
-+ * hardware and kernel configuration, but the workaround is slightly
-+ * different due to the different mmap behavior.
-+ *
-+ * TODO: Merge with the above code block if this implementation works for
-+ * ia64 and sparc64.
-+ */
-+ const uintptr_t start = UINT64_C(0x0000070000000000);
-+ const uintptr_t end = UINT64_C(0x0000800000000000);
-+ const uintptr_t step = ChunkSize;
-+ /*
-+ * Optimization options if there are too many retries in practice:
-+ * 1. Examine /proc/self/maps to find an available address. This file is
-+ * not always available, however. In addition, even if we examine
-+ * /proc/self/maps, we may still need to retry several times due to
-+ * racing with other threads.
-+ * 2. Use a global/static variable with lock to track the addresses we have
-+ * allocated or tried.
-+ */
-+ uintptr_t hint;
-+ void* region = MAP_FAILED;
-+ for (hint = start; region == MAP_FAILED && hint + length <= end; hint += step) {
-+ region = mmap((void*)hint, length, prot, flags, fd, offset);
-+ if (region != MAP_FAILED) {
-+ if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-+ if (munmap(region, length)) {
-+ MOZ_ASSERT(errno == ENOMEM);
-+ }
-+ region = MAP_FAILED;
-+ }
-+ }
-+ }
-+ return region == MAP_FAILED ? NULL : region;
-+#else
-+ return mmap(NULL, length, prot, flags, fd, offset);
-+#endif
-+}
-+
- void *
- MapAlignedPages(size_t size, size_t alignment)
- {
-@@ -322,12 +391,12 @@ MapAlignedPages(size_t size, size_t alignment)
-
- /* Special case: If we want page alignment, no further work is needed. */
- if (alignment == PageSize) {
-- return mmap(NULL, size, prot, flags, -1, 0);
-+ return MapMemory(size, prot, flags, -1, 0);
- }
-
- /* Overallocate and unmap the region's edges. */
- size_t reqSize = Min(size + 2 * alignment, 2 * size);
-- void *region = mmap(NULL, reqSize, prot, flags, -1, 0);
-+ void *region = MapMemory(reqSize, prot, flags, -1, 0);
- if (region == MAP_FAILED)
- return NULL;
-
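
For reference, here is a minimal standalone sketch (not the mozjs code itself) of the technique the deleted patch applied on arm64: pass mmap() a hint address below 2^47 and retry at increasing hints until the returned region keeps its high 17 bits clear. The names MapBelow47Bits and kChunkSize are illustrative assumptions; in the actual patch the step is the GC ChunkSize and failure is reported as MAP_FAILED/NULL to the caller.

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

// Try to map `length` anonymous bytes whose top 17 address bits are clear,
// mirroring the hint-and-retry loop in the patch above.
static void *MapBelow47Bits(size_t length)
{
    const uintptr_t start = UINT64_C(0x0000070000000000);
    const uintptr_t end   = UINT64_C(0x0000800000000000);
    const uintptr_t kChunkSize = UINT64_C(1) << 20;        /* assumed step size */
    const uintptr_t highBits   = UINT64_C(0xffff800000000000);

    for (uintptr_t hint = start; hint + length <= end; hint += kChunkSize) {
        void *region = mmap(reinterpret_cast<void *>(hint), length,
                            PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (region == MAP_FAILED)
            continue;                    /* try the next hint, as the patch does */
        /* Accept only mappings whose last byte still has the top 17 bits clear. */
        if ((reinterpret_cast<uintptr_t>(region) + (length - 1)) & highBits) {
            munmap(region, length);
            continue;
        }
        return region;
    }
    return nullptr;                      /* treated as out-of-memory by the caller */
}

int main()
{
    void *p = MapBelow47Bits(UINT64_C(1) << 20);
    std::printf("mapped at %p\n", p);
    if (p)
        munmap(p, UINT64_C(1) << 20);
    return 0;
}

As the patch's comment notes, a failed hint simply advances to the next candidate address; scanning /proc/self/maps or keeping a shared address cursor are mentioned only as possible optimizations if retries become common in practice.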