summary | refs | log | tree | commit | diff | stats
path: root/vespamalloc
diff options
context:
space:
mode:
authorHenning Baldersheim <balder@yahoo-inc.com>2023-02-15 06:15:28 +0000
committerHenning Baldersheim <balder@yahoo-inc.com>2023-02-15 06:15:28 +0000
commitdd84fb126d7e871b6b456f28bbfb00e3a2a02544 (patch)
tree537488f7324ef2732db3208d05070532c28bc39d /vespamalloc
parentc5fe5b6be07e57115cd72738a5afd928b0df60ef (diff)
Put the independent memory-copy function into a separate compilation unit.
Diffstat (limited to 'vespamalloc')
-rw-r--r--vespamalloc/src/vespamalloc/malloc/CMakeLists.txt1
-rw-r--r--vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.cpp18
-rw-r--r--vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.h11
-rw-r--r--vespamalloc/src/vespamalloc/malloc/load_as_huge.cpp21
4 files changed, 32 insertions, 19 deletions
diff --git a/vespamalloc/src/vespamalloc/malloc/CMakeLists.txt b/vespamalloc/src/vespamalloc/malloc/CMakeLists.txt
index 5f50cd97aee..8f8f798e982 100644
--- a/vespamalloc/src/vespamalloc/malloc/CMakeLists.txt
+++ b/vespamalloc/src/vespamalloc/malloc/CMakeLists.txt
@@ -72,5 +72,6 @@ vespa_add_library(vespamalloc_mmap OBJECT
vespa_add_library(vespamalloc_load_as_huge OBJECT
SOURCES
load_as_huge.cpp
+ independent_non_inlined_memcpy.cpp
DEPENDS
)
diff --git a/vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.cpp b/vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.cpp
new file mode 100644
index 00000000000..10f7c68f049
--- /dev/null
+++ b/vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.cpp
@@ -0,0 +1,18 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "independent_non_inlined_memcpy.h"
+
+namespace vespamalloc {
+
+// Simple memcpy replacement to avoid calling code in other dso.
+// No dependencies to other libraries are allowed here.
+void
+independent_non_inlined_memcpy(void * dest_in, const void * src_in, size_t n) {
+ char *dest = static_cast<char *>(dest_in);
+ const char *src = static_cast<const char *>(src_in);
+ for (size_t i(0); i < n ; i++) {
+ dest[i] = src[i];
+ }
+}
+
+}
diff --git a/vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.h b/vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.h
new file mode 100644
index 00000000000..0ba05f5383e
--- /dev/null
+++ b/vespamalloc/src/vespamalloc/malloc/independent_non_inlined_memcpy.h
@@ -0,0 +1,11 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <cstddef>
+
+namespace vespamalloc {
+
+// Simple memcpy replacement to avoid calling code in other dso.
+void independent_non_inlined_memcpy(void *dest_in, const void *src_in, size_t n);
+
+}
diff --git a/vespamalloc/src/vespamalloc/malloc/load_as_huge.cpp b/vespamalloc/src/vespamalloc/malloc/load_as_huge.cpp
index 8494689ba85..431892032ad 100644
--- a/vespamalloc/src/vespamalloc/malloc/load_as_huge.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/load_as_huge.cpp
@@ -1,5 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "independent_non_inlined_memcpy.h"
#include <cstdio>
#include <cerrno>
#include <cassert>
@@ -29,24 +30,6 @@ mmap_huge(size_t sz) {
size_t round_huge_down(size_t v) { return v & ~(HUGEPAGE_SIZE - 1); }
size_t round_huge_up(size_t v) { return round_huge_down(v + (HUGEPAGE_SIZE - 1)); }
-#ifdef __clang__
-void
-non_optimized_non_inlined_memcpy(void *dest_in, const void *src_in, size_t n) __attribute__((noinline, optnone)) ;
-#else
-void
-non_optimized_non_inlined_memcpy(void *dest_in, const void *src_in, size_t n) __attribute__((noinline, optimize(1))) ;
-#endif
-
-// Simple memcpy replacement to avoid calling code in other dso.
-void
-non_optimized_non_inlined_memcpy(void *dest_in, const void *src_in, size_t n) {
- char *dest = static_cast<char *>(dest_in);
- const char *src = static_cast<const char *>(src_in);
- for (size_t i(0); i < n ; i++) {
- dest[i] = src[i];
- }
-}
-
/**
* Make a large mapping if code is larger than HUGEPAGE_SIZE and copies the content of the various segments.
* Then remaps the areas back to its original location.
@@ -83,7 +66,7 @@ remap_segments(size_t base_vaddr, const Elf64_Phdr * segments, size_t count) {
if (madvise(dest, sz, MADV_HUGEPAGE) != 0) {
fprintf(stderr, "load_as_huge.cpp:remap_segments => madvise(%p, %ld, MADV_HUGEPAGE) FAILED, errno= %d = %s\n", dest, sz, errno, strerror(errno));
}
- non_optimized_non_inlined_memcpy(dest, reinterpret_cast<void*>(vaddr), sz);
+ vespamalloc::independent_non_inlined_memcpy(dest, reinterpret_cast<void*>(vaddr), sz);
int prot = PROT_READ;
if (segments[i].p_flags & PF_X) prot|= PROT_EXEC;
if (segments[i].p_flags & PF_W) prot|= PROT_WRITE;