path: root/morecore.c
author    David Gibson <dgibson@sneetch.ozlabs.ibm.com>  2005-11-22 16:46:40 +1100
committer David Gibson <dgibson@sneetch.ozlabs.ibm.com>  2005-11-22 16:46:40 +1100
commit    3627b79c57f38bb24c6c073fd679cb23aeecf344 (patch)
tree      cc4b604638166dad9593ad78633936b43853d4a4 /morecore.c
parent    95894b7cd7a36ecb3675650093351bc90918ed86 (diff)
download  libhugetlbfs-3627b79c57f38bb24c6c073fd679cb23aeecf344.tar.gz
Various updates.
Diffstat (limited to 'morecore.c')
-rw-r--r--  morecore.c | 53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/morecore.c b/morecore.c
index 023a0ea..c5249e0 100644
--- a/morecore.c
+++ b/morecore.c
@@ -13,22 +13,21 @@
#include "libhugetlbfs_internal.h"
static int heap_fd;
-static long blocksize = (16 * 1024 * 1024);
+static long blocksize;
static void *heapbase;
static void *heaptop;
static long mapsize;
-
/*
- * Our plan is to ask for pages 'roughly' at the BASE. We expect nee require
- * the kernel to offer us sequential pages from wherever it first gave us a
- * page. If it does not do so, we return the page and pretend there are none
- * this covers us for the case where another map is in the way. This is
- * required because 'morecore' must have 'sbrk' semantics, ie. return
- * sequential, contigious memory blocks. Luckily, if it does not do so
- * and we error out malloc will happily go back to small pages and use mmap
- * to get them. Hurrah.
+ * Our plan is to ask for pages 'roughly' at the BASE. We expect and
+ * require the kernel to offer us sequential pages from wherever it
+ * first gave us a page. If it does not do so, we return the page and
+ * pretend there are none this covers us for the case where another
+ * map is in the way. This is required because 'morecore' must have
+ * 'sbrk' semantics, ie. return sequential, contigious memory blocks.
+ * Luckily, if it does not do so and we error out malloc will happily
+ * go back to small pages and use mmap to get them. Hurrah.
*/
static void *hugetlbfs_morecore(ptrdiff_t increment)
@@ -49,18 +48,21 @@ static void *hugetlbfs_morecore(ptrdiff_t increment)
DEBUG("Attempting to map %ld bytes\n", newsize);
p = mmap(heapbase + mapsize, newsize, PROT_READ|PROT_WRITE,
- MAP_PRIVATE | MAP_FIXED, heap_fd, mapsize);
+ MAP_PRIVATE, heap_fd, mapsize);
if (p == MAP_FAILED) {
- ERROR("Mapping failed in hugetlbfs_morecore()\n");
+ WARNING("Mapping failed in hugetlbfs_morecore()\n");
return NULL;
}
- if (! heapbase) {
+ if (! mapsize) {
+ if (heapbase && (heapbase != p))
+ WARNING("Heap originates at %p instead of %p\n",
+ p, heapbase);
heapbase = heaptop = p;
} else if (p != (heapbase + mapsize)) {
/* Couldn't get the mapping where we wanted */
munmap(p, newsize);
- ERROR("Mapped at %p instead of %p in hugetlbfs_morecore()\n",
+ WARNING("Mapped at %p instead of %p in hugetlbfs_morecore()\n",
p, heapbase + mapsize);
return NULL;
}
@@ -78,14 +80,18 @@ static void *hugetlbfs_morecore(ptrdiff_t increment)
static void __attribute__((constructor)) setup_morecore(void)
{
char *env, *ep;
- unsigned long heapaddr = 0;
-
- DEBUG("setup_morecore()\n");
+ unsigned long heapaddr;
env = getenv("HUGETLB_MORECORE");
if (! env)
return;
+ blocksize = gethugepagesize();
+ if (! blocksize) {
+ ERROR("Hugepages unavailable\n");
+ return;
+ }
+
heap_fd = hugetlbfs_unlinked_fd();
if (heap_fd < 0) {
ERROR("Couldn't open hugetlbfs file for morecore\n");
@@ -100,18 +106,21 @@ static void __attribute__((constructor)) setup_morecore(void)
env);
return;
}
- }
-
- if (! heapaddr) {
+ } else {
heapaddr = (unsigned long)sbrk(0);
heapaddr = ALIGN(heapaddr, hugetlbfs_vaddr_granularity());
}
- DEBUG("Placing hugepage morecore heap at 0x%lx\n", heapaddr);
+ DEBUG("setup_morecore(): heapaddr = 0x%lx\n", heapaddr);
heaptop = heapbase = (void *)heapaddr;
__morecore = &hugetlbfs_morecore;
- /* we always want to use our morecore, not ordinary mmap() */
+ /* Set some allocator options more appropriate for hugepages */
+ mallopt(M_TRIM_THRESHOLD, blocksize / 2);
+ mallopt(M_TOP_PAD, blocksize / 2);
+ /* we always want to use our morecore, not ordinary mmap().
+ * This doesn't appear to prohibit malloc() from falling back
+ * to mmap() if we run out of hugepages. */
mallopt(M_MMAP_MAX, 0);
}
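
For anyone trying the patched morecore path: the hook is only installed when setup_morecore() finds the HUGETLB_MORECORE environment variable, after which ordinary malloc() growth is served by hugetlbfs_morecore(). The stand-alone test program below is an illustrative sketch, not part of this commit; the LD_PRELOAD path and the HUGETLB_MORECORE value shown in its comment are assumptions about the local build and about what this early version of the library accepts.

/*
 * Illustrative sketch (not part of this patch): grow the C heap with
 * plain malloc() so that glibc calls the __morecore hook installed by
 * setup_morecore().  Suggested invocation, where the library path and
 * the HUGETLB_MORECORE value are assumptions about the local setup:
 *
 *   LD_PRELOAD=./libhugetlbfs.so HUGETLB_MORECORE=yes ./heaptest
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t chunk = 1024 * 1024;		/* 1 MiB per allocation */
	size_t total = 64 * 1024 * 1024;	/* grow well past one 16 MiB hugepage */
	size_t done;

	for (done = 0; done < total; done += chunk) {
		char *p = malloc(chunk);
		if (!p) {
			perror("malloc");
			return 1;
		}
		memset(p, 0xaa, chunk);	/* touch the memory so pages are faulted in */
	}

	printf("allocated %zu bytes through malloc\n", done);
	return 0;
}

Because setup_morecore() sets M_MMAP_MAX to 0, even the large allocations above go through the morecore hook rather than malloc's own mmap() path, which is exactly the behaviour the comment at the end of the patch describes.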