2017-11-01 07:56:27 +00:00
|
|
|
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
|
|
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
2017-08-31 03:02:01 +00:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Portions of this file were originally under the following license:
|
|
|
|
//
|
|
|
|
// Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
|
|
|
|
// All rights reserved.
|
|
|
|
// Copyright (C) 2007-2017 Mozilla Foundation.
|
|
|
|
//
|
|
|
|
// Redistribution and use in source and binary forms, with or without
|
|
|
|
// modification, are permitted provided that the following conditions
|
|
|
|
// are met:
|
|
|
|
// 1. Redistributions of source code must retain the above copyright
|
|
|
|
// notice(s), this list of conditions and the following disclaimer as
|
|
|
|
// the first lines of this file unmodified other than the possible
|
|
|
|
// addition of one or more copyright notices.
|
|
|
|
// 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
// notice(s), this list of conditions and the following disclaimer in
|
|
|
|
// the documentation and/or other materials provided with the
|
|
|
|
// distribution.
|
|
|
|
//
|
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
|
|
|
|
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
|
|
|
|
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
|
|
|
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
|
|
|
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
|
|
|
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
|
|
|
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
//
|
|
|
|
// *****************************************************************************
|
|
|
|
//
|
|
|
|
// This allocator implementation is designed to provide scalable performance
|
|
|
|
// for multi-threaded programs on multi-processor systems. The following
|
|
|
|
// features are included for this purpose:
|
|
|
|
//
|
|
|
|
// + Multiple arenas are used if there are multiple CPUs, which reduces lock
|
|
|
|
// contention and cache sloshing.
|
|
|
|
//
|
|
|
|
// + Cache line sharing between arenas is avoided for internal data
|
|
|
|
// structures.
|
|
|
|
//
|
|
|
|
// + Memory is managed in chunks and runs (chunks can be split into runs),
|
|
|
|
// rather than as individual pages. This provides a constant-time
|
|
|
|
// mechanism for associating allocations with particular arenas.
|
|
|
|
//
|
|
|
|
// Allocation requests are rounded up to the nearest size class, and no record
|
|
|
|
// of the original request size is maintained. Allocations are broken into
|
|
|
|
// categories according to size class. Assuming runtime defaults, 4 kB pages
|
|
|
|
// and a 16 byte quantum on a 32-bit system, the size classes in each category
|
|
|
|
// are as follows:
|
|
|
|
//
|
|
|
|
// |=====================================|
|
|
|
|
// | Category | Subcategory | Size |
|
|
|
|
// |=====================================|
|
2017-11-03 01:10:50 +00:00
|
|
|
// | Small | Tiny | 4 |
|
2017-10-29 12:53:31 +00:00
|
|
|
// | | | 8 |
|
|
|
|
// | |----------------+---------|
|
|
|
|
// | | Quantum-spaced | 16 |
|
|
|
|
// | | | 32 |
|
|
|
|
// | | | 48 |
|
|
|
|
// | | | ... |
|
|
|
|
// | | | 480 |
|
|
|
|
// | | | 496 |
|
|
|
|
// | | | 512 |
|
|
|
|
// | |----------------+---------|
|
|
|
|
// | | Sub-page | 1 kB |
|
|
|
|
// | | | 2 kB |
|
|
|
|
// |=====================================|
|
|
|
|
// | Large | 4 kB |
|
|
|
|
// | | 8 kB |
|
|
|
|
// | | 12 kB |
|
|
|
|
// | | ... |
|
|
|
|
// | | 1012 kB |
|
|
|
|
// | | 1016 kB |
|
|
|
|
// | | 1020 kB |
|
|
|
|
// |=====================================|
|
|
|
|
// | Huge | 1 MB |
|
|
|
|
// | | 2 MB |
|
|
|
|
// | | 3 MB |
|
|
|
|
// | | ... |
|
|
|
|
// |=====================================|
|
|
|
|
//
|
|
|
|
// NOTE: Due to Mozilla bug 691003, we cannot reserve less than one word for an
|
|
|
|
// allocation on Linux or Mac. So on 32-bit *nix, the smallest bucket size is
|
|
|
|
// 4 bytes, and on 64-bit, the smallest bucket size is 8 bytes.
|
|
|
|
//
|
|
|
|
// A different mechanism is used for each category:
|
|
|
|
//
|
|
|
|
// Small : Each size class is segregated into its own set of runs. Each run
|
|
|
|
// maintains a bitmap of which regions are free/allocated.
|
|
|
|
//
|
|
|
|
// Large : Each allocation is backed by a dedicated run. Metadata are stored
|
|
|
|
// in the associated arena chunk header maps.
|
|
|
|
//
|
|
|
|
// Huge : Each allocation is backed by a dedicated contiguous set of chunks.
|
|
|
|
// Metadata are stored in a separate red-black tree.
|
|
|
|
//
|
|
|
|
// *****************************************************************************
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-05-12 12:52:25 +00:00
|
|
|
#include "mozmemory_wrap.h"
|
2017-08-31 01:29:11 +00:00
|
|
|
#include "mozjemalloc.h"
|
2017-11-01 07:10:24 +00:00
|
|
|
#include "mozjemalloc_types.h"
|
|
|
|
|
|
|
|
#include <cstring>
|
|
|
|
#include <cerrno>
|
|
|
|
#ifdef XP_WIN
|
|
|
|
#include <io.h>
|
|
|
|
#include <windows.h>
|
|
|
|
#else
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
|
|
|
#ifdef XP_DARWIN
|
|
|
|
#include <libkern/OSAtomic.h>
|
|
|
|
#include <mach/mach_init.h>
|
|
|
|
#include <mach/vm_map.h>
|
|
|
|
#endif
|
|
|
|
|
2017-10-26 00:51:00 +00:00
|
|
|
#include "mozilla/Atomics.h"
|
2017-10-27 23:42:59 +00:00
|
|
|
#include "mozilla/Alignment.h"
|
2017-11-01 07:10:24 +00:00
|
|
|
#include "mozilla/Assertions.h"
|
|
|
|
#include "mozilla/Attributes.h"
|
2017-10-30 02:28:17 +00:00
|
|
|
#include "mozilla/CheckedInt.h"
|
2017-09-01 23:55:42 +00:00
|
|
|
#include "mozilla/DoublyLinkedList.h"
|
2017-10-26 00:51:00 +00:00
|
|
|
#include "mozilla/Likely.h"
|
2017-10-30 08:22:36 +00:00
|
|
|
#include "mozilla/MathAlgorithms.h"
|
2017-10-26 00:51:00 +00:00
|
|
|
#include "mozilla/Sprintf.h"
|
2017-11-01 07:10:24 +00:00
|
|
|
// Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
|
|
|
|
// instead of the one defined here; use only MozTagAnonymousMemory().
|
|
|
|
#include "mozilla/TaggedAnonymousMemory.h"
|
|
|
|
#include "mozilla/ThreadLocal.h"
|
2017-10-25 23:50:49 +00:00
|
|
|
#include "mozilla/UniquePtr.h"
|
2017-10-27 01:31:50 +00:00
|
|
|
#include "mozilla/Unused.h"
|
2017-10-27 23:42:59 +00:00
|
|
|
#include "mozilla/fallible.h"
|
2017-11-01 07:10:24 +00:00
|
|
|
#include "rb.h"
|
2017-12-03 05:21:19 +00:00
|
|
|
#include "Mutex.h"
|
2017-10-30 02:43:10 +00:00
|
|
|
#include "Utils.h"
|
2017-05-12 12:52:25 +00:00
|
|
|
|
2017-11-01 07:10:24 +00:00
|
|
|
using namespace mozilla;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
|
|
|
|
// operating system. If we release 1MB of live pages with MADV_DONTNEED, our
|
|
|
|
// RSS will decrease by 1MB (almost) immediately.
|
|
|
|
//
|
|
|
|
// On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
|
|
|
|
// on Mac doesn't cause the OS to release the specified pages immediately; the
|
|
|
|
// OS keeps them in our process until the machine comes under memory pressure.
|
|
|
|
//
|
|
|
|
// It's therefore difficult to measure the process's RSS on Mac, since, in the
|
|
|
|
// absence of memory pressure, the contribution from the heap to RSS will not
|
|
|
|
// decrease due to our madvise calls.
|
|
|
|
//
|
|
|
|
// We therefore define MALLOC_DOUBLE_PURGE on Mac. This causes jemalloc to
|
|
|
|
// track which pages have been MADV_FREE'd. You can then call
|
|
|
|
// jemalloc_purge_freed_pages(), which will force the OS to release those
|
|
|
|
// MADV_FREE'd pages, making the process's RSS reflect its true memory usage.
|
|
|
|
//
|
|
|
|
// The jemalloc_purge_freed_pages definition in memory/build/mozmemory.h needs
|
|
|
|
// to be adjusted if MALLOC_DOUBLE_PURGE is ever enabled on Linux.
|
|
|
|
|
2017-08-30 07:54:17 +00:00
|
|
|
#ifdef XP_DARWIN
|
2011-10-24 17:23:47 +00:00
|
|
|
#define MALLOC_DOUBLE_PURGE
|
|
|
|
#endif
|
|
|
|
|
2017-08-30 07:53:10 +00:00
|
|
|
#ifdef XP_WIN
|
2017-11-01 07:45:24 +00:00
|
|
|
#define MALLOC_DECOMMIT
|
|
|
|
#endif
|
2011-05-08 08:29:59 +00:00
|
|
|
|
2017-11-01 09:33:24 +00:00
|
|
|
// When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
|
|
|
|
// compile-time for better performance, as opposed to determined at
|
|
|
|
// runtime. Some platforms can have different page sizes at runtime
|
|
|
|
// depending on kernel configuration, so they are opted out by default.
|
|
|
|
// Debug builds are opted out too, for test coverage.
|
|
|
|
#ifndef MOZ_DEBUG
|
|
|
|
#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && \
|
|
|
|
!defined(__aarch64__)
|
|
|
|
#define MALLOC_STATIC_PAGESIZE 1
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2017-11-01 07:45:24 +00:00
|
|
|
#ifdef XP_WIN
|
2017-10-29 12:53:37 +00:00
|
|
|
#define STDERR_FILENO 2
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Implement getenv without using malloc.
|
2008-02-06 23:06:50 +00:00
|
|
|
static char mozillaMallocOptionsBuf[64];
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#define getenv xgetenv
|
|
|
|
// Replacement for getenv() on Windows that performs no heap allocation:
// the value is copied into a static buffer, so the returned pointer is only
// valid until the next call, and the value is truncated to
// sizeof(mozillaMallocOptionsBuf) - 1 characters. Installed under the name
// xgetenv via the preceding #define to avoid clashing with the CRT's getenv.
static char*
getenv(const char* name)
{
  // GetEnvironmentVariableA returns the number of characters stored, or 0
  // when the variable doesn't exist (or on error), in which case we return
  // nullptr like getenv() would.
  if (GetEnvironmentVariableA(
        name, mozillaMallocOptionsBuf, sizeof(mozillaMallocOptionsBuf)) > 0) {
    return mozillaMallocOptionsBuf;
  }

  return nullptr;
}
|
|
|
|
#endif
|
|
|
|
|
2017-08-30 07:53:10 +00:00
|
|
|
#ifndef XP_WIN
|
2008-02-06 23:06:50 +00:00
|
|
|
#ifndef MADV_FREE
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MADV_FREE MADV_DONTNEED
|
2008-02-06 23:06:50 +00:00
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Some tools, such as /dev/dsp wrappers and LD_PRELOAD libraries, happen
// to override mmap() and call dlsym() from their overridden mmap(). The
// problem is that dlsym() calls malloc(), and this ends up in a deadlock
// in jemalloc.
|
|
|
|
// On these systems, we prefer to directly use the system call.
|
|
|
|
// We do that for Linux systems and kfreebsd with GNU userland.
|
|
|
|
// Note sanity checks are not done (alignment of offset, ...) because
|
|
|
|
// the uses of mmap are pretty limited, in jemalloc.
|
|
|
|
//
|
|
|
|
// On Alpha, glibc has a bug that prevents syscall() to work for system
|
|
|
|
// calls with 6 arguments.
|
2017-10-29 12:53:37 +00:00
|
|
|
#if (defined(XP_LINUX) && !defined(__alpha__)) || \
|
|
|
|
(defined(__FreeBSD_kernel__) && defined(__GLIBC__))
|
2010-08-18 17:46:59 +00:00
|
|
|
#include <sys/syscall.h>
|
|
|
|
#if defined(SYS_mmap) || defined(SYS_mmap2)
|
2017-10-29 12:53:37 +00:00
|
|
|
// Direct wrapper around the mmap system call, substituted for the libc
// mmap() (via the #define that follows) so that jemalloc never re-enters an
// LD_PRELOADed mmap override whose dlsym() use would deadlock in malloc.
// Takes the same arguments as mmap(2); no sanity checks are performed on
// them (see the comment above about jemalloc's limited use of mmap).
static inline void*
_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
// S390 only passes one argument to the mmap system call, which is a
// pointer to a structure containing the arguments.
#ifdef __s390__
  struct
  {
    void* addr;
    size_t length;
    long prot;
    long flags;
    long fd;
    off_t offset;
  } args = { addr, length, prot, flags, fd, offset };
  return (void*)syscall(SYS_mmap, &args);
#else
#if defined(ANDROID) && defined(__aarch64__) && defined(SYS_mmap2)
// Android NDK defines SYS_mmap2 for AArch64 despite it not supporting mmap2.
#undef SYS_mmap2
#endif
#ifdef SYS_mmap2
  // mmap2 takes its offset in 4096-byte units rather than bytes, hence the
  // shift.
  return (void*)syscall(SYS_mmap2, addr, length, prot, flags, fd, offset >> 12);
#else
  return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
#endif
#endif
}
|
|
|
|
#define mmap _mmap
|
|
|
|
#define munmap(a, l) syscall(SYS_munmap, a, l)
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2017-11-01 08:54:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Structures for chunk headers for chunks used for non-huge allocations.
|
|
|
|
|
|
|
|
struct arena_t;
|
|
|
|
|
|
|
|
// Each element of the chunk map corresponds to one page within the chunk.
struct arena_chunk_map_t
{
  // Linkage for run trees. There are two disjoint uses:
  //
  // 1) arena_t's tree of available runs.
  // 2) arena_run_t conceptually uses this linkage for in-use non-full
  //    runs, rather than directly embedding linkage.
  RedBlackTreeNode<arena_chunk_map_t> link;

  // Run address (or size) and various flags are stored together. The bit
  // layout looks like (assuming 32-bit system):
  //
  //   ???????? ???????? ????---- -mckdzla
  //
  // ? : Unallocated: Run address for first/last pages, unset for internal
  //                  pages.
  //     Small: Run address.
  //     Large: Run size for first page, unset for trailing pages.
  // - : Unused.
  // m : MADV_FREE/MADV_DONTNEED'ed?
  // c : decommitted?
  // k : key?
  // d : dirty?
  // z : zeroed?
  // l : large?
  // a : allocated?
  //
  // Following are example bit patterns for the three types of runs.
  //
  // r : run address
  // s : run size
  // x : don't care
  // - : 0
  // [cdzla] : bit set
  //
  //   Unallocated:
  //     ssssssss ssssssss ssss---- --c-----
  //     xxxxxxxx xxxxxxxx xxxx---- ----d---
  //     ssssssss ssssssss ssss---- -----z--
  //
  //   Small:
  //     rrrrrrrr rrrrrrrr rrrr---- -------a
  //     rrrrrrrr rrrrrrrr rrrr---- -------a
  //     rrrrrrrr rrrrrrrr rrrr---- -------a
  //
  //   Large:
  //     ssssssss ssssssss ssss---- ------la
  //     -------- -------- -------- ------la
  //     -------- -------- -------- ------la
  size_t bits;

// The flag bits stored in the low bits of `bits` above.
//
// Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
//
// If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
// re-committed with pages_commit() before it may be touched. If
// MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
//
// If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
// are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
// CHUNK_MAP_MADVISED.
//
// Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
// defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
// When it's finally freed with jemalloc_purge_freed_pages, the page is marked
// as CHUNK_MAP_DECOMMITTED.
#define CHUNK_MAP_MADVISED ((size_t)0x40U)
#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
#define CHUNK_MAP_MADVISED_OR_DECOMMITTED \
  (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
#define CHUNK_MAP_KEY ((size_t)0x10U)
#define CHUNK_MAP_DIRTY ((size_t)0x08U)
#define CHUNK_MAP_ZEROED ((size_t)0x04U)
#define CHUNK_MAP_LARGE ((size_t)0x02U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
};
|
|
|
|
|
|
|
|
// Arena chunk header. Lives at the start of each (non-huge) chunk; the pages
// following the header hold the actual runs.
struct arena_chunk_t
{
  // Arena that owns the chunk.
  arena_t* arena;

  // Linkage for the arena's tree of dirty chunks.
  RedBlackTreeNode<arena_chunk_t> link_dirty;

#ifdef MALLOC_DOUBLE_PURGE
  // If we're double-purging, we maintain a linked list of chunks which
  // have pages which have been madvise(MADV_FREE)'d but not explicitly
  // purged.
  //
  // We're currently lazy and don't remove a chunk from this list when
  // all its madvised pages are recommitted.
  DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
#endif

  // Number of dirty pages.
  size_t ndirty;

  // Map of pages within chunk that keeps track of free/large/small.
  // Declared with one element but dynamically sized at allocation time to
  // hold one entry per page in the chunk (see the gChunkHeaderNumPages
  // computation, which sizes the header accordingly).
  arena_chunk_map_t map[1]; // Dynamically sized.
};
|
|
|
|
|
|
|
|
// ***************************************************************************
|
|
|
|
// Constants defining allocator size classes and behavior.
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Maximum size of L1 cache line. This is used to avoid cache line aliasing,
|
|
|
|
// so over-estimates are okay (up to a point), but under-estimates will
|
|
|
|
// negatively affect performance.
|
2017-11-01 10:34:41 +00:00
|
|
|
static const size_t kCacheLineSize = 64;
|
2017-10-29 12:50:49 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Smallest size class to support. On Windows the smallest allocation size
|
|
|
|
// must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
|
|
|
|
// malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
|
2017-08-30 07:53:10 +00:00
|
|
|
#ifdef XP_WIN
|
2017-11-03 01:10:50 +00:00
|
|
|
static const size_t kMinTinyClass = sizeof(void*) * 2;
|
2011-10-05 18:03:39 +00:00
|
|
|
#else
|
2017-11-03 01:10:50 +00:00
|
|
|
static const size_t kMinTinyClass = sizeof(void*);
|
2011-10-05 18:03:39 +00:00
|
|
|
#endif
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-03 01:10:50 +00:00
|
|
|
// Maximum tiny size class.
|
|
|
|
static const size_t kMaxTinyClass = 8;
|
|
|
|
|
|
|
|
// Amount (quantum) separating quantum-spaced size classes.
|
|
|
|
static const size_t kQuantum = 16;
|
|
|
|
static const size_t kQuantumMask = kQuantum - 1;
|
2017-10-29 12:50:49 +00:00
|
|
|
|
2017-11-03 01:10:50 +00:00
|
|
|
// Smallest quantum-spaced size classes. It could actually also be labelled a
|
|
|
|
// tiny allocation, and is spaced as such from the largest tiny size class.
|
|
|
|
// Tiny classes being powers of 2, this is twice as large as the largest of
|
|
|
|
// them.
|
|
|
|
static const size_t kMinQuantumClass = kMaxTinyClass * 2;
|
2017-09-28 06:27:59 +00:00
|
|
|
|
2017-11-03 01:10:50 +00:00
|
|
|
// Largest quantum-spaced size classes.
|
|
|
|
static const size_t kMaxQuantumClass = 512;
|
|
|
|
|
|
|
|
static_assert(kMaxQuantumClass % kQuantum == 0,
|
|
|
|
"kMaxQuantumClass is not a multiple of kQuantum");
|
2017-09-28 06:27:59 +00:00
|
|
|
|
2017-11-03 03:21:53 +00:00
|
|
|
// Number of (2^n)-spaced tiny classes.
|
|
|
|
static const size_t kNumTinyClasses =
|
|
|
|
LOG2(kMinQuantumClass) - LOG2(kMinTinyClass);
|
2017-09-28 06:27:59 +00:00
|
|
|
|
2017-11-03 03:21:53 +00:00
|
|
|
// Number of quantum-spaced classes.
|
|
|
|
static const size_t kNumQuantumClasses = kMaxQuantumClass / kQuantum;
|
2017-09-28 06:27:59 +00:00
|
|
|
|
2017-11-03 03:07:16 +00:00
|
|
|
// Size and alignment of memory chunks that are allocated by the OS's virtual
|
|
|
|
// memory system.
|
|
|
|
static const size_t kChunkSize = 1_MiB;
|
|
|
|
static const size_t kChunkSizeMask = kChunkSize - 1;
|
2017-09-26 00:08:00 +00:00
|
|
|
|
2017-11-01 09:33:24 +00:00
|
|
|
#ifdef MALLOC_STATIC_PAGESIZE
|
2017-10-29 12:53:31 +00:00
|
|
|
// VM page size. It must divide the runtime CPU page size or the code
|
|
|
|
// will abort.
|
|
|
|
// Platform specific page size conditions copied from js/public/HeapAPI.h
|
2017-10-29 12:53:37 +00:00
|
|
|
#if (defined(SOLARIS) || defined(__FreeBSD__)) && \
|
|
|
|
(defined(__sparc) || defined(__sparcv9) || defined(__ia64))
|
2017-11-03 03:13:17 +00:00
|
|
|
static const size_t gPageSize = 8_KiB;
|
2017-09-26 00:08:00 +00:00
|
|
|
#elif defined(__powerpc64__)
|
2017-11-03 03:13:17 +00:00
|
|
|
static const size_t gPageSize = 64_KiB;
|
2017-09-26 00:08:00 +00:00
|
|
|
#else
|
2017-11-03 03:13:17 +00:00
|
|
|
static const size_t gPageSize = 4_KiB;
|
2017-09-26 00:08:00 +00:00
|
|
|
#endif
|
|
|
|
|
2017-11-01 09:33:24 +00:00
|
|
|
#else
|
2017-11-03 03:13:17 +00:00
|
|
|
static size_t gPageSize;
|
2017-11-01 09:33:24 +00:00
|
|
|
#endif
|
2017-09-26 00:08:00 +00:00
|
|
|
|
2017-11-01 09:33:24 +00:00
|
|
|
#ifdef MALLOC_STATIC_PAGESIZE
|
|
|
|
#define DECLARE_GLOBAL(type, name)
|
|
|
|
#define DEFINE_GLOBALS
|
|
|
|
#define END_GLOBALS
|
|
|
|
#define DEFINE_GLOBAL(type) static const type
|
|
|
|
#define GLOBAL_LOG2 LOG2
|
|
|
|
#define GLOBAL_ASSERT_HELPER1(x) static_assert(x, #x)
|
|
|
|
#define GLOBAL_ASSERT_HELPER2(x, y) static_assert(x, y)
|
|
|
|
#define GLOBAL_ASSERT(...) \
|
|
|
|
MACRO_CALL( \
|
|
|
|
MOZ_PASTE_PREFIX_AND_ARG_COUNT(GLOBAL_ASSERT_HELPER, __VA_ARGS__), \
|
|
|
|
(__VA_ARGS__))
|
|
|
|
#else
|
|
|
|
#define DECLARE_GLOBAL(type, name) static type name;
|
|
|
|
#define DEFINE_GLOBALS \
|
|
|
|
static void DefineGlobals() \
|
|
|
|
{
|
|
|
|
#define END_GLOBALS }
|
|
|
|
#define DEFINE_GLOBAL(type)
|
|
|
|
#define GLOBAL_LOG2 FloorLog2
|
|
|
|
#define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
|
2017-09-26 00:08:00 +00:00
|
|
|
#endif
|
|
|
|
|
2017-11-03 01:10:50 +00:00
|
|
|
DECLARE_GLOBAL(size_t, gMaxSubPageClass)
|
2017-11-03 03:21:53 +00:00
|
|
|
DECLARE_GLOBAL(uint8_t, gNumSubPageClasses)
|
2017-11-03 03:13:17 +00:00
|
|
|
DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
|
|
|
|
DECLARE_GLOBAL(size_t, gPageSizeMask)
|
2017-11-03 03:16:11 +00:00
|
|
|
DECLARE_GLOBAL(size_t, gChunkNumPages)
|
|
|
|
DECLARE_GLOBAL(size_t, gChunkHeaderNumPages)
|
2017-11-03 01:10:50 +00:00
|
|
|
DECLARE_GLOBAL(size_t, gMaxLargeClass)
|
2017-09-26 00:08:00 +00:00
|
|
|
|
2017-11-01 09:33:24 +00:00
|
|
|
// Depending on MALLOC_STATIC_PAGESIZE, the following either expands to a set
// of compile-time constants (with static_assert checks), or to a DefineGlobals
// function computing them at runtime (with MOZ_RELEASE_ASSERT checks). See
// the DEFINE_GLOBALS/DEFINE_GLOBAL/GLOBAL_* macros above.
DEFINE_GLOBALS
// Largest sub-page size class.
DEFINE_GLOBAL(size_t) gMaxSubPageClass = gPageSize / 2;

// Max size class for bins.
#define gMaxBinClass gMaxSubPageClass

// Number of (2^n)-spaced sub-page bins.
DEFINE_GLOBAL(uint8_t)
gNumSubPageClasses = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);

// log2 of the page size, and a mask extracting the intra-page offset of an
// address.
DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;

// Number of pages in a chunk.
DEFINE_GLOBAL(size_t) gChunkNumPages = kChunkSize >> gPageSize2Pow;

// Number of pages necessary for a chunk header. sizeof(arena_chunk_t)
// already includes one arena_chunk_map_t element (its map[1] member), hence
// the (gChunkNumPages - 1) factor; the total is rounded up to a whole number
// of pages.
DEFINE_GLOBAL(size_t)
gChunkHeaderNumPages =
  ((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (gChunkNumPages - 1) +
    gPageSizeMask) &
   ~gPageSizeMask) >>
  gPageSize2Pow;

// Max size class for arenas: what remains of a chunk after its header pages.
DEFINE_GLOBAL(size_t)
gMaxLargeClass = kChunkSize - (gChunkHeaderNumPages << gPageSize2Pow);

// Various sanity checks that regard configuration.
GLOBAL_ASSERT(1ULL << gPageSize2Pow == gPageSize,
              "Page size is not a power of two");
GLOBAL_ASSERT(kQuantum >= sizeof(void*));
GLOBAL_ASSERT(kQuantum <= gPageSize);
GLOBAL_ASSERT(kChunkSize >= gPageSize);
GLOBAL_ASSERT(kQuantum * 4 <= kChunkSize);
END_GLOBALS
|
2017-09-26 00:08:00 +00:00
|
|
|
|
2017-11-03 03:07:16 +00:00
|
|
|
// Recycle at most 128 MiB of chunks. This means we retain at most
|
2017-10-29 12:53:31 +00:00
|
|
|
// 6.25% of the process address space on a 32-bit OS for later use.
|
2017-11-03 03:07:16 +00:00
|
|
|
static const size_t gRecycleLimit = 128_MiB;
|
2017-10-26 00:38:48 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// The current amount of recycled bytes, updated atomically.
|
2017-10-30 08:19:44 +00:00
|
|
|
static Atomic<size_t, ReleaseAcquire> gRecycledSize;
|
2017-09-26 00:08:00 +00:00
|
|
|
|
2017-11-01 10:29:36 +00:00
|
|
|
// Maximum number of dirty pages per arena.
|
|
|
|
#define DIRTY_MAX_DEFAULT (1U << 8)
|
|
|
|
|
|
|
|
static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
|
|
|
|
|
|
|
|
// Return the smallest chunk multiple that is >= s.
|
2017-11-03 03:07:16 +00:00
|
|
|
#define CHUNK_CEILING(s) (((s) + kChunkSizeMask) & ~kChunkSizeMask)
|
2017-11-01 10:29:36 +00:00
|
|
|
|
|
|
|
// Return the smallest cacheline multiple that is >= s.
|
2017-11-01 10:34:41 +00:00
|
|
|
#define CACHELINE_CEILING(s) \
|
|
|
|
(((s) + (kCacheLineSize - 1)) & ~(kCacheLineSize - 1))
|
2017-11-01 10:29:36 +00:00
|
|
|
|
|
|
|
// Return the smallest quantum multiple that is >= a.
|
2017-11-03 01:10:50 +00:00
|
|
|
#define QUANTUM_CEILING(a) (((a) + (kQuantumMask)) & ~(kQuantumMask))
|
2017-11-01 10:29:36 +00:00
|
|
|
|
|
|
|
// Return the smallest pagesize multiple that is >= s.
|
2017-11-03 03:13:17 +00:00
|
|
|
#define PAGE_CEILING(s) (((s) + gPageSizeMask) & ~gPageSizeMask)
|
2017-11-01 10:29:36 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
|
2011-10-24 17:23:47 +00:00
|
|
|
#if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
|
|
|
|
#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
|
|
|
|
#endif
|
|
|
|
|
2017-10-27 23:42:59 +00:00
|
|
|
static void*
|
|
|
|
base_alloc(size_t aSize);
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Set to true once the allocator has been initialized.
|
2017-10-30 08:19:44 +00:00
|
|
|
static Atomic<bool> malloc_initialized(false);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-22 22:25:08 +00:00
|
|
|
static StaticMutex gInitLock;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Statistics data structures.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Per-arena memory accounting, all counts in bytes unless noted otherwise.
struct arena_stats_t
{
  // Number of bytes currently mapped.
  size_t mapped;

  // Current number of committed pages.
  size_t committed;

  // Per-size-category statistics: bytes currently allocated from small
  // (bin-backed) size classes.
  size_t allocated_small;

  // Bytes currently allocated from large size classes.
  size_t allocated_large;
};
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Extent data structures.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// What a chunk is (or was) used for; recorded in extent_node_t and consulted
// notably when recycling chunks.
enum ChunkType
{
  UNKNOWN_CHUNK,
  ZEROED_CHUNK,   // chunk only contains zeroes.
  ARENA_CHUNK,    // used to back arena runs created by arena_t::AllocRun.
  HUGE_CHUNK,     // used to back huge allocations (e.g. arena_t::MallocHuge).
  RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle.
};
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Tree of extents. A node describes one contiguous region of address space
// and can be linked into both a size/address-ordered tree and an
// address-ordered tree at the same time.
struct extent_node_t
{
  // Linkage for the size/address-ordered tree.
  RedBlackTreeNode<extent_node_t> mLinkBySize;

  // Linkage for the address-ordered tree.
  RedBlackTreeNode<extent_node_t> mLinkByAddr;

  // Pointer to the extent that this tree node is responsible for.
  void* mAddr;

  // Total region size.
  size_t mSize;

  // The two union members are mutually exclusive uses of the same storage:
  // which one is meaningful depends on what the node tracks.
  union {
    // What type of chunk is there; used for chunk recycling.
    ChunkType mChunkType;

    // A pointer to the associated arena, for huge allocations.
    arena_t* mArena;
  };
};
|
2017-09-26 06:06:00 +00:00
|
|
|
|
|
|
|
struct ExtentTreeSzTrait
|
|
|
|
{
|
|
|
|
static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
|
|
|
|
{
|
2017-11-08 08:20:20 +00:00
|
|
|
return aThis->mLinkBySize;
|
2017-09-26 06:06:00 +00:00
|
|
|
}
|
|
|
|
|
2017-11-21 00:11:54 +00:00
|
|
|
static inline Order Compare(extent_node_t* aNode, extent_node_t* aOther)
|
2017-09-26 06:06:00 +00:00
|
|
|
{
|
2017-11-21 00:11:54 +00:00
|
|
|
Order ret = CompareInt(aNode->mSize, aOther->mSize);
|
|
|
|
return (ret != Order::eEqual) ? ret
|
|
|
|
: CompareAddr(aNode->mAddr, aOther->mAddr);
|
2017-09-26 06:06:00 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Red-black tree trait ordering extent nodes by address only; uses the
// mLinkByAddr linkage.
struct ExtentTreeTrait
{
  static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
  {
    return aThis->mLinkByAddr;
  }

  static inline Order Compare(extent_node_t* aNode, extent_node_t* aOther)
  {
    return CompareAddr(aNode->mAddr, aOther->mAddr);
  }
};
|
|
|
|
|
|
|
|
struct ExtentTreeBoundsTrait : public ExtentTreeTrait
|
|
|
|
{
|
2017-11-21 00:11:54 +00:00
|
|
|
static inline Order Compare(extent_node_t* aKey, extent_node_t* aNode)
|
2017-09-26 06:06:00 +00:00
|
|
|
{
|
2017-11-08 08:20:20 +00:00
|
|
|
uintptr_t key_addr = reinterpret_cast<uintptr_t>(aKey->mAddr);
|
|
|
|
uintptr_t node_addr = reinterpret_cast<uintptr_t>(aNode->mAddr);
|
|
|
|
size_t node_size = aNode->mSize;
|
2017-09-26 06:06:00 +00:00
|
|
|
|
|
|
|
// Is aKey within aNode?
|
|
|
|
if (node_addr <= key_addr && key_addr < node_addr + node_size) {
|
2017-11-21 00:11:54 +00:00
|
|
|
return Order::eEqual;
|
2017-09-26 06:06:00 +00:00
|
|
|
}
|
|
|
|
|
2017-11-21 00:11:54 +00:00
|
|
|
return CompareAddr(aKey->mAddr, aNode->mAddr);
|
2017-09-26 06:06:00 +00:00
|
|
|
}
|
|
|
|
};
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-02 23:53:34 +00:00
|
|
|
// Describe size classes to which allocations are rounded up to.
|
|
|
|
// TODO: add large and huge types when the arena allocation code
|
|
|
|
// changes in a way that allows it to be beneficial.
|
|
|
|
class SizeClass
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
enum ClassType
|
|
|
|
{
|
|
|
|
Tiny,
|
|
|
|
Quantum,
|
|
|
|
SubPage,
|
2017-11-15 05:50:33 +00:00
|
|
|
Large,
|
2017-11-02 23:53:34 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
explicit inline SizeClass(size_t aSize)
|
|
|
|
{
|
2017-11-03 01:10:50 +00:00
|
|
|
if (aSize <= kMaxTinyClass) {
|
2017-11-02 23:53:34 +00:00
|
|
|
mType = Tiny;
|
2017-11-03 01:10:50 +00:00
|
|
|
mSize = std::max(RoundUpPow2(aSize), kMinTinyClass);
|
|
|
|
} else if (aSize <= kMaxQuantumClass) {
|
2017-11-02 23:53:34 +00:00
|
|
|
mType = Quantum;
|
|
|
|
mSize = QUANTUM_CEILING(aSize);
|
2017-11-03 01:10:50 +00:00
|
|
|
} else if (aSize <= gMaxSubPageClass) {
|
2017-11-02 23:53:34 +00:00
|
|
|
mType = SubPage;
|
|
|
|
mSize = RoundUpPow2(aSize);
|
2017-11-15 05:50:33 +00:00
|
|
|
} else if (aSize <= gMaxLargeClass) {
|
|
|
|
mType = Large;
|
|
|
|
mSize = PAGE_CEILING(aSize);
|
2017-11-02 23:53:34 +00:00
|
|
|
} else {
|
|
|
|
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
SizeClass& operator=(const SizeClass& aOther) = default;
|
|
|
|
|
|
|
|
bool operator==(const SizeClass& aOther) { return aOther.mSize == mSize; }
|
|
|
|
|
|
|
|
size_t Size() { return mSize; }
|
|
|
|
|
|
|
|
ClassType Type() { return mType; }
|
|
|
|
|
|
|
|
SizeClass Next() { return SizeClass(mSize + 1); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
ClassType mType;
|
|
|
|
size_t mSize;
|
|
|
|
};
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Radix tree data structures.
|
|
|
|
//
|
|
|
|
// The number of bits passed to the template is the number of significant bits
|
|
|
|
// in an address to do a radix lookup with.
|
|
|
|
//
|
|
|
|
// An address is looked up by splitting it in kBitsPerLevel bit chunks, except
|
|
|
|
// the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
|
|
|
|
// different if Bits is not a multiple of kBitsPerLevel.
|
|
|
|
//
|
|
|
|
// With e.g. sizeof(void*)=4, Bits=16 and kBitsPerLevel=8, an address is split
|
|
|
|
// like the following:
|
|
|
|
// 0x12345678 -> mRoot[0x12][0x34]
|
2017-10-29 12:53:37 +00:00
|
|
|
template<size_t Bits>
class AddressRadixTree
{
  // Size of each radix tree node (as a power of 2).
  // This impacts tree depth.
#ifdef HAVE_64BIT_BUILD
  static const size_t kNodeSize = kCacheLineSize;
#else
  static const size_t kNodeSize = 16_KiB;
#endif
  // Number of address bits consumed per tree level; each node holds
  // kNodeSize / sizeof(void*) child pointers.
  static const size_t kBitsPerLevel = LOG2(kNodeSize) - LOG2(sizeof(void*));
  // The top level absorbs the remainder when Bits is not a multiple of
  // kBitsPerLevel.
  static const size_t kBitsAtLevel1 =
    (Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
  static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
  static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
                "AddressRadixTree parameters don't work out");

  // Guards tree mutation (taken by the out-of-line implementations).
  Mutex mLock;
  // Root node of the tree. NOTE(review): interior/leaf layout is
  // determined by Get/Set/GetSlot, defined elsewhere in this file.
  void** mRoot;

public:
  bool Init();

  inline void* Get(void* aAddr);

  // Returns whether the value was properly set.
  inline bool Set(void* aAddr, void* aValue);

  inline bool Unset(void* aAddr) { return Set(aAddr, nullptr); }

private:
  inline void** GetSlot(void* aAddr, bool aCreate = false);
};
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Arena data structures.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-01 23:15:39 +00:00
|
|
|
struct arena_bin_t;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-26 06:06:00 +00:00
|
|
|
// Shared base for arena_chunk_map_t tree traits: exposes the map
// element's embedded `link` field as its red-black tree node.
struct ArenaChunkMapLink
{
  static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(
    arena_chunk_map_t* aThis)
  {
    return aThis->link;
  }
};
|
|
|
|
|
|
|
|
// Trait for trees of runs (e.g. a bin's non-full runs): map elements
// are ordered purely by their own address.
struct ArenaRunTreeTrait : public ArenaChunkMapLink
{
  static inline Order Compare(arena_chunk_map_t* aNode,
                              arena_chunk_map_t* aOther)
  {
    MOZ_ASSERT(aNode);
    MOZ_ASSERT(aOther);
    return CompareAddr(aNode, aOther);
  }
};
|
|
|
|
|
|
|
|
struct ArenaAvailTreeTrait : public ArenaChunkMapLink
|
|
|
|
{
|
2017-11-21 00:11:54 +00:00
|
|
|
static inline Order Compare(arena_chunk_map_t* aNode,
|
|
|
|
arena_chunk_map_t* aOther)
|
2017-09-26 06:06:00 +00:00
|
|
|
{
|
2017-11-03 03:13:17 +00:00
|
|
|
size_t size1 = aNode->bits & ~gPageSizeMask;
|
|
|
|
size_t size2 = aOther->bits & ~gPageSizeMask;
|
2017-11-21 00:11:54 +00:00
|
|
|
Order ret = CompareInt(size1, size2);
|
|
|
|
return (ret != Order::eEqual)
|
|
|
|
? ret
|
|
|
|
: CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
|
|
|
|
aOther);
|
2017-09-26 06:06:00 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Trait for an arena's tree of dirty-page-containing chunks, linked
// through the chunk's `link_dirty` field and ordered by chunk address.
struct ArenaDirtyChunkTrait
{
  static RedBlackTreeNode<arena_chunk_t>& GetTreeNode(arena_chunk_t* aThis)
  {
    return aThis->link_dirty;
  }

  static inline Order Compare(arena_chunk_t* aNode, arena_chunk_t* aOther)
  {
    MOZ_ASSERT(aNode);
    MOZ_ASSERT(aOther);
    return CompareAddr(aNode, aOther);
  }
};
|
|
|
|
|
2017-09-01 23:55:42 +00:00
|
|
|
#ifdef MALLOC_DOUBLE_PURGE
|
|
|
|
namespace mozilla {
|
|
|
|
|
|
|
|
template<>
|
|
|
|
struct GetDoublyLinkedListElement<arena_chunk_t>
|
|
|
|
{
|
|
|
|
static DoublyLinkedListElement<arena_chunk_t>& Get(arena_chunk_t* aThis)
|
|
|
|
{
|
|
|
|
return aThis->chunks_madvised_elem;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Header for a run of regions all belonging to one small size class.
// The trailing mRegionsMask bitmask is dynamically sized (see the
// per-bin mRunNumRegionsMask count in arena_bin_t).
struct arena_run_t
{
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  uint32_t mMagic;
#define ARENA_RUN_MAGIC 0x384adf93

  // On 64-bit platforms, having the arena_bin_t pointer following
  // the mMagic field means there's padding between both fields, making
  // the run header larger than necessary.
  // But when MOZ_DIAGNOSTIC_ASSERT_ENABLED is not set, starting the
  // header with this field followed by the arena_bin_t pointer yields
  // the same padding. We do want the mMagic field to appear first, so
  // depending whether MOZ_DIAGNOSTIC_ASSERT_ENABLED is set or not, we
  // move some field to avoid padding.

  // Number of free regions in run.
  unsigned mNumFree;
#endif

  // Bin this run is associated with.
  arena_bin_t* mBin;

  // Index of first element that might have a free region.
  unsigned mRegionsMinElement;

#if !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  // Number of free regions in run.
  unsigned mNumFree;
#endif

  // Bitmask of in-use regions (0: in use, 1: free).
  unsigned mRegionsMask[1]; // Dynamically sized.
};
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Per-size-class bookkeeping for small allocations within an arena.
struct arena_bin_t
{
  // Current run being used to service allocations of this bin's size
  // class.
  arena_run_t* mCurrentRun;

  // Tree of non-full runs. This tree is used when looking for an
  // existing run when mCurrentRun is no longer usable. We choose the
  // non-full run that is lowest in memory; this policy tends to keep
  // objects packed well, and it can also help reduce the number of
  // almost-empty chunks.
  RedBlackTree<arena_chunk_map_t, ArenaRunTreeTrait> mNonFullRuns;

  // Bin's size class.
  size_t mSizeClass;

  // Total size of a run for this bin's size class.
  size_t mRunSize;

  // Total number of regions in a run for this bin's size class.
  uint32_t mRunNumRegions;

  // Number of elements in a run's mRegionsMask for this bin's size class.
  uint32_t mRunNumRegionsMask;

  // Offset of first region in a run for this bin's size class.
  uint32_t mRunFirstRegionOffset;

  // Current number of runs in this bin, full or otherwise.
  unsigned long mNumRuns;

  // Amount of overhead runs are allowed to have.
  static constexpr double kRunOverhead = 1.6_percent;
  // Relaxed bound used for especially small size classes.
  static constexpr double kRunRelaxedOverhead = 2.4_percent;

  // Initialize a bin for the given size class.
  // The generated run sizes, for a page size of 4 KiB, are:
  //   size|run       size|run       size|run       size|run
  //  class|size     class|size     class|size     class|size
  //      4   4 KiB      8   4 KiB     16   4 KiB     32   4 KiB
  //     48   4 KiB     64   4 KiB     80   4 KiB     96   4 KiB
  //    112   4 KiB    128   8 KiB    144   4 KiB    160   8 KiB
  //    176   4 KiB    192   4 KiB    208   8 KiB    224   4 KiB
  //    240   4 KiB    256  16 KiB    272   4 KiB    288   4 KiB
  //    304  12 KiB    320  12 KiB    336   4 KiB    352   8 KiB
  //    368   4 KiB    384   8 KiB    400  20 KiB    416  16 KiB
  //    432  12 KiB    448   4 KiB    464  16 KiB    480   8 KiB
  //    496  20 KiB    512  32 KiB   1024  64 KiB   2048 128 KiB
  inline void Init(SizeClass aSizeClass);
};
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// An arena: owns chunks, carves them into runs, and services
// allocations through its bins (small), run allocator (large) and
// huge paths. All operations on an arena are guarded by mLock.
struct arena_t
{
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  uint32_t mMagic;
#define ARENA_MAGIC 0x947d3d24
#endif

  arena_id_t mId;
  // Linkage for the tree of arenas by id.
  RedBlackTreeNode<arena_t> mLink;

  // All operations on this arena require that lock be locked.
  Mutex mLock;

  arena_stats_t mStats;

private:
  // Tree of dirty-page-containing chunks this arena manages.
  RedBlackTree<arena_chunk_t, ArenaDirtyChunkTrait> mChunksDirty;

#ifdef MALLOC_DOUBLE_PURGE
  // Head of a linked list of MADV_FREE'd-page-containing chunks this
  // arena manages.
  DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
#endif

  // In order to avoid rapid chunk allocation/deallocation when an arena
  // oscillates right on the cusp of needing a new chunk, cache the most
  // recently freed chunk. The spare is left in the arena's chunk trees
  // until it is deleted.
  //
  // There is one spare chunk per arena, rather than one spare total, in
  // order to avoid interactions between multiple threads that could make
  // a single spare inadequate.
  arena_chunk_t* mSpare;

public:
  // Current count of pages within unused runs that are potentially
  // dirty, and for which madvise(... MADV_FREE) has not been called. By
  // tracking this, we can institute a limit on how much dirty unused
  // memory is mapped for each arena.
  size_t mNumDirty;

  // Maximum value allowed for mNumDirty.
  size_t mMaxDirty;

private:
  // Size/address-ordered tree of this arena's available runs. This tree
  // is used for first-best-fit run allocation.
  RedBlackTree<arena_chunk_map_t, ArenaAvailTreeTrait> mRunsAvail;

public:
  // mBins is used to store rings of free regions of the following sizes,
  // assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
  //
  //  mBins[i] | size |
  //  --------+------+
  //       0  |    2 |
  //       1  |    4 |
  //       2  |    8 |
  //  --------+------+
  //       3  |   16 |
  //       4  |   32 |
  //       5  |   48 |
  //       6  |   64 |
  //          :      :
  //          :      :
  //      33  |  496 |
  //      34  |  512 |
  //  --------+------+
  //      35  | 1024 |
  //      36  | 2048 |
  //  --------+------+
  arena_bin_t mBins[1]; // Dynamically sized.

  explicit arena_t(arena_params_t* aParams);

private:
  // Chunk lifecycle.
  void InitChunk(arena_chunk_t* aChunk, bool aZeroed);

  void DeallocChunk(arena_chunk_t* aChunk);

  // Run lifecycle.
  arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero);

  void DallocRun(arena_run_t* aRun, bool aDirty);

  MOZ_MUST_USE bool SplitRun(arena_run_t* aRun,
                             size_t aSize,
                             bool aLarge,
                             bool aZero);

  void TrimRunHead(arena_chunk_t* aChunk,
                   arena_run_t* aRun,
                   size_t aOldSize,
                   size_t aNewSize);

  void TrimRunTail(arena_chunk_t* aChunk,
                   arena_run_t* aRun,
                   size_t aOldSize,
                   size_t aNewSize,
                   bool dirty);

  arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);

  // Internal allocation paths, dispatched to by Malloc/Palloc/Ralloc.
  inline void* MallocSmall(size_t aSize, bool aZero);

  void* MallocLarge(size_t aSize, bool aZero);

  void* MallocHuge(size_t aSize, bool aZero);

  void* PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize);

  void* PallocHuge(size_t aSize, size_t aAlignment, bool aZero);

  void RallocShrinkLarge(arena_chunk_t* aChunk,
                         void* aPtr,
                         size_t aSize,
                         size_t aOldSize);

  bool RallocGrowLarge(arena_chunk_t* aChunk,
                       void* aPtr,
                       size_t aSize,
                       size_t aOldSize);

  void* RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize);

  void* RallocHuge(void* aPtr, size_t aSize, size_t aOldSize);

public:
  // Public allocation entry points.
  inline void* Malloc(size_t aSize, bool aZero);

  void* Palloc(size_t aAlignment, size_t aSize);

  inline void DallocSmall(arena_chunk_t* aChunk,
                          void* aPtr,
                          arena_chunk_map_t* aMapElm);

  void DallocLarge(arena_chunk_t* aChunk, void* aPtr);

  void* Ralloc(void* aPtr, size_t aSize, size_t aOldSize);

  void Purge(bool aAll);

  void HardPurge();

  // Arenas are only created through the fallible operator new below.
  void* operator new(size_t aCount) = delete;

  void* operator new(size_t aCount, const fallible_t&)
#if !defined(_MSC_VER) || defined(_CPPUNWIND)
    noexcept
#endif
  {
    MOZ_ASSERT(aCount == sizeof(arena_t));
    // Allocate enough space for trailing bins.
    return base_alloc(
      aCount + (sizeof(arena_bin_t) * (kNumTinyClasses + kNumQuantumClasses +
                                       gNumSubPageClasses - 1)));
  }

  void operator delete(void*) = delete;
};
|
|
|
|
|
2017-09-26 06:06:00 +00:00
|
|
|
struct ArenaTreeTrait
|
|
|
|
{
|
|
|
|
static RedBlackTreeNode<arena_t>& GetTreeNode(arena_t* aThis)
|
|
|
|
{
|
|
|
|
return aThis->mLink;
|
|
|
|
}
|
|
|
|
|
2017-11-21 00:11:54 +00:00
|
|
|
static inline Order Compare(arena_t* aNode, arena_t* aOther)
|
2017-09-26 06:06:00 +00:00
|
|
|
{
|
|
|
|
MOZ_ASSERT(aNode);
|
|
|
|
MOZ_ASSERT(aOther);
|
2017-11-21 00:11:54 +00:00
|
|
|
return CompareInt(aNode->mId, aOther->mId);
|
2017-09-26 06:06:00 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2017-10-27 22:13:58 +00:00
|
|
|
// Bookkeeping for all the arenas used by the allocator.
|
2017-10-30 22:13:39 +00:00
|
|
|
// Arenas are separated in two categories:
|
|
|
|
// - "private" arenas, used through the moz_arena_* API
|
|
|
|
// - all the other arenas: the default arena, and thread-local arenas,
|
|
|
|
// used by the standard API.
|
2017-10-27 22:13:58 +00:00
|
|
|
class ArenaCollection
{
public:
  // Set up both arena trees and create the default (main) arena.
  // Returns false if the lock or the default arena couldn't be created.
  bool Init()
  {
    mArenas.Init();
    mPrivateArenas.Init();
    arena_params_t params;
    // The main arena allows more dirty pages than the default for other arenas.
    params.mMaxDirty = opt_dirty_max;
    mDefaultArena =
      mLock.Init() ? CreateArena(/* IsPrivate = */ false, &params) : nullptr;
    return bool(mDefaultArena);
  }

  inline arena_t* GetById(arena_id_t aArenaId, bool aIsPrivate);

  arena_t* CreateArena(bool aIsPrivate, arena_params_t* aParams);

  // Remove an arena from whichever tree holds it. The arena's memory
  // itself is not reclaimed (see comment below).
  void DisposeArena(arena_t* aArena)
  {
    MutexAutoLock lock(mLock);
    (mPrivateArenas.Search(aArena) ? mPrivateArenas : mArenas).Remove(aArena);
    // The arena is leaked, and remaining allocations in it still are alive
    // until they are freed. After that, the arena will be empty but will
    // still take at least a chunk of address space. TODO: bug 1364359.
  }

  using Tree = RedBlackTree<arena_t, ArenaTreeTrait>;

  // Iterator that walks one tree, then seamlessly continues into a
  // second one (used to visit public then private arenas).
  struct Iterator : Tree::Iterator
  {
    explicit Iterator(Tree* aTree, Tree* aSecondTree)
      : Tree::Iterator(aTree)
      , mNextTree(aSecondTree)
    {
    }

    Item<Iterator> begin()
    {
      return Item<Iterator>(this, *Tree::Iterator::begin());
    }

    Item<Iterator> end() { return Item<Iterator>(this, nullptr); }

    Tree::TreeNode* Next()
    {
      Tree::TreeNode* result = Tree::Iterator::Next();
      if (!result && mNextTree) {
        // First tree exhausted: re-construct this iterator in place over
        // the second tree and restart from its beginning.
        new (this) Iterator(mNextTree, nullptr);
        result = reinterpret_cast<Tree::TreeNode*>(*Tree::Iterator::begin());
      }
      return result;
    }

  private:
    Tree* mNextTree;
  };

  Iterator iter() { return Iterator(&mArenas, &mPrivateArenas); }

  inline arena_t* GetDefault() { return mDefaultArena; }

  Mutex mLock;

private:
  arena_t* mDefaultArena;
  arena_id_t mLastArenaId;
  Tree mArenas;
  Tree mPrivateArenas;
};
|
|
|
|
|
|
|
|
static ArenaCollection gArenas;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ******
|
|
|
|
// Chunks.
|
2017-11-03 03:07:16 +00:00
|
|
|
static AddressRadixTree<(sizeof(void*) << 3) - LOG2(kChunkSize)> gChunkRTree;
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Protects chunk-related data structures.
|
2017-10-06 08:20:04 +00:00
|
|
|
static Mutex chunks_mtx;
|
2014-11-01 11:00:00 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Trees of chunks that were previously allocated (trees differ only in node
|
|
|
|
// ordering). These are used when allocating chunks, in an attempt to re-use
|
|
|
|
// address space. Depending on function, different tree orderings are needed,
|
|
|
|
// which is why there are two trees with the same contents.
|
2017-10-25 23:29:07 +00:00
|
|
|
static RedBlackTree<extent_node_t, ExtentTreeSzTrait> gChunksBySize;
|
|
|
|
static RedBlackTree<extent_node_t, ExtentTreeTrait> gChunksByAddress;
|
2014-11-01 11:00:00 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Protects huge allocation-related data structures.
|
2017-10-06 08:20:04 +00:00
|
|
|
static Mutex huge_mtx;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Tree of chunks that are stand-alone huge allocations.
|
2017-09-26 06:06:00 +00:00
|
|
|
static RedBlackTree<extent_node_t, ExtentTreeTrait> huge;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Huge allocation statistics.
|
2017-10-29 12:53:37 +00:00
|
|
|
static size_t huge_allocated;
|
|
|
|
static size_t huge_mapped;
|
2017-10-29 12:50:49 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// **************************
|
|
|
|
// base (internal allocation).
|
|
|
|
|
|
|
|
// Current pages that are being used for internal memory allocations. These
|
|
|
|
// pages are carved up in cacheline-size quanta, so that there is no chance of
|
|
|
|
// false cache line sharing.
|
2017-10-29 12:50:49 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static void* base_pages;
|
|
|
|
static void* base_next_addr;
|
|
|
|
static void* base_next_decommitted;
|
|
|
|
static void* base_past_addr; // Addr immediately past base_pages.
|
|
|
|
static extent_node_t* base_nodes;
|
2017-10-06 08:20:04 +00:00
|
|
|
static Mutex base_mtx;
|
2017-10-29 12:53:37 +00:00
|
|
|
static size_t base_mapped;
|
|
|
|
static size_t base_committed;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ******
|
|
|
|
// Arenas.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-12-03 05:22:05 +00:00
|
|
|
// The arena associated with the current thread (per
|
|
|
|
// jemalloc_thread_local_arena) On OSX, __thread/thread_local circles back
|
|
|
|
// calling malloc to allocate storage on first access on each thread, which
|
|
|
|
// leads to an infinite loop, but pthread-based TLS somehow doesn't have this
|
|
|
|
// problem.
|
2017-09-24 21:47:44 +00:00
|
|
|
#if !defined(XP_DARWIN)
|
2017-09-12 07:29:11 +00:00
|
|
|
static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
|
|
|
|
#else
|
2017-10-30 08:19:44 +00:00
|
|
|
static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
|
2017-10-29 12:53:37 +00:00
|
|
|
thread_arena;
|
2008-02-06 23:06:50 +00:00
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// *****************************
|
|
|
|
// Runtime configuration options.
|
|
|
|
|
2017-05-18 01:02:36 +00:00
|
|
|
const uint8_t kAllocJunk = 0xe4;
|
|
|
|
const uint8_t kAllocPoison = 0xe5;
|
|
|
|
|
2017-05-18 01:22:20 +00:00
|
|
|
#ifdef MOZ_DEBUG
|
2017-10-29 12:53:37 +00:00
|
|
|
static bool opt_junk = true;
|
|
|
|
static bool opt_zero = false;
|
2008-02-06 23:06:50 +00:00
|
|
|
#else
|
2017-10-29 12:53:37 +00:00
|
|
|
static const bool opt_junk = false;
|
|
|
|
static const bool opt_zero = false;
|
2008-02-06 23:06:50 +00:00
|
|
|
#endif
|
2014-02-14 01:10:35 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Begin forward declarations.
|
2017-10-29 12:50:49 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static void*
|
|
|
|
chunk_alloc(size_t aSize,
|
|
|
|
size_t aAlignment,
|
|
|
|
bool aBase,
|
|
|
|
bool* aZeroed = nullptr);
|
|
|
|
static void
|
|
|
|
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
|
|
|
|
static void
|
|
|
|
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
|
|
|
|
static void
|
2017-11-08 08:43:47 +00:00
|
|
|
huge_dalloc(void* aPtr, arena_t* aArena);
|
Bug 1417234 - Use SRWLock as Mutex for mozjemalloc on Windows. r=njn
SRWLock is more lightweight than CriticalSection, but is only available
on Windows Vista and more. So until we actually dropped support Windows
XP, we had to use CriticalSection.
Now that all supported Windows versions do have SRWLock, this is a
switch we can make, and not only because SRWLock is more lightweight,
but because it can be statically initialized like on other platforms,
allowing to use the same initialization code as on other platforms,
and removing the requirement for a DllMain, which in turn can allow
to statically link mozjemalloc in some cases, instead of requiring a
shared library (DllMain only works on shared libraries), or manually
call the initialization function soon enough.
There is a downside, though: SRWLock, as opposed to CriticalSection, is
not fair, meaning it can have thread scheduling implications, and can
theoretically increase latency on some threads. However, it is the
default used by Rust Mutex, meaning it's at least good enough there.
Let's see how things go with this.
--HG--
extra : rebase_source : 337dc4e245e461fd0ea23a2b6b53981346a545c6
2017-11-14 03:58:33 +00:00
|
|
|
static bool
|
|
|
|
malloc_init_hard();
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-30 07:54:17 +00:00
|
|
|
#ifdef XP_DARWIN
|
2017-05-12 12:52:25 +00:00
|
|
|
#define FORK_HOOK extern "C"
|
|
|
|
#else
|
|
|
|
#define FORK_HOOK static
|
2017-01-20 01:06:41 +00:00
|
|
|
#endif
|
2017-10-29 12:53:37 +00:00
|
|
|
FORK_HOOK void
|
|
|
|
_malloc_prefork(void);
|
|
|
|
FORK_HOOK void
|
|
|
|
_malloc_postfork_parent(void);
|
|
|
|
FORK_HOOK void
|
|
|
|
_malloc_postfork_child(void);
|
2011-05-22 03:27:00 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End forward declarations.
|
|
|
|
// ***************************************************************************
|
2008-07-25 21:52:27 +00:00
|
|
|
|
2017-10-27 08:25:18 +00:00
|
|
|
// FreeBSD's pthreads implementation calls malloc(3), so the malloc
|
|
|
|
// implementation has to take pains to avoid infinite recursion during
|
|
|
|
// initialization.
|
|
|
|
// Returns whether the allocator was successfully initialized.
|
|
|
|
static inline bool
|
|
|
|
malloc_init()
|
|
|
|
{
|
|
|
|
|
|
|
|
if (malloc_initialized == false) {
|
|
|
|
return malloc_init_hard();
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-07-25 21:52:27 +00:00
|
|
|
// Write the given NUL-terminated string to stderr. This is the base case
// of the variadic _malloc_message() overload below; all allocator
// diagnostic output funnels through here.
static void
_malloc_message(const char* p)
{
#if !defined(XP_WIN)
// Non-Windows platforms have no _write(); alias it to write() so the call
// below compiles everywhere. NOTE: being a #define inside a function body,
// this remains defined for the rest of the translation unit.
#define _write write
#endif
  // Pretend to check _write() errors to suppress gcc warnings about
  // warn_unused_result annotations in some versions of glibc headers.
  if (_write(STDERR_FILENO, p, (unsigned int)strlen(p)) < 0) {
    return;
  }
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Write a sequence of string fragments to stderr, in order. Peels off the
// first fragment and recurses on the remaining parameter pack until the
// single-argument base case above handles the last one.
template<typename... Args>
static void
_malloc_message(const char* p, Args... args)
{
  _malloc_message(p);
  _malloc_message(args...);
}
|
|
|
|
|
2017-08-30 07:56:42 +00:00
|
|
|
#ifdef ANDROID
|
2015-11-15 21:21:56 +00:00
|
|
|
// Android's pthread.h does not declare pthread_atfork() until SDK 21.
|
2017-10-29 12:53:37 +00:00
|
|
|
extern "C" MOZ_EXPORT int
|
|
|
|
pthread_atfork(void (*)(void), void (*)(void), void (*)(void));
|
2015-11-15 21:21:56 +00:00
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Begin Utility functions/macros.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Return the chunk address for allocation address a.
|
2017-10-26 01:34:37 +00:00
|
|
|
// Return the chunk containing aPtr, i.e. aPtr rounded down to the nearest
// chunk boundary.
static inline arena_chunk_t*
GetChunkForPtr(const void* aPtr)
{
  uintptr_t addr = uintptr_t(aPtr);
  return reinterpret_cast<arena_chunk_t*>(addr & ~kChunkSizeMask);
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Return the chunk offset of address a.
|
2017-10-26 01:34:37 +00:00
|
|
|
// Return aPtr's byte offset from the start of its enclosing chunk.
static inline size_t
GetChunkOffsetForPtr(const void* aPtr)
{
  uintptr_t addr = uintptr_t(aPtr);
  return static_cast<size_t>(addr & kChunkSizeMask);
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// mozjemalloc does not track the actual program name; diagnostics use this
// fixed tag instead.
static inline const char*
_getprogname(void)
{
  static const char* const kProgname = "<jemalloc>";
  return kProgname;
}
|
|
|
|
|
2017-11-10 07:59:21 +00:00
|
|
|
// Fill the given range of memory with zeroes or junk depending on opt_junk
// and opt_zero. When neither option is set, the memory is left untouched.
|
2017-11-10 07:59:21 +00:00
|
|
|
static inline void
|
|
|
|
ApplyZeroOrJunk(void* aPtr, size_t aSize)
|
|
|
|
{
|
|
|
|
if (opt_junk) {
|
|
|
|
memset(aPtr, kAllocJunk, aSize);
|
|
|
|
} else if (opt_zero) {
|
|
|
|
memset(aPtr, 0, aSize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// ***************************************************************************
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2008-03-15 03:51:44 +00:00
|
|
|
// Return the physical pages backing [aAddr, aAddr + aSize) to the OS while
// keeping the address range reserved. Crashes on failure: callers assume
// decommit cannot fail.
static inline void
pages_decommit(void* aAddr, size_t aSize)
{
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so decommitting the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    if (!VirtualFree(aAddr, pages_size, MEM_DECOMMIT)) {
      MOZ_CRASH();
    }
    // Advance to the next chunk-sized slice.
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  // Replacing the range with a fresh PROT_NONE mapping drops the backing
  // pages but keeps the addresses reserved for later recommit.
  if (mmap(
        aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
      MAP_FAILED) {
    MOZ_CRASH();
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
#endif
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Commit pages. Returns whether pages were committed.
|
2017-10-27 01:31:50 +00:00
|
|
|
// Commit the pages in [aAddr, aAddr + aSize) so they are usable again after
// a pages_decommit(). Returns false on failure (e.g. commit charge
// exhausted); the range may then be partially committed.
MOZ_MUST_USE static inline bool
pages_commit(void* aAddr, size_t aSize)
{
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so committing the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    if (!VirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
      return false;
    }
    // Advance to the next chunk-sized slice.
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  // A fresh MAP_FIXED anonymous read/write mapping over the range yields
  // accessible (zeroed) pages again.
  if (mmap(aAddr,
           aSize,
           PROT_READ | PROT_WRITE,
           MAP_FIXED | MAP_PRIVATE | MAP_ANON,
           -1,
           0) == MAP_FAILED) {
    return false;
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
#endif
  return true;
}
|
|
|
|
|
2008-02-09 05:46:59 +00:00
|
|
|
// Map a new chunk-aligned region for the "base" (internal metadata)
// allocator, large enough to hold at least minsize bytes.
// Returns true on FAILURE and false on success (historical jemalloc
// convention).
static bool
base_pages_alloc(size_t minsize)
{
  size_t csize;
  size_t pminsize;

  MOZ_ASSERT(minsize != 0);
  csize = CHUNK_CEILING(minsize);
  // The 'true' argument marks this as a base allocation, so chunk_alloc()
  // won't recurse into the recycling path.
  base_pages = chunk_alloc(csize, kChunkSize, true);
  if (!base_pages) {
    return true;
  }
  base_next_addr = base_pages;
  base_past_addr = (void*)((uintptr_t)base_pages + csize);
  // Leave enough pages for minsize committed, since otherwise they would
  // have to be immediately recommitted.
  pminsize = PAGE_CEILING(minsize);
  base_next_decommitted = (void*)((uintptr_t)base_pages + pminsize);
#if defined(MALLOC_DECOMMIT)
  if (pminsize < csize) {
    pages_decommit(base_next_decommitted, csize - pminsize);
  }
#endif
  // Stats bookkeeping.
  base_mapped += csize;
  base_committed += pminsize;

  return false;
}
|
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
static void*
|
|
|
|
base_alloc(size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-06 08:49:40 +00:00
|
|
|
void* ret;
|
|
|
|
size_t csize;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Round size up to nearest multiple of the cacheline size.
|
2017-10-06 08:49:40 +00:00
|
|
|
csize = CACHELINE_CEILING(aSize);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
MutexAutoLock lock(base_mtx);
|
2017-10-29 12:53:31 +00:00
|
|
|
// Make sure there's enough space for the allocation.
|
2017-10-06 08:49:40 +00:00
|
|
|
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
|
|
|
|
if (base_pages_alloc(csize)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
}
|
2017-10-29 12:53:31 +00:00
|
|
|
// Allocate.
|
2017-10-06 08:49:40 +00:00
|
|
|
ret = base_next_addr;
|
|
|
|
base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
|
2017-10-29 12:53:31 +00:00
|
|
|
// Make sure enough pages are committed for the new allocation.
|
2017-10-06 08:49:40 +00:00
|
|
|
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
|
|
|
|
void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));
|
2008-03-15 03:51:44 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#ifdef MALLOC_DECOMMIT
|
2017-10-27 01:31:50 +00:00
|
|
|
if (!pages_commit(base_next_decommitted,
|
|
|
|
(uintptr_t)pbase_next_addr -
|
|
|
|
(uintptr_t)base_next_decommitted)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2017-10-29 12:53:37 +00:00
|
|
|
#endif
|
2017-10-06 08:49:40 +00:00
|
|
|
base_next_decommitted = pbase_next_addr;
|
2017-10-29 12:53:37 +00:00
|
|
|
base_committed +=
|
|
|
|
(uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted;
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-26 02:11:32 +00:00
|
|
|
static void*
|
|
|
|
base_calloc(size_t aNumber, size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-26 02:11:32 +00:00
|
|
|
void* ret = base_alloc(aNumber * aSize);
|
2017-10-27 01:31:50 +00:00
|
|
|
if (ret) {
|
|
|
|
memset(ret, 0, aNumber * aSize);
|
|
|
|
}
|
2017-10-26 02:11:32 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Allocate an extent_node_t, preferring a node from the free list of
// previously released nodes over carving fresh memory out of base_alloc().
// Returns nullptr on out-of-memory.
static extent_node_t*
base_node_alloc(void)
{
  extent_node_t* ret;

  base_mtx.Lock();
  if (base_nodes) {
    // Pop the free-list head; each free node stores the next-pointer in
    // its own first word (see base_node_dealloc()).
    ret = base_nodes;
    base_nodes = *(extent_node_t**)ret;
    base_mtx.Unlock();
  } else {
    // Release base_mtx first: base_alloc() acquires it itself, and the
    // mutex is not recursive.
    base_mtx.Unlock();
    ret = (extent_node_t*)base_alloc(sizeof(extent_node_t));
  }

  return ret;
}
|
|
|
|
|
|
|
|
// Return aNode to the extent-node free list. Base memory is never unmapped,
// so "freeing" a node just chains it for reuse by base_node_alloc().
static void
base_node_dealloc(extent_node_t* aNode)
{
  MutexAutoLock lock(base_mtx);
  // Reuse the node's first word as the free-list link.
  *(extent_node_t**)aNode = base_nodes;
  base_nodes = aNode;
}
|
|
|
|
|
2017-10-25 23:50:49 +00:00
|
|
|
// UniquePtr deletion policy that returns extent nodes to the base
// allocator's free list instead of calling free().
struct BaseNodeFreePolicy
{
  void operator()(extent_node_t* aPtr) { base_node_dealloc(aPtr); }
};
|
|
|
|
|
2017-10-30 08:19:44 +00:00
|
|
|
using UniqueBaseNode = UniquePtr<extent_node_t, BaseNodeFreePolicy>;
|
2017-10-25 23:50:49 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End Utility functions/macros.
|
|
|
|
// ***************************************************************************
|
|
|
|
// Begin chunk management functions.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-30 07:53:10 +00:00
|
|
|
#ifdef XP_WIN
|
2009-03-31 16:27:25 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static void*
|
|
|
|
pages_map(void* aAddr, size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
void* ret = nullptr;
|
|
|
|
ret = VirtualAlloc(aAddr, aSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
|
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Release a region previously obtained from pages_map() (Windows).
static void
pages_unmap(void* aAddr, size_t aSize)
{
  // MEM_RELEASE requires a size of 0 and frees the entire original
  // allocation; aSize is therefore unused on this platform.
  if (VirtualFree(aAddr, 0, MEM_RELEASE) == 0) {
    _malloc_message(_getprogname(), ": (malloc) Error in VirtualFree()\n");
  }
}
|
2011-05-22 03:27:00 +00:00
|
|
|
#else
|
2009-02-02 10:50:19 +00:00
|
|
|
|
2017-10-29 12:53:26 +00:00
|
|
|
// Unmap a region previously obtained from pages_map() (non-Windows).
// Failures are only reported, not fatal.
static void
pages_unmap(void* aAddr, size_t aSize)
{
  if (munmap(aAddr, aSize) == -1) {
    char buf[64];

    // The == 0 check matches the XSI (int-returning) strerror_r, where 0
    // means the message was written into buf.
    if (strerror_r(errno, buf, sizeof(buf)) == 0) {
      _malloc_message(
        _getprogname(), ": (malloc) Error in munmap(): ", buf, "\n");
    }
  }
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Map aSize bytes of anonymous memory, preferably at aAddr (non-Windows).
// Returns nullptr on failure, or when a requested placement could not be
// honored. On ia64/sparc64-linux, also guarantees the result has its high
// 17 bits clear (see the comment below).
static void*
pages_map(void* aAddr, size_t aSize)
{
  void* ret;
#if defined(__ia64__) || \
  (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // The JS engine assumes that all allocated pointers have their high 17 bits
  // clear, which ia64's mmap doesn't support directly. However, we can emulate
  // it by passing mmap an "addr" parameter with those bits clear. The mmap will
  // return that address, or the nearest available memory above that address,
  // providing a near-guarantee that those bits are clear. If they are not, we
  // return nullptr below to indicate out-of-memory.
  //
  // The addr is chosen as 0x0000070000000000, which still allows about 120TB of
  // virtual address space.
  //
  // See Bug 589735 for more information.
  bool check_placement = true;
  if (!aAddr) {
    // No placement requested: use the synthetic hint, and don't require
    // the result to land exactly there.
    aAddr = (void*)0x0000070000000000;
    check_placement = false;
  }
#endif

#if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
  const uintptr_t start = 0x0000070000000000ULL;
  const uintptr_t end = 0x0000800000000000ULL;

  // Copied from js/src/gc/Memory.cpp and adapted for this source
  // Probe hint addresses a chunk at a time until a mapping lands inside
  // the [start, end) window with the high bits clear.
  uintptr_t hint;
  void* region = MAP_FAILED;
  for (hint = start; region == MAP_FAILED && hint + aSize <= end;
       hint += kChunkSize) {
    region = mmap((void*)hint,
                  aSize,
                  PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANON,
                  -1,
                  0);
    if (region != MAP_FAILED) {
      if (((size_t)region + (aSize - 1)) & 0xffff800000000000) {
        // Mapping has high bits set; discard it and keep probing.
        if (munmap(region, aSize)) {
          MOZ_ASSERT(errno == ENOMEM);
        }
        region = MAP_FAILED;
      }
    }
  }
  ret = region;
#else
  // We don't use MAP_FIXED here, because it can cause the *replacement*
  // of existing mappings, and we only want to create new mappings.
  ret =
    mmap(aAddr, aSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  MOZ_ASSERT(ret);
#endif
  if (ret == MAP_FAILED) {
    ret = nullptr;
  }
#if defined(__ia64__) || \
  (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // If the allocated memory doesn't have its upper 17 bits clear, consider it
  // as out of memory.
  else if ((long long)ret & 0xffff800000000000) {
    munmap(ret, aSize);
    ret = nullptr;
  }
  // If the caller requested a specific memory location, verify that's what mmap
  // returned.
  else if (check_placement && ret != aAddr) {
#else
  else if (aAddr && ret != aAddr) {
#endif
    // We succeeded in mapping memory, but not in the right place.
    pages_unmap(ret, aSize);
    ret = nullptr;
  }
  if (ret) {
    MozTagAnonymousMemory(ret, aSize, "jemalloc");
  }

#if defined(__ia64__) || \
  (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  MOZ_ASSERT(!ret || (!check_placement && ret) ||
             (check_placement && ret == aAddr));
#else
  MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
#endif
  return ret;
}
|
2008-02-06 23:06:50 +00:00
|
|
|
#endif
|
|
|
|
|
2017-08-30 07:54:17 +00:00
|
|
|
#ifdef XP_DARWIN
|
2017-11-03 03:13:17 +00:00
|
|
|
#define VM_COPY_MIN (gPageSize * 32)
|
2011-05-22 03:27:00 +00:00
|
|
|
// Copy n bytes from src to dest via the Mach VM (Darwin only). Both
// pointers must be page-aligned and n must be at least VM_COPY_MIN
// (enforced by the assertions below).
static inline void
pages_copy(void* dest, const void* src, size_t n)
{

  MOZ_ASSERT((void*)((uintptr_t)dest & ~gPageSizeMask) == dest);
  MOZ_ASSERT(n >= VM_COPY_MIN);
  MOZ_ASSERT((void*)((uintptr_t)src & ~gPageSizeMask) == src);

  vm_copy(
    mach_task_self(), (vm_address_t)src, (vm_size_t)n, (vm_address_t)dest);
}
|
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// One-time setup: initialize the tree's lock and allocate its root level.
// Returns whether the root allocation succeeded.
template<size_t Bits>
bool
AddressRadixTree<Bits>::Init()
{
  mLock.Init();
  // The first level has 2^kBitsAtLevel1 slots; base_calloc zeroes them so
  // all children start out absent.
  mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
  return mRoot;
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Walk the tree for aKey and return the address of the leaf slot that holds
// its value. Returns nullptr when an intermediate level is missing; with
// aCreate, missing levels are allocated on the way down, so nullptr then
// only means allocation failure.
// NOTE(review): callers that pass aCreate=true appear to hold mLock (see
// Set()); confirm before relying on concurrent create.
template<size_t Bits>
void**
AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate)
{
  uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
  uintptr_t subkey;
  unsigned i, lshift, height, bits;
  void** node;
  void** child;

  for (i = lshift = 0, height = kHeight, node = mRoot; i < height - 1;
       i++, lshift += bits, node = child) {
    // The first level consumes kBitsAtLevel1 bits of the key; deeper
    // levels consume kBitsPerLevel each.
    bits = i ? kBitsPerLevel : kBitsAtLevel1;
    // Extract this level's index from the top of the remaining key bits.
    subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
    child = (void**)node[subkey];
    if (!child && aCreate) {
      child = (void**)base_calloc(1 << kBitsPerLevel, sizeof(void*));
      if (child) {
        node[subkey] = child;
      }
    }
    if (!child) {
      return nullptr;
    }
  }

  // node is a leaf, so it contains values rather than node
  // pointers.
  bits = i ? kBitsPerLevel : kBitsAtLevel1;
  subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
  return &node[subkey];
}
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Look up the value stored for aKey, or nullptr when absent. The fast path
// reads without taking mLock; the MOZ_DEBUG block re-reads under the lock
// to validate that the lockless read was consistent.
template<size_t Bits>
void*
AddressRadixTree<Bits>::Get(void* aKey)
{
  void* ret = nullptr;

  void** slot = GetSlot(aKey);

  if (slot) {
    ret = *slot;
  }
#ifdef MOZ_DEBUG
  MutexAutoLock lock(mLock);

  // Suppose that it were possible for a jemalloc-allocated chunk to be
  // munmap()ped, followed by a different allocator in another thread re-using
  // overlapping virtual memory, all without invalidating the cached rtree
  // value. The result would be a false positive (the rtree would claim that
  // jemalloc owns memory that it had actually discarded). I don't think this
  // scenario is possible, but the following assertion is a prudent sanity
  // check.
  if (!slot) {
    // In case a slot has been created in the meantime.
    slot = GetSlot(aKey);
  }
  if (slot) {
    // The MutexAutoLock above should act as a memory barrier, forcing
    // the compiler to emit a new read instruction for *slot.
    MOZ_ASSERT(ret == *slot);
  } else {
    MOZ_ASSERT(ret == nullptr);
  }
#endif
  return ret;
}
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<size_t Bits>
|
2017-10-06 01:49:24 +00:00
|
|
|
bool
|
2017-10-06 06:50:00 +00:00
|
|
|
AddressRadixTree<Bits>::Set(void* aKey, void* aValue)
|
2008-07-25 21:53:20 +00:00
|
|
|
{
|
2017-10-06 08:49:40 +00:00
|
|
|
MutexAutoLock lock(mLock);
|
2017-10-29 12:53:31 +00:00
|
|
|
void** slot = GetSlot(aKey, /* create = */ true);
|
2017-09-28 03:18:14 +00:00
|
|
|
if (slot) {
|
|
|
|
*slot = aValue;
|
|
|
|
}
|
2017-10-06 01:49:24 +00:00
|
|
|
return slot;
|
2008-07-25 21:53:20 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
|
|
|
|
// from upstream jemalloc 3.4.1 to fix Mozilla bug 956501.
|
2014-01-14 10:06:25 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Return the offset between a and the nearest aligned address at or below a.
|
2017-10-29 12:53:37 +00:00
|
|
|
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
|
|
|
|
((size_t)((uintptr_t)(a) & (alignment - 1)))
|
2011-10-25 20:25:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Return the smallest alignment multiple that is >= s.
|
2017-10-29 12:53:37 +00:00
|
|
|
#define ALIGNMENT_CEILING(s, alignment) \
|
|
|
|
(((s) + (alignment - 1)) & (~(alignment - 1)))
|
2011-10-25 20:25:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Given an over-sized mapping [addr, addr + alloc_size), keep only the
// size-byte window starting leadsize bytes in and give the rest back to the
// OS. Returns the start of the window, or nullptr (Windows only) when the
// unmap/remap race is lost.
static void*
pages_trim(void* addr, size_t alloc_size, size_t leadsize, size_t size)
{
  void* ret = (void*)((uintptr_t)addr + leadsize);

  MOZ_ASSERT(alloc_size >= leadsize + size);
#ifdef XP_WIN
  {
    void* new_addr;

    // Windows can't unmap part of a VirtualAlloc region, so release the
    // whole mapping and try to re-map just the window. Another thread may
    // grab the address in between; in that case, fail.
    pages_unmap(addr, alloc_size);
    new_addr = pages_map(ret, size);
    if (new_addr == ret) {
      return ret;
    }
    if (new_addr) {
      pages_unmap(new_addr, size);
    }
    return nullptr;
  }
#else
  {
    size_t trailsize = alloc_size - leadsize - size;

    // munmap the excess before and after the window in place.
    if (leadsize != 0) {
      pages_unmap(addr, leadsize);
    }
    if (trailsize != 0) {
      pages_unmap((void*)((uintptr_t)ret + size), trailsize);
    }
    return ret;
  }
#endif
}
|
2011-10-25 20:25:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Reliable fallback for aligned chunk allocation: over-allocate by up to
// (alignment - pagesize), then trim to an aligned window with pages_trim().
// Loops because pages_trim() can fail on Windows. Returns nullptr on OOM.
static void*
chunk_alloc_mmap_slow(size_t size, size_t alignment)
{
  void *ret, *pages;
  size_t alloc_size, leadsize;

  alloc_size = size + alignment - gPageSize;
  // Beware size_t wrap-around.
  if (alloc_size < size) {
    return nullptr;
  }
  do {
    pages = pages_map(nullptr, alloc_size);
    if (!pages) {
      return nullptr;
    }
    // Distance from the mapping start to the first aligned address in it.
    leadsize =
      ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages;
    ret = pages_trim(pages, alloc_size, leadsize, size);
  } while (!ret);

  MOZ_ASSERT(ret);
  return ret;
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static void*
|
2014-09-26 11:29:00 +00:00
|
|
|
chunk_alloc_mmap(size_t size, size_t alignment)
|
2011-10-25 20:25:50 +00:00
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
void* ret;
|
|
|
|
size_t offset;
|
|
|
|
|
|
|
|
// Ideally, there would be a way to specify alignment to mmap() (like
|
|
|
|
// NetBSD has), but in the absence of such a feature, we have to work
|
|
|
|
// hard to efficiently create aligned mappings. The reliable, but
|
|
|
|
// slow method is to create a mapping that is over-sized, then trim the
|
|
|
|
// excess. However, that always results in one or two calls to
|
|
|
|
// pages_unmap().
|
|
|
|
//
|
|
|
|
// Optimistically try mapping precisely the right amount before falling
|
|
|
|
// back to the slow method, with the expectation that the optimistic
|
|
|
|
// approach works most of the time.
|
|
|
|
ret = pages_map(nullptr, size);
|
|
|
|
if (!ret) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
|
|
|
|
if (offset != 0) {
|
|
|
|
pages_unmap(ret, size);
|
|
|
|
return chunk_alloc_mmap_slow(size, alignment);
|
|
|
|
}
|
2011-10-25 20:25:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
MOZ_ASSERT(ret);
|
|
|
|
return ret;
|
2011-10-25 20:25:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Purge and release the pages in the chunk of length `length` at `addr` to
|
|
|
|
// the OS.
|
|
|
|
// Returns whether the pages are guaranteed to be full of zeroes when the
|
|
|
|
// function returns.
|
|
|
|
// The force_zero argument explicitly requests that the memory is guaranteed
|
|
|
|
// to be full of zeroes when the function returns.
|
2017-07-07 00:46:04 +00:00
|
|
|
static bool
pages_purge(void* addr, size_t length, bool force_zero)
{
#ifdef MALLOC_DECOMMIT
  // Decommit both releases the pages and guarantees zeroes on recommit.
  pages_decommit(addr, length);
  return true;
#else
#ifndef XP_LINUX
  // Platforms below don't promise zeroed pages after the purge, so zero by
  // hand when the caller demands it.
  if (force_zero) {
    memset(addr, 0, length);
  }
#endif
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so resetting the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(length, kChunkSize - GetChunkOffsetForPtr(addr));
  while (length > 0) {
    VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
    addr = (void*)((uintptr_t)addr + pages_size);
    length -= pages_size;
    pages_size = std::min(length, kChunkSize);
  }
  return force_zero;
#else
#ifdef XP_LINUX
#define JEMALLOC_MADV_PURGE MADV_DONTNEED
#define JEMALLOC_MADV_ZEROS true
#else // FreeBSD and Darwin.
#define JEMALLOC_MADV_PURGE MADV_FREE
#define JEMALLOC_MADV_ZEROS force_zero
#endif
  // On Linux, successful MADV_DONTNEED means subsequent touches observe
  // zero-filled pages, hence JEMALLOC_MADV_ZEROS == true there.
  int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
  return JEMALLOC_MADV_ZEROS && err == 0;
#undef JEMALLOC_MADV_PURGE
#undef JEMALLOC_MADV_ZEROS
#endif
#endif
}
|
|
|
|
|
2017-10-25 23:36:26 +00:00
|
|
|
// Try to satisfy an aligned chunk allocation of aSize bytes from the trees
// of previously released chunks. Returns nullptr when no suitable region is
// available. *aZeroed (optional) reports whether the returned memory is
// known to be zero-filled.
static void*
chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed)
{
  extent_node_t key;

  // Worst-case size needed so an aligned aSize window fits somewhere in
  // the recycled region.
  size_t alloc_size = aSize + aAlignment - kChunkSize;
  // Beware size_t wrap-around.
  if (alloc_size < aSize) {
    return nullptr;
  }
  key.mAddr = nullptr;
  key.mSize = alloc_size;
  chunks_mtx.Lock();
  extent_node_t* node = gChunksBySize.SearchOrNext(&key);
  if (!node) {
    chunks_mtx.Unlock();
    return nullptr;
  }
  // Carve the aligned window out of the found region.
  size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->mAddr, aAlignment) -
                    (uintptr_t)node->mAddr;
  MOZ_ASSERT(node->mSize >= leadsize + aSize);
  size_t trailsize = node->mSize - leadsize - aSize;
  void* ret = (void*)((uintptr_t)node->mAddr + leadsize);
  ChunkType chunk_type = node->mChunkType;
  if (aZeroed) {
    *aZeroed = (chunk_type == ZEROED_CHUNK);
  }
  // Remove node from the tree.
  gChunksBySize.Remove(node);
  gChunksByAddress.Remove(node);
  if (leadsize != 0) {
    // Insert the leading space as a smaller chunk.
    node->mSize = leadsize;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }
  if (trailsize != 0) {
    // Insert the trailing space as a smaller chunk.
    if (!node) {
      // An additional node is required, but
      // base_node_alloc() can cause a new base chunk to be
      // allocated. Drop chunks_mtx in order to avoid
      // deadlock, and if node allocation fails, deallocate
      // the result before returning an error.
      chunks_mtx.Unlock();
      node = base_node_alloc();
      if (!node) {
        chunk_dealloc(ret, aSize, chunk_type);
        return nullptr;
      }
      chunks_mtx.Lock();
    }
    node->mAddr = (void*)((uintptr_t)(ret) + aSize);
    node->mSize = trailsize;
    node->mChunkType = chunk_type;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }

  gRecycledSize -= aSize;

  chunks_mtx.Unlock();

  // If a node was popped for the trailing space but never reinserted
  // (leading split consumed it first), give it back to the free list.
  if (node) {
    base_node_dealloc(node);
  }
#ifdef MALLOC_DECOMMIT
  if (!pages_commit(ret, aSize)) {
    return nullptr;
  }
  // pages_commit is guaranteed to zero the chunk.
  if (aZeroed) {
    *aZeroed = true;
  }
#endif
  return ret;
}
|
|
|
|
|
2017-08-30 07:53:10 +00:00
|
|
|
#ifdef XP_WIN
|
2017-10-29 12:53:31 +00:00
|
|
|
// On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
|
|
|
|
// awkward to recycle allocations of varying sizes. Therefore we only allow
|
|
|
|
// recycling when the size equals the chunksize, unless deallocation is entirely
|
|
|
|
// disabled.
|
2017-11-03 03:07:16 +00:00
|
|
|
#define CAN_RECYCLE(size) (size == kChunkSize)
|
2014-11-13 21:55:00 +00:00
|
|
|
#else
|
|
|
|
#define CAN_RECYCLE(size) true
|
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Allocates `size` bytes of system memory aligned for `alignment`.
|
|
|
|
// `base` indicates whether the memory will be used for the base allocator
|
|
|
|
// (e.g. base_alloc).
|
|
|
|
// `zeroed` is an outvalue that returns whether the allocated memory is
|
|
|
|
// guaranteed to be full of zeroes. It can be omitted when the caller doesn't
|
|
|
|
// care about the result.
|
2017-10-25 23:36:26 +00:00
|
|
|
static void*
|
2017-10-25 23:48:18 +00:00
|
|
|
chunk_alloc(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed)
|
2008-07-01 22:41:14 +00:00
|
|
|
{
|
2017-10-25 23:50:49 +00:00
|
|
|
void* ret = nullptr;
|
2008-07-01 22:41:14 +00:00
|
|
|
|
2017-10-25 23:48:18 +00:00
|
|
|
MOZ_ASSERT(aSize != 0);
|
2017-11-03 03:07:16 +00:00
|
|
|
MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
|
2017-10-25 23:48:18 +00:00
|
|
|
MOZ_ASSERT(aAlignment != 0);
|
2017-11-03 03:07:16 +00:00
|
|
|
MOZ_ASSERT((aAlignment & kChunkSizeMask) == 0);
|
2017-10-25 23:36:26 +00:00
|
|
|
|
|
|
|
// Base allocations can't be fulfilled by recycling because of
|
|
|
|
// possible deadlock or infinite recursion.
|
2017-10-25 23:48:18 +00:00
|
|
|
if (CAN_RECYCLE(aSize) && !aBase) {
|
|
|
|
ret = chunk_recycle(aSize, aAlignment, aZeroed);
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
2017-10-25 23:50:49 +00:00
|
|
|
if (!ret) {
|
|
|
|
ret = chunk_alloc_mmap(aSize, aAlignment);
|
|
|
|
if (aZeroed) {
|
|
|
|
*aZeroed = true;
|
|
|
|
}
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
2017-10-25 23:50:49 +00:00
|
|
|
if (ret && !aBase) {
|
2017-10-25 23:36:26 +00:00
|
|
|
if (!gChunkRTree.Set(ret, ret)) {
|
2017-10-25 23:48:18 +00:00
|
|
|
chunk_dealloc(ret, aSize, UNKNOWN_CHUNK);
|
2017-10-25 23:36:26 +00:00
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
}
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-10-26 01:34:37 +00:00
|
|
|
MOZ_ASSERT(GetChunkOffsetForPtr(ret) == 0);
|
2017-10-25 23:48:18 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-07-06 06:51:25 +00:00
|
|
|
static void
|
2017-10-24 05:11:14 +00:00
|
|
|
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed)
|
2017-07-06 06:51:25 +00:00
|
|
|
{
|
2017-10-24 05:11:14 +00:00
|
|
|
if (aZeroed == false) {
|
|
|
|
memset(aPtr, 0, aSize);
|
|
|
|
}
|
2017-07-06 06:51:25 +00:00
|
|
|
#ifdef MOZ_DEBUG
|
2017-10-24 05:11:14 +00:00
|
|
|
else {
|
|
|
|
size_t i;
|
|
|
|
size_t* p = (size_t*)(uintptr_t)aPtr;
|
2017-07-06 06:51:25 +00:00
|
|
|
|
2017-10-24 05:11:14 +00:00
|
|
|
for (i = 0; i < aSize / sizeof(size_t); i++) {
|
|
|
|
MOZ_ASSERT(p[i] == 0);
|
|
|
|
}
|
|
|
|
}
|
2017-07-06 06:51:25 +00:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2014-11-01 11:00:00 +00:00
|
|
|
static void
|
2017-10-25 23:48:18 +00:00
|
|
|
chunk_record(void* aChunk, size_t aSize, ChunkType aType)
|
2014-11-01 11:00:00 +00:00
|
|
|
{
|
2017-10-25 23:50:49 +00:00
|
|
|
extent_node_t key;
|
2014-11-01 11:00:00 +00:00
|
|
|
|
2017-10-25 23:48:18 +00:00
|
|
|
if (aType != ZEROED_CHUNK) {
|
|
|
|
if (pages_purge(aChunk, aSize, aType == HUGE_CHUNK)) {
|
|
|
|
aType = ZEROED_CHUNK;
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
|
|
|
}
|
2017-07-06 05:56:07 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Allocate a node before acquiring chunks_mtx even though it might not
|
|
|
|
// be needed, because base_node_alloc() may cause a new base chunk to
|
|
|
|
// be allocated, which could cause deadlock if chunks_mtx were already
|
|
|
|
// held.
|
2017-10-25 23:50:49 +00:00
|
|
|
UniqueBaseNode xnode(base_node_alloc());
|
2017-10-29 12:53:31 +00:00
|
|
|
// Use xprev to implement conditional deferred deallocation of prev.
|
2017-10-25 23:50:49 +00:00
|
|
|
UniqueBaseNode xprev;
|
2017-10-25 23:36:26 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// RAII deallocates xnode and xprev defined above after unlocking
|
|
|
|
// in order to avoid potential dead-locks
|
2017-10-25 23:50:49 +00:00
|
|
|
MutexAutoLock lock(chunks_mtx);
|
2017-11-08 08:20:20 +00:00
|
|
|
key.mAddr = (void*)((uintptr_t)aChunk + aSize);
|
2017-10-25 23:50:49 +00:00
|
|
|
extent_node_t* node = gChunksByAddress.SearchOrNext(&key);
|
2017-10-29 12:53:31 +00:00
|
|
|
// Try to coalesce forward.
|
2017-11-08 08:20:20 +00:00
|
|
|
if (node && node->mAddr == key.mAddr) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Coalesce chunk with the following address range. This does
|
|
|
|
// not change the position within gChunksByAddress, so only
|
|
|
|
// remove/insert from/into gChunksBySize.
|
2017-10-25 23:36:26 +00:00
|
|
|
gChunksBySize.Remove(node);
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mAddr = aChunk;
|
|
|
|
node->mSize += aSize;
|
|
|
|
if (node->mChunkType != aType) {
|
|
|
|
node->mChunkType = RECYCLED_CHUNK;
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
|
|
|
gChunksBySize.Insert(node);
|
|
|
|
} else {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Coalescing forward failed, so insert a new node.
|
2017-10-25 23:36:26 +00:00
|
|
|
if (!xnode) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// base_node_alloc() failed, which is an exceedingly
|
|
|
|
// unlikely failure. Leak chunk; its pages have
|
|
|
|
// already been purged, so this is only a virtual
|
|
|
|
// memory leak.
|
2017-10-25 23:50:49 +00:00
|
|
|
return;
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
2017-10-25 23:50:49 +00:00
|
|
|
node = xnode.release();
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mAddr = aChunk;
|
|
|
|
node->mSize = aSize;
|
|
|
|
node->mChunkType = aType;
|
2017-10-25 23:36:26 +00:00
|
|
|
gChunksByAddress.Insert(node);
|
|
|
|
gChunksBySize.Insert(node);
|
|
|
|
}
|
2014-11-01 11:00:00 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Try to coalesce backward.
|
2017-10-25 23:50:49 +00:00
|
|
|
extent_node_t* prev = gChunksByAddress.Prev(node);
|
2017-11-08 08:20:20 +00:00
|
|
|
if (prev && (void*)((uintptr_t)prev->mAddr + prev->mSize) == aChunk) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Coalesce chunk with the previous address range. This does
|
|
|
|
// not change the position within gChunksByAddress, so only
|
|
|
|
// remove/insert node from/into gChunksBySize.
|
2017-10-25 23:36:26 +00:00
|
|
|
gChunksBySize.Remove(prev);
|
|
|
|
gChunksByAddress.Remove(prev);
|
|
|
|
|
|
|
|
gChunksBySize.Remove(node);
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mAddr = prev->mAddr;
|
|
|
|
node->mSize += prev->mSize;
|
|
|
|
if (node->mChunkType != prev->mChunkType) {
|
|
|
|
node->mChunkType = RECYCLED_CHUNK;
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
|
|
|
gChunksBySize.Insert(node);
|
2014-11-01 11:00:00 +00:00
|
|
|
|
2017-10-25 23:50:49 +00:00
|
|
|
xprev.reset(prev);
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
2014-11-01 11:00:00 +00:00
|
|
|
|
2017-10-26 00:43:43 +00:00
|
|
|
gRecycledSize += aSize;
|
2014-11-01 11:00:00 +00:00
|
|
|
}
|
|
|
|
|
2008-02-06 23:06:50 +00:00
|
|
|
static void
|
2017-10-25 23:48:18 +00:00
|
|
|
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-25 23:48:18 +00:00
|
|
|
MOZ_ASSERT(aChunk);
|
2017-10-26 01:34:37 +00:00
|
|
|
MOZ_ASSERT(GetChunkOffsetForPtr(aChunk) == 0);
|
2017-10-25 23:48:18 +00:00
|
|
|
MOZ_ASSERT(aSize != 0);
|
2017-11-03 03:07:16 +00:00
|
|
|
MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-25 23:48:18 +00:00
|
|
|
gChunkRTree.Unset(aChunk);
|
2017-10-25 23:36:26 +00:00
|
|
|
|
2017-10-25 23:48:18 +00:00
|
|
|
if (CAN_RECYCLE(aSize)) {
|
2017-10-26 00:51:00 +00:00
|
|
|
size_t recycled_so_far = gRecycledSize;
|
2017-10-25 23:36:26 +00:00
|
|
|
// In case some race condition put us above the limit.
|
2017-10-26 00:43:43 +00:00
|
|
|
if (recycled_so_far < gRecycleLimit) {
|
|
|
|
size_t recycle_remaining = gRecycleLimit - recycled_so_far;
|
2017-10-25 23:36:26 +00:00
|
|
|
size_t to_recycle;
|
2017-10-25 23:48:18 +00:00
|
|
|
if (aSize > recycle_remaining) {
|
2017-10-25 23:36:26 +00:00
|
|
|
to_recycle = recycle_remaining;
|
|
|
|
// Drop pages that would overflow the recycle limit
|
2017-10-25 23:48:18 +00:00
|
|
|
pages_trim(aChunk, aSize, 0, to_recycle);
|
2017-10-25 23:36:26 +00:00
|
|
|
} else {
|
2017-10-25 23:48:18 +00:00
|
|
|
to_recycle = aSize;
|
2017-10-25 23:36:26 +00:00
|
|
|
}
|
2017-10-25 23:48:18 +00:00
|
|
|
chunk_record(aChunk, to_recycle, aType);
|
2017-10-25 23:36:26 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2017-08-09 23:49:51 +00:00
|
|
|
|
2017-10-25 23:48:18 +00:00
|
|
|
pages_unmap(aChunk, aSize);
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-08-09 23:49:51 +00:00
|
|
|
#undef CAN_RECYCLE
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End chunk management functions.
|
|
|
|
// ***************************************************************************
|
|
|
|
// Begin arena.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static inline arena_t*
|
Bug 1364358 - Keep track of mozjemalloc thread-local arenas. r=erahm
jemalloc_stats, as well as pre/post-fork hooks are using the `arenas`
list along the `narenas` count to iterate over all arenas setup by
mozjemalloc. Up until previous commit, that was used for automatic
multiple arenas support, which is now removed.
But mozjemalloc still supports running with multiple arenas, in the form
of opted-in, per-thread arenas. After bug 1361258, those arenas weren't
tracked, and now that `arenas` only contains the default arena, we can
now fill it with those thread-local arenas.
Keeping the automatic multiple arenas support, which we don't use and
don't really plan to, would have meant using a separate list for them.
--HG--
extra : rebase_source : f4eb55a65df8cdebff84ca709738f906d0c3c6f5
2017-05-12 12:21:11 +00:00
|
|
|
thread_local_arena(bool enabled)
|
2017-05-10 12:31:51 +00:00
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_t* arena;
|
2017-09-12 07:29:11 +00:00
|
|
|
|
|
|
|
if (enabled) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// The arena will essentially be leaked if this function is
|
|
|
|
// called with `false`, but it doesn't matter at the moment.
|
|
|
|
// because in practice nothing actually calls this function
|
|
|
|
// with `false`, except maybe at shutdown.
|
2017-11-16 22:27:35 +00:00
|
|
|
arena =
|
|
|
|
gArenas.CreateArena(/* IsPrivate = */ false, /* Params = */ nullptr);
|
2017-09-12 07:29:11 +00:00
|
|
|
} else {
|
2017-10-27 22:13:58 +00:00
|
|
|
arena = gArenas.GetDefault();
|
2017-09-12 07:29:11 +00:00
|
|
|
}
|
|
|
|
thread_arena.set(arena);
|
|
|
|
return arena;
|
2017-05-10 12:31:51 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline void
|
2017-08-31 01:29:11 +00:00
|
|
|
MozJemalloc::jemalloc_thread_local_arena(bool aEnabled)
|
Bug 1364358 - Keep track of mozjemalloc thread-local arenas. r=erahm
jemalloc_stats, as well as pre/post-fork hooks are using the `arenas`
list along the `narenas` count to iterate over all arenas setup by
mozjemalloc. Up until previous commit, that was used for automatic
multiple arenas support, which is now removed.
But mozjemalloc still supports running with multiple arenas, in the form
of opted-in, per-thread arenas. After bug 1361258, those arenas weren't
tracked, and now that `arenas` only contains the default arena, we can
now fill it with those thread-local arenas.
Keeping the automatic multiple arenas support, which we don't use and
don't really plan to, would have meant using a separate list for them.
--HG--
extra : rebase_source : f4eb55a65df8cdebff84ca709738f906d0c3c6f5
2017-05-12 12:21:11 +00:00
|
|
|
{
|
2017-10-27 08:25:18 +00:00
|
|
|
if (malloc_init()) {
|
|
|
|
thread_local_arena(aEnabled);
|
|
|
|
}
|
Bug 1364358 - Keep track of mozjemalloc thread-local arenas. r=erahm
jemalloc_stats, as well as pre/post-fork hooks are using the `arenas`
list along the `narenas` count to iterate over all arenas setup by
mozjemalloc. Up until previous commit, that was used for automatic
multiple arenas support, which is now removed.
But mozjemalloc still supports running with multiple arenas, in the form
of opted-in, per-thread arenas. After bug 1361258, those arenas weren't
tracked, and now that `arenas` only contains the default arena, we can
now fill it with those thread-local arenas.
Keeping the automatic multiple arenas support, which we don't use and
don't really plan to, would have meant using a separate list for them.
--HG--
extra : rebase_source : f4eb55a65df8cdebff84ca709738f906d0c3c6f5
2017-05-12 12:21:11 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Choose an arena based on a per-thread value.
|
2017-10-29 12:53:37 +00:00
|
|
|
static inline arena_t*
|
Bug 1397101 - Only use a thread local arena for small sizes. r=njn
The bin-unused count in memory reports indicates how much memory is
used by runs of small and sub-page allocations that is not actually
allocated. This is generally thought as an indicator of fragmentation.
While this is generally true, with the use of thread local arenas by
stylo, combined with how stylo allocates memory, it ends up also being
an indicator of wasted memory.
For instance, over the lifetime of an AWSY iteration, there are only a
few allocations that ends up in the bucket for 2048 allocated bytes. In
the "worst" case, there's only one. But the run size for such
allocations is 132KiB. Which means just because we're allocating one
buffer of size between 1024 and 2048 bytes, we end up wasting 130+KiB.
Per thread.
Something similar happens with size classes of 512 and 1024, where the
run size is respectively 32KiB and 64KiB, and where there's at most a
handful of allocations of each class ever happening per thread.
Overall, an allocation log from a full AWSY iteration reveals that there
are only 448 of 860700 allocations happening on the stylo arenas that
involve sizes above (and excluding) 512 bytes, so 0.05%.
While there are improvements that can be done to mozjemalloc so that it
doesn't waste more than one page per sub-page size class, they are
changes that are too low-level to land at this time of the release
cycle. However, considering the numbers above and the fact that the
stylo arenas are only really meant to avoid lock contention during the
heavy parallel work involved, a short term, low risk, strategy is to
just delegate all sub-page (> 512, < 4096) and large (>= 4096) to the
main arena. Technically speaking, only sub-page allocations are causing
this waste, but it's more consistent to just delegate everything above
512 bytes.
This should save 132KiB + 64KiB = 196KiB per stylo thread.
--HG--
extra : rebase_source : c7233d60305365e76aa124045b1c9492068d9415
2017-09-13 22:36:39 +00:00
|
|
|
choose_arena(size_t size)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_t* ret = nullptr;
|
2017-10-27 07:33:14 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// We can only use TLS if this is a PIC library, since for the static
|
|
|
|
// library version, libc's malloc is used by TLS allocation, which
|
|
|
|
// introduces a bootstrapping issue.
|
|
|
|
|
2018-02-07 21:28:44 +00:00
|
|
|
if (size > kMaxQuantumClass) {
|
|
|
|
// Force the default arena for larger allocations.
|
|
|
|
ret = gArenas.GetDefault();
|
|
|
|
} else {
|
|
|
|
// Check TLS to see if our thread has requested a pinned arena.
|
Bug 1397101 - Only use a thread local arena for small sizes. r=njn
The bin-unused count in memory reports indicates how much memory is
used by runs of small and sub-page allocations that is not actually
allocated. This is generally thought as an indicator of fragmentation.
While this is generally true, with the use of thread local arenas by
stylo, combined with how stylo allocates memory, it ends up also being
an indicator of wasted memory.
For instance, over the lifetime of an AWSY iteration, there are only a
few allocations that ends up in the bucket for 2048 allocated bytes. In
the "worst" case, there's only one. But the run size for such
allocations is 132KiB. Which means just because we're allocating one
buffer of size between 1024 and 2048 bytes, we end up wasting 130+KiB.
Per thread.
Something similar happens with size classes of 512 and 1024, where the
run size is respectively 32KiB and 64KiB, and where there's at most a
handful of allocations of each class ever happening per thread.
Overall, an allocation log from a full AWSY iteration reveals that there
are only 448 of 860700 allocations happening on the stylo arenas that
involve sizes above (and excluding) 512 bytes, so 0.05%.
While there are improvements that can be done to mozjemalloc so that it
doesn't waste more than one page per sub-page size class, they are
changes that are too low-level to land at this time of the release
cycle. However, considering the numbers above and the fact that the
stylo arenas are only really meant to avoid lock contention during the
heavy parallel work involved, a short term, low risk, strategy is to
just delegate all sub-page (> 512, < 4096) and large (>= 4096) to the
main arena. Technically speaking, only sub-page allocations are causing
this waste, but it's more consistent to just delegate everything above
512 bytes.
This should save 132KiB + 64KiB = 196KiB per stylo thread.
--HG--
extra : rebase_source : c7233d60305365e76aa124045b1c9492068d9415
2017-09-13 22:36:39 +00:00
|
|
|
ret = thread_arena.get();
|
2018-02-07 21:28:44 +00:00
|
|
|
if (!ret) {
|
|
|
|
// Nothing in TLS. Pin this thread to the default arena.
|
|
|
|
ret = thread_local_arena(false);
|
|
|
|
}
|
Bug 1397101 - Only use a thread local arena for small sizes. r=njn
The bin-unused count in memory reports indicates how much memory is
used by runs of small and sub-page allocations that is not actually
allocated. This is generally thought as an indicator of fragmentation.
While this is generally true, with the use of thread local arenas by
stylo, combined with how stylo allocates memory, it ends up also being
an indicator of wasted memory.
For instance, over the lifetime of an AWSY iteration, there are only a
few allocations that ends up in the bucket for 2048 allocated bytes. In
the "worst" case, there's only one. But the run size for such
allocations is 132KiB. Which means just because we're allocating one
buffer of size between 1024 and 2048 bytes, we end up wasting 130+KiB.
Per thread.
Something similar happens with size classes of 512 and 1024, where the
run size is respectively 32KiB and 64KiB, and where there's at most a
handful of allocations of each class ever happening per thread.
Overall, an allocation log from a full AWSY iteration reveals that there
are only 448 of 860700 allocations happening on the stylo arenas that
involve sizes above (and excluding) 512 bytes, so 0.05%.
While there are improvements that can be done to mozjemalloc so that it
doesn't waste more than one page per sub-page size class, they are
changes that are too low-level to land at this time of the release
cycle. However, considering the numbers above and the fact that the
stylo arenas are only really meant to avoid lock contention during the
heavy parallel work involved, a short term, low risk, strategy is to
just delegate all sub-page (> 512, < 4096) and large (>= 4096) to the
main arena. Technically speaking, only sub-page allocations are causing
this waste, but it's more consistent to just delegate everything above
512 bytes.
This should save 132KiB + 64KiB = 196KiB per stylo thread.
--HG--
extra : rebase_source : c7233d60305365e76aa124045b1c9492068d9415
2017-09-13 22:36:39 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-12 07:29:11 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(ret);
|
2017-10-29 12:53:20 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static inline void*
|
|
|
|
arena_run_reg_alloc(arena_run_t* run, arena_bin_t* bin)
|
|
|
|
{
|
|
|
|
void* ret;
|
|
|
|
unsigned i, mask, bit, regind;
|
|
|
|
|
2017-11-03 06:23:44 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
|
|
|
|
MOZ_ASSERT(run->mRegionsMinElement < bin->mRunNumRegionsMask);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
2017-11-03 06:23:44 +00:00
|
|
|
// Move the first check outside the loop, so that run->mRegionsMinElement can
|
2017-10-29 12:53:37 +00:00
|
|
|
// be updated unconditionally, without the possibility of updating it
|
|
|
|
// multiple times.
|
2017-11-03 06:23:44 +00:00
|
|
|
i = run->mRegionsMinElement;
|
|
|
|
mask = run->mRegionsMask[i];
|
2017-10-29 12:53:37 +00:00
|
|
|
if (mask != 0) {
|
|
|
|
// Usable allocation found.
|
2017-10-30 08:44:16 +00:00
|
|
|
bit = CountTrailingZeroes32(mask);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
2017-11-01 07:47:59 +00:00
|
|
|
regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
|
2017-11-03 00:26:07 +00:00
|
|
|
MOZ_ASSERT(regind < bin->mRunNumRegions);
|
|
|
|
ret = (void*)(((uintptr_t)run) + bin->mRunFirstRegionOffset +
|
|
|
|
(bin->mSizeClass * regind));
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
// Clear bit.
|
|
|
|
mask ^= (1U << bit);
|
2017-11-03 06:23:44 +00:00
|
|
|
run->mRegionsMask[i] = mask;
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-11-03 00:26:07 +00:00
|
|
|
for (i++; i < bin->mRunNumRegionsMask; i++) {
|
2017-11-03 06:23:44 +00:00
|
|
|
mask = run->mRegionsMask[i];
|
2017-10-29 12:53:37 +00:00
|
|
|
if (mask != 0) {
|
|
|
|
// Usable allocation found.
|
2017-10-30 08:44:16 +00:00
|
|
|
bit = CountTrailingZeroes32(mask);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
2017-11-01 07:47:59 +00:00
|
|
|
regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
|
2017-11-03 00:26:07 +00:00
|
|
|
MOZ_ASSERT(regind < bin->mRunNumRegions);
|
|
|
|
ret = (void*)(((uintptr_t)run) + bin->mRunFirstRegionOffset +
|
|
|
|
(bin->mSizeClass * regind));
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
// Clear bit.
|
|
|
|
mask ^= (1U << bit);
|
2017-11-03 06:23:44 +00:00
|
|
|
run->mRegionsMask[i] = mask;
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
// Make a note that nothing before this element
|
|
|
|
// contains a free region.
|
2017-11-03 06:23:44 +00:00
|
|
|
run->mRegionsMinElement = i; // Low payoff: + (mask == 0);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Not reached.
|
|
|
|
MOZ_DIAGNOSTIC_ASSERT(0);
|
|
|
|
return nullptr;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_run_reg_dalloc(arena_run_t* run, arena_bin_t* bin, void* ptr, size_t size)
|
|
|
|
{
|
|
|
|
// To divide by a number D that is not a power of two we multiply
|
|
|
|
// by (2^21 / D) and then right shift by 21 positions.
|
|
|
|
//
|
|
|
|
// X / D
|
|
|
|
//
|
|
|
|
// becomes
|
|
|
|
//
|
2017-11-03 01:10:50 +00:00
|
|
|
// (X * size_invs[(D / kQuantum) - 3]) >> SIZE_INV_SHIFT
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
#define SIZE_INV_SHIFT 21
|
2017-11-03 01:10:50 +00:00
|
|
|
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s * kQuantum)) + 1)
|
2017-10-29 12:53:37 +00:00
|
|
|
// clang-format off
|
|
|
|
static const unsigned size_invs[] = {
|
|
|
|
SIZE_INV(3),
|
|
|
|
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
|
|
|
|
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
|
|
|
|
SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
|
|
|
|
SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
|
|
|
|
SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
|
|
|
|
SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
|
|
|
|
SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
|
|
|
|
};
|
|
|
|
// clang-format on
|
|
|
|
unsigned diff, regind, elm, bit;
|
|
|
|
|
2017-11-03 06:23:44 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
|
2017-11-03 03:21:53 +00:00
|
|
|
static_assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >=
|
|
|
|
kNumQuantumClasses,
|
2017-11-03 02:41:30 +00:00
|
|
|
"size_invs doesn't have enough values");
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
// Avoid doing division with a variable divisor if possible. Using
|
|
|
|
// actual division here can reduce allocator throughput by over 20%!
|
2017-11-03 00:26:07 +00:00
|
|
|
diff =
|
|
|
|
(unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);
|
2017-11-08 07:20:40 +00:00
|
|
|
if (mozilla::IsPowerOfTwo(size)) {
|
|
|
|
regind = diff >> FloorLog2(size);
|
2017-11-03 01:10:50 +00:00
|
|
|
} else if (size <= ((sizeof(size_invs) / sizeof(unsigned)) * kQuantum) + 2) {
|
|
|
|
regind = size_invs[(size / kQuantum) - 3] * diff;
|
2017-10-29 12:53:37 +00:00
|
|
|
regind >>= SIZE_INV_SHIFT;
|
|
|
|
} else {
|
|
|
|
// size_invs isn't large enough to handle this size class, so
|
|
|
|
// calculate regind using actual division. This only happens
|
|
|
|
// if the user increases small_max via the 'S' runtime
|
|
|
|
// configuration option.
|
|
|
|
regind = diff / size;
|
|
|
|
};
|
|
|
|
MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
|
2017-11-03 00:26:07 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(regind < bin->mRunNumRegions);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
2017-11-01 07:47:59 +00:00
|
|
|
elm = regind >> (LOG2(sizeof(int)) + 3);
|
2017-11-03 06:23:44 +00:00
|
|
|
if (elm < run->mRegionsMinElement) {
|
|
|
|
run->mRegionsMinElement = elm;
|
2017-10-29 12:53:37 +00:00
|
|
|
}
|
2017-11-01 07:47:59 +00:00
|
|
|
bit = regind - (elm << (LOG2(sizeof(int)) + 3));
|
2017-11-03 06:23:44 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT((run->mRegionsMask[elm] & (1U << bit)) == 0);
|
|
|
|
run->mRegionsMask[elm] |= (1U << bit);
|
2008-02-06 23:06:50 +00:00
|
|
|
#undef SIZE_INV
|
|
|
|
#undef SIZE_INV_SHIFT
|
|
|
|
}
|
|
|
|
|
2017-10-27 01:31:50 +00:00
|
|
|
bool
|
2017-09-15 09:08:23 +00:00
|
|
|
arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-09-15 09:08:23 +00:00
|
|
|
arena_chunk_t* chunk;
|
|
|
|
size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-10-26 01:34:37 +00:00
|
|
|
chunk = GetChunkForPtr(aRun);
|
2017-09-15 09:08:23 +00:00
|
|
|
old_ndirty = chunk->ndirty;
|
2017-11-03 03:13:17 +00:00
|
|
|
run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
|
|
|
|
total_pages = (chunk->map[run_ind].bits & ~gPageSizeMask) >> gPageSize2Pow;
|
|
|
|
need_pages = (aSize >> gPageSize2Pow);
|
2017-09-15 09:08:23 +00:00
|
|
|
MOZ_ASSERT(need_pages > 0);
|
|
|
|
MOZ_ASSERT(need_pages <= total_pages);
|
|
|
|
rem_pages = total_pages - need_pages;
|
|
|
|
|
|
|
|
for (i = 0; i < need_pages; i++) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Commit decommitted pages if necessary. If a decommitted
|
|
|
|
// page is encountered, commit all needed adjacent decommitted
|
|
|
|
// pages in one operation, in order to reduce system call
|
|
|
|
// overhead.
|
2017-09-15 09:08:23 +00:00
|
|
|
if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
|
|
|
|
size_t j;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Advance i+j to just past the index of the last page
|
|
|
|
// to commit. Clear CHUNK_MAP_DECOMMITTED and
|
|
|
|
// CHUNK_MAP_MADVISED along the way.
|
2017-10-29 12:53:37 +00:00
|
|
|
for (j = 0; i + j < need_pages && (chunk->map[run_ind + i + j].bits &
|
|
|
|
CHUNK_MAP_MADVISED_OR_DECOMMITTED);
|
|
|
|
j++) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// DECOMMITTED and MADVISED are mutually exclusive.
|
2017-09-15 09:08:23 +00:00
|
|
|
MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
|
2017-10-29 12:53:37 +00:00
|
|
|
chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
|
2017-09-15 09:08:23 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
chunk->map[run_ind + i + j].bits &= ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
|
2017-09-15 09:08:23 +00:00
|
|
|
}
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#ifdef MALLOC_DECOMMIT
|
2017-10-27 01:31:50 +00:00
|
|
|
bool committed = pages_commit(
|
2017-11-03 03:13:17 +00:00
|
|
|
(void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
|
|
|
|
j << gPageSize2Pow);
|
2017-10-27 01:31:50 +00:00
|
|
|
// pages_commit zeroes pages, so mark them as such if it succeeded.
|
|
|
|
// That's checked further below to avoid manually zeroing the pages.
|
2017-10-26 09:12:04 +00:00
|
|
|
for (size_t k = 0; k < j; k++) {
|
2017-10-27 01:31:50 +00:00
|
|
|
chunk->map[run_ind + i + k].bits |=
|
|
|
|
committed ? CHUNK_MAP_ZEROED : CHUNK_MAP_DECOMMITTED;
|
|
|
|
}
|
|
|
|
if (!committed) {
|
|
|
|
return false;
|
2017-10-26 09:12:04 +00:00
|
|
|
}
|
2017-10-29 12:53:37 +00:00
|
|
|
#endif
|
2011-10-10 17:54:42 +00:00
|
|
|
|
2017-09-15 09:08:23 +00:00
|
|
|
mStats.committed += j;
|
|
|
|
}
|
2017-10-26 09:12:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mRunsAvail.Remove(&chunk->map[run_ind]);
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Keep track of trailing unused pages for later use.
|
2017-10-26 09:12:04 +00:00
|
|
|
if (rem_pages > 0) {
|
2017-10-29 12:53:37 +00:00
|
|
|
chunk->map[run_ind + need_pages].bits =
|
2017-11-03 03:13:17 +00:00
|
|
|
(rem_pages << gPageSize2Pow) |
|
|
|
|
(chunk->map[run_ind + need_pages].bits & gPageSizeMask);
|
2017-10-29 12:53:37 +00:00
|
|
|
chunk->map[run_ind + total_pages - 1].bits =
|
2017-11-03 03:13:17 +00:00
|
|
|
(rem_pages << gPageSize2Pow) |
|
|
|
|
(chunk->map[run_ind + total_pages - 1].bits & gPageSizeMask);
|
2017-10-29 12:53:37 +00:00
|
|
|
mRunsAvail.Insert(&chunk->map[run_ind + need_pages]);
|
2017-10-26 09:12:04 +00:00
|
|
|
}
|
2011-10-10 17:54:42 +00:00
|
|
|
|
2017-10-26 09:12:04 +00:00
|
|
|
for (i = 0; i < need_pages; i++) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Zero if necessary.
|
2017-09-15 09:08:23 +00:00
|
|
|
if (aZero) {
|
|
|
|
if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
|
2017-11-03 03:13:17 +00:00
|
|
|
memset((void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
|
2017-10-29 12:53:37 +00:00
|
|
|
0,
|
2017-11-03 03:13:17 +00:00
|
|
|
gPageSize);
|
2017-10-29 12:53:31 +00:00
|
|
|
// CHUNK_MAP_ZEROED is cleared below.
|
2017-09-15 09:08:23 +00:00
|
|
|
}
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Update dirty page accounting.
|
2017-09-15 09:08:23 +00:00
|
|
|
if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
|
|
|
|
chunk->ndirty--;
|
|
|
|
mNumDirty--;
|
2017-10-29 12:53:31 +00:00
|
|
|
// CHUNK_MAP_DIRTY is cleared below.
|
2017-09-15 09:08:23 +00:00
|
|
|
}
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Initialize the chunk map.
|
2017-09-15 09:08:23 +00:00
|
|
|
if (aLarge) {
|
|
|
|
chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
|
|
|
|
} else {
|
|
|
|
chunk->map[run_ind + i].bits = size_t(aRun) | CHUNK_MAP_ALLOCATED;
|
|
|
|
}
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Set the run size only in the first element for large runs. This is
|
|
|
|
// primarily a debugging aid, since the lack of size info for trailing
|
|
|
|
// pages only matters if the application tries to operate on an
|
|
|
|
// interior pointer.
|
2017-09-15 09:08:23 +00:00
|
|
|
if (aLarge) {
|
|
|
|
chunk->map[run_ind].bits |= aSize;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-15 09:08:23 +00:00
|
|
|
if (chunk->ndirty == 0 && old_ndirty > 0) {
|
2017-09-26 06:06:00 +00:00
|
|
|
mChunksDirty.Remove(chunk);
|
2017-09-15 09:08:23 +00:00
|
|
|
}
|
2017-10-27 01:31:50 +00:00
|
|
|
return true;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 08:43:36 +00:00
|
|
|
// Initialize a freshly obtained chunk as an arena chunk: set up its header,
// build the page map as one maximal free run, and register that run in
// mRunsAvail. aZeroed tells whether the underlying memory is known to be
// zero-filled (see the WARNING below for what !aZeroed implies).
void
arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed)
{
  size_t i;
  // WARNING: The following relies on !aZeroed meaning "used to be an arena
  // chunk".
  // When the chunk we're initializing as an arena chunk is zeroed, we
  // mark all runs as decommitted and zeroed.
  // When it is not, which we can assume means it's a recycled arena chunk,
  // all it can contain is an arena chunk header (which we're overwriting),
  // and zeroed or poisoned memory (because a recycled arena chunk will
  // have been emptied before being recycled). In that case, we can get
  // away with reusing the chunk as-is, marking all runs as madvised.

  size_t flags =
    aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED : CHUNK_MAP_MADVISED;

  mStats.mapped += kChunkSize;

  aChunk->arena = this;

  // Claim that no pages are in use, since the header is merely overhead.
  aChunk->ndirty = 0;

  // Initialize the map to contain one maximal free untouched run.
#ifdef MALLOC_DECOMMIT
  // The run starts right after the header pages; only needed here for the
  // pages_decommit() call below.
  arena_run_t* run =
    (arena_run_t*)(uintptr_t(aChunk) + (gChunkHeaderNumPages << gPageSize2Pow));
#endif

  // Header pages carry no flags; the first and last data pages carry the
  // run size (gMaxLargeClass) so the run can be found from either end.
  for (i = 0; i < gChunkHeaderNumPages; i++) {
    aChunk->map[i].bits = 0;
  }
  aChunk->map[i].bits = gMaxLargeClass | flags;
  for (i++; i < gChunkNumPages - 1; i++) {
    aChunk->map[i].bits = flags;
  }
  aChunk->map[gChunkNumPages - 1].bits = gMaxLargeClass | flags;

#ifdef MALLOC_DECOMMIT
  // Start out decommitted, in order to force a closer correspondence
  // between dirty pages and committed untouched pages.
  pages_decommit(run, gMaxLargeClass);
#endif
  // Only the header pages count as committed at this point.
  mStats.committed += gChunkHeaderNumPages;

  // Insert the run into the tree of available runs.
  mRunsAvail.Insert(&aChunk->map[gChunkHeaderNumPages]);

#ifdef MALLOC_DOUBLE_PURGE
  // Placement-new the intrusive list element so the chunk can later be
  // linked into mChunksMAdvised.
  new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
#endif
}
|
|
|
|
|
2017-09-15 08:50:48 +00:00
|
|
|
void
|
|
|
|
arena_t::DeallocChunk(arena_chunk_t* aChunk)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-09-15 08:50:48 +00:00
|
|
|
if (mSpare) {
|
|
|
|
if (mSpare->ndirty > 0) {
|
2017-09-26 06:06:00 +00:00
|
|
|
aChunk->arena->mChunksDirty.Remove(mSpare);
|
2017-09-15 08:50:48 +00:00
|
|
|
mNumDirty -= mSpare->ndirty;
|
|
|
|
mStats.committed -= mSpare->ndirty;
|
|
|
|
}
|
2011-10-24 17:23:47 +00:00
|
|
|
|
|
|
|
#ifdef MALLOC_DOUBLE_PURGE
|
2017-09-15 08:50:48 +00:00
|
|
|
if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
|
|
|
|
mChunksMAdvised.remove(mSpare);
|
|
|
|
}
|
2011-10-24 17:23:47 +00:00
|
|
|
#endif
|
|
|
|
|
2017-11-03 03:07:16 +00:00
|
|
|
chunk_dealloc((void*)mSpare, kChunkSize, ARENA_CHUNK);
|
|
|
|
mStats.mapped -= kChunkSize;
|
2017-11-03 03:16:11 +00:00
|
|
|
mStats.committed -= gChunkHeaderNumPages;
|
2017-09-15 08:50:48 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-12-03 05:22:05 +00:00
|
|
|
// Remove run from the tree of available runs, so that the arena does not use
|
|
|
|
// it. Dirty page flushing only uses the tree of dirty chunks, so leaving this
|
2017-10-29 12:53:31 +00:00
|
|
|
// chunk in the chunks_* trees is sufficient for that purpose.
|
2017-11-03 03:16:11 +00:00
|
|
|
mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-09-15 08:50:48 +00:00
|
|
|
mSpare = aChunk;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 08:57:11 +00:00
|
|
|
// Obtain a run of aSize bytes (a page multiple, at most gMaxLargeClass),
// trying in order: an existing free run, the cached spare chunk, and
// finally a freshly allocated chunk. Returns nullptr on failure.
arena_run_t*
arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero)
{
  MOZ_ASSERT(aSize <= gMaxLargeClass);
  MOZ_ASSERT((aSize & gPageSizeMask) == 0);

  arena_run_t* run = nullptr;

  // Best-fit search among this arena's free runs.
  arena_chunk_map_t key;
  key.bits = aSize | CHUNK_MAP_KEY;
  arena_chunk_map_t* elem = mRunsAvail.SearchOrNext(&key);
  if (elem) {
    // Translate the map element back into an address inside its chunk.
    arena_chunk_t* chunk = GetChunkForPtr(elem);
    size_t page_index =
      (uintptr_t(elem) - uintptr_t(chunk->map)) / sizeof(arena_chunk_map_t);
    run = (arena_run_t*)(uintptr_t(chunk) + (page_index << gPageSize2Pow));
  } else if (mSpare) {
    // Reuse the cached spare chunk.
    arena_chunk_t* chunk = mSpare;
    mSpare = nullptr;
    run = (arena_run_t*)(uintptr_t(chunk) +
                         (gChunkHeaderNumPages << gPageSize2Pow));
    // Make the spare's maximal run available to the arena again.
    mRunsAvail.Insert(&chunk->map[gChunkHeaderNumPages]);
  } else {
    // No usable runs anywhere; create a new chunk to carve the run from.
    bool zeroed;
    arena_chunk_t* chunk =
      (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false, &zeroed);
    if (!chunk) {
      return nullptr;
    }

    InitChunk(chunk, zeroed);
    run = (arena_run_t*)(uintptr_t(chunk) +
                         (gChunkHeaderNumPages << gPageSize2Pow));
  }

  // Carve the requested size out of the chosen run, updating the page map.
  return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
}
|
|
|
|
|
2017-09-15 08:32:21 +00:00
|
|
|
// Release dirty pages back to the OS until the arena's dirty-page count is
// at most half the target (mMaxDirty, or effectively zero when aAll is
// true). Pages are either decommitted (MALLOC_DECOMMIT) or madvised free.
void
arena_t::Purge(bool aAll)
{
  arena_chunk_t* chunk;
  size_t i, npages;
  // If all is set purge all dirty pages.
  // dirty_max of 1 makes the loop below run until mNumDirty == 0.
  size_t dirty_max = aAll ? 1 : mMaxDirty;
#ifdef MOZ_DEBUG
  // Sanity-check that the per-chunk dirty counters add up to the arena's
  // total. (This `chunk` intentionally shadows the outer one.)
  size_t ndirty = 0;
  for (auto chunk : mChunksDirty.iter()) {
    ndirty += chunk->ndirty;
  }
  MOZ_ASSERT(ndirty == mNumDirty);
#endif
  MOZ_DIAGNOSTIC_ASSERT(aAll || (mNumDirty > mMaxDirty));

  // Iterate downward through chunks until enough dirty memory has been
  // purged.  Terminate as soon as possible in order to minimize the
  // number of system calls, even if a chunk has only been partially
  // purged.
  while (mNumDirty > (dirty_max >> 1)) {
#ifdef MALLOC_DOUBLE_PURGE
    bool madvised = false;
#endif
    chunk = mChunksDirty.Last();
    MOZ_DIAGNOSTIC_ASSERT(chunk);

    // Scan the chunk's pages from the top down while it still has dirty
    // pages to release.
    for (i = gChunkNumPages - 1; chunk->ndirty > 0; i--) {
      MOZ_DIAGNOSTIC_ASSERT(i >= gChunkHeaderNumPages);

      if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
#ifdef MALLOC_DECOMMIT
        const size_t free_operation = CHUNK_MAP_DECOMMITTED;
#else
        const size_t free_operation = CHUNK_MAP_MADVISED;
#endif
        MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                   0);
        // XOR simultaneously clears CHUNK_MAP_DIRTY and sets the
        // free_operation flag (the assert above guarantees it was unset).
        chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
        // Find adjacent dirty run(s) so a single syscall can cover them all.
        for (npages = 1; i > gChunkHeaderNumPages &&
                         (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
             npages++) {
          i--;
          MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                     0);
          chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
        }
        chunk->ndirty -= npages;
        mNumDirty -= npages;

#ifdef MALLOC_DECOMMIT
        pages_decommit((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
                       (npages << gPageSize2Pow));
#endif
        // Purged pages are no longer committed, whichever mechanism is used.
        mStats.committed -= npages;

#ifndef MALLOC_DECOMMIT
        madvise((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
                (npages << gPageSize2Pow),
                MADV_FREE);
#ifdef MALLOC_DOUBLE_PURGE
        madvised = true;
#endif
#endif
        // Stop early once the target is reached, even mid-chunk.
        if (mNumDirty <= (dirty_max >> 1)) {
          break;
        }
      }
    }

    if (chunk->ndirty == 0) {
      mChunksDirty.Remove(chunk);
    }
#ifdef MALLOC_DOUBLE_PURGE
    if (madvised) {
      // The chunk might already be in the list, but this
      // makes sure it's at the front.
      if (mChunksMAdvised.ElementProbablyInList(chunk)) {
        mChunksMAdvised.remove(chunk);
      }
      mChunksMAdvised.pushFront(chunk);
    }
#endif
  }
}
|
|
|
|
|
2017-09-15 09:01:27 +00:00
|
|
|
// Return aRun to the arena's pool of free runs, coalescing it with any
// adjacent free runs, optionally marking its pages dirty. May release the
// whole chunk (DeallocChunk) and may trigger a Purge if the dirty-page
// threshold is exceeded.
void
arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
{
  arena_chunk_t* chunk;
  size_t size, run_ind, run_pages;

  chunk = GetChunkForPtr(aRun);
  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
  MOZ_DIAGNOSTIC_ASSERT(run_ind >= gChunkHeaderNumPages);
  MOZ_DIAGNOSTIC_ASSERT(run_ind < gChunkNumPages);
  // Large runs store their size in the map; small runs get it from the bin.
  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
    size = chunk->map[run_ind].bits & ~gPageSizeMask;
  } else {
    size = aRun->mBin->mRunSize;
  }
  run_pages = (size >> gPageSize2Pow);

  // Mark pages as unallocated in the chunk map.
  if (aDirty) {
    size_t i;

    for (i = 0; i < run_pages; i++) {
      MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) ==
                            0);
      // Overwrite all flags: the page is now free and dirty.
      chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
    }

    // First dirty page in this chunk: register it in the dirty-chunk tree.
    if (chunk->ndirty == 0) {
      mChunksDirty.Insert(chunk);
    }
    chunk->ndirty += run_pages;
    mNumDirty += run_pages;
  } else {
    size_t i;

    // Keep the per-page state flags; only drop the allocation markers.
    for (i = 0; i < run_pages; i++) {
      chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
    }
  }
  // Record the run size in the first and last page map elements, preserving
  // the per-page flag bits (low gPageSizeMask bits).
  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & gPageSizeMask);
  chunk->map[run_ind + run_pages - 1].bits =
    size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);

  // Try to coalesce forward.
  if (run_ind + run_pages < gChunkNumPages &&
      (chunk->map[run_ind + run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
    size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~gPageSizeMask;

    // Remove successor from tree of available runs; the coalesced run is
    // inserted later.
    mRunsAvail.Remove(&chunk->map[run_ind + run_pages]);

    size += nrun_size;
    run_pages = size >> gPageSize2Pow;

    // The last page of the successor must carry the successor's size.
    MOZ_DIAGNOSTIC_ASSERT(
      (chunk->map[run_ind + run_pages - 1].bits & ~gPageSizeMask) == nrun_size);
    chunk->map[run_ind].bits =
      size | (chunk->map[run_ind].bits & gPageSizeMask);
    chunk->map[run_ind + run_pages - 1].bits =
      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
  }

  // Try to coalesce backward.
  if (run_ind > gChunkHeaderNumPages &&
      (chunk->map[run_ind - 1].bits & CHUNK_MAP_ALLOCATED) == 0) {
    size_t prun_size = chunk->map[run_ind - 1].bits & ~gPageSizeMask;

    // Move run_ind back to the start of the predecessor run.
    run_ind -= prun_size >> gPageSize2Pow;

    // Remove predecessor from tree of available runs; the coalesced run is
    // inserted later.
    mRunsAvail.Remove(&chunk->map[run_ind]);

    size += prun_size;
    run_pages = size >> gPageSize2Pow;

    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~gPageSizeMask) ==
                          prun_size);
    chunk->map[run_ind].bits =
      size | (chunk->map[run_ind].bits & gPageSizeMask);
    chunk->map[run_ind + run_pages - 1].bits =
      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
  }

  // Insert into tree of available runs, now that coalescing is complete.
  mRunsAvail.Insert(&chunk->map[run_ind]);

  // Deallocate chunk if it is now completely unused.
  // The chunk is empty when its first data-page run is unallocated and spans
  // the whole usable area (i.e. its size equals gMaxLargeClass).
  if ((chunk->map[gChunkHeaderNumPages].bits &
       (~gPageSizeMask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
    DeallocChunk(chunk);
  }

  // Enforce mMaxDirty.
  if (mNumDirty > mMaxDirty) {
    Purge(false);
  }
}
|
|
|
|
|
2017-09-15 09:11:12 +00:00
|
|
|
void
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_t::TrimRunHead(arena_chunk_t* aChunk,
|
|
|
|
arena_run_t* aRun,
|
|
|
|
size_t aOldSize,
|
2017-09-15 09:11:12 +00:00
|
|
|
size_t aNewSize)
|
2008-02-09 05:46:59 +00:00
|
|
|
{
|
2017-11-03 03:13:17 +00:00
|
|
|
size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
|
|
|
|
size_t head_npages = (aOldSize - aNewSize) >> gPageSize2Pow;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 09:11:12 +00:00
|
|
|
MOZ_ASSERT(aOldSize > aNewSize);
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Update the chunk map so that arena_t::RunDalloc() can treat the
|
|
|
|
// leading run as separately allocated.
|
2017-10-29 12:53:37 +00:00
|
|
|
aChunk->map[pageind].bits =
|
|
|
|
(aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
|
|
|
|
aChunk->map[pageind + head_npages].bits =
|
|
|
|
aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 09:11:12 +00:00
|
|
|
DallocRun(aRun, false);
|
2008-02-09 05:46:59 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 09:14:33 +00:00
|
|
|
void
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_t::TrimRunTail(arena_chunk_t* aChunk,
|
|
|
|
arena_run_t* aRun,
|
|
|
|
size_t aOldSize,
|
|
|
|
size_t aNewSize,
|
|
|
|
bool aDirty)
|
2008-02-09 05:46:59 +00:00
|
|
|
{
|
2017-11-03 03:13:17 +00:00
|
|
|
size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
|
|
|
|
size_t npages = aNewSize >> gPageSize2Pow;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 09:14:33 +00:00
|
|
|
MOZ_ASSERT(aOldSize > aNewSize);
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Update the chunk map so that arena_t::RunDalloc() can treat the
|
|
|
|
// trailing run as separately allocated.
|
2017-10-29 12:53:37 +00:00
|
|
|
aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
|
|
|
|
aChunk->map[pageind + npages].bits =
|
|
|
|
(aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 09:14:33 +00:00
|
|
|
DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
|
2008-02-09 05:46:59 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 09:23:33 +00:00
|
|
|
// Return a run for aBin that is guaranteed to have at least one free
// region: either an existing partially-filled run, or a freshly allocated
// and initialized one. Returns nullptr if a new run cannot be allocated.
arena_run_t*
arena_t::GetNonFullBinRun(arena_bin_t* aBin)
{
  // First choice: an existing run with free regions.
  arena_chunk_map_t* elem = aBin->mNonFullRuns.First();
  if (elem) {
    // The run behind this map element is guaranteed to have space.
    aBin->mNonFullRuns.Remove(elem);
    return (arena_run_t*)(elem->bits & ~gPageSizeMask);
  }
  // No existing runs have any space available.

  // Allocate a new run for this bin.
  arena_run_t* run = AllocRun(aBin->mRunSize, false, false);
  if (!run) {
    return nullptr;
  }
  // Don't initialize if a race in arena_t::RunAlloc() allowed an existing
  // run to become usable.
  if (run == aBin->mCurrentRun) {
    return run;
  }

  // Initialize run internals: owning bin and the free-region bitmask.
  run->mBin = aBin;

  // Number of bits held by one mRegionsMask element (8 * sizeof(int)).
  const unsigned bits_per_elem = 1U << (LOG2(sizeof(int)) + 3);
  unsigned elem_idx;
  // All mask elements but the last are fully "free" (all bits set).
  for (elem_idx = 0; elem_idx < aBin->mRunNumRegionsMask - 1; elem_idx++) {
    run->mRegionsMask[elem_idx] = UINT_MAX;
  }
  const unsigned remainder = aBin->mRunNumRegions & (bits_per_elem - 1);
  if (remainder == 0) {
    run->mRegionsMask[elem_idx] = UINT_MAX;
  } else {
    // The last element has spare bits that need to be unset.
    run->mRegionsMask[elem_idx] = UINT_MAX >> (bits_per_elem - remainder);
  }

  run->mRegionsMinElement = 0;

  run->mNumFree = aBin->mRunNumRegions;
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  run->mMagic = ARENA_RUN_MAGIC;
#endif

  aBin->mNumRuns++;
  return run;
}
|
|
|
|
|
2017-11-08 06:53:24 +00:00
|
|
|
void
|
|
|
|
arena_bin_t::Init(SizeClass aSizeClass)
|
2017-10-29 12:53:37 +00:00
|
|
|
{
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code choses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, that I don't feel like
it's worth trying to avoid them, with the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
size_t try_run_size;
|
2017-10-29 12:53:37 +00:00
|
|
|
unsigned try_nregs, try_mask_nelms, try_reg0_offset;
|
2017-11-03 06:23:44 +00:00
|
|
|
// Size of the run header, excluding mRegionsMask.
|
|
|
|
static const size_t kFixedHeaderSize = offsetof(arena_run_t, mRegionsMask);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
2017-11-08 06:53:24 +00:00
|
|
|
MOZ_ASSERT(aSizeClass.Size() <= gMaxBinClass);
|
|
|
|
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code choses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, that I don't feel like
it's worth trying to avoid them, with the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
try_run_size = gPageSize;
|
2017-10-29 12:53:37 +00:00
|
|
|
|
2017-11-08 06:53:24 +00:00
|
|
|
mCurrentRun = nullptr;
|
|
|
|
mNonFullRuns.Init();
|
|
|
|
mSizeClass = aSizeClass.Size();
|
|
|
|
mNumRuns = 0;
|
|
|
|
|
2017-11-03 00:26:07 +00:00
|
|
|
// mRunSize expansion loop.
|
2017-11-06 22:42:21 +00:00
|
|
|
while (true) {
|
2017-11-08 06:53:24 +00:00
|
|
|
try_nregs = ((try_run_size - kFixedHeaderSize) / mSizeClass) +
|
2017-10-29 12:53:37 +00:00
|
|
|
1; // Counter-act try_nregs-- in loop.
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code choses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, so I don't feel that
it's worth trying to avoid them, at the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
|
|
|
|
// The do..while loop iteratively reduces the number of regions until
|
|
|
|
// the run header and the regions no longer overlap. A closed formula
|
|
|
|
// would be quite messy, since there is an interdependency between the
|
|
|
|
// header's mask length and the number of regions.
|
2017-10-29 12:53:37 +00:00
|
|
|
do {
|
|
|
|
try_nregs--;
|
|
|
|
try_mask_nelms =
|
2017-11-01 07:47:59 +00:00
|
|
|
(try_nregs >> (LOG2(sizeof(int)) + 3)) +
|
|
|
|
((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
|
2017-11-08 06:53:24 +00:00
|
|
|
try_reg0_offset = try_run_size - (try_nregs * mSizeClass);
|
Bug 1414168 - Base run offset calculations on the fixed header size, excluding regs_mask. r=njn
On 64-bit platforms, sizeof(arena_run_t) includes a padding at the end
of the struct to align to 64-bit, since the last field, regs_mask, is
32-bit, and its offset can be a multiple of 64-bit depending on the
configuration. But we're doing size calculations for a dynamically-sized
regs_mask based on sizeof(arena_run_t), completely ignoring that
padding.
Instead, we use the offset of regs_mask as a base for the calculation.
Practically speaking, this doesn't change much with the current set of
values, but could affect the overheads when we squeeze run sizes more.
--HG--
extra : rebase_source : a3bdf10a507b81aa0b2b437031b884e18499dc8f
2017-11-08 01:08:37 +00:00
|
|
|
} while (kFixedHeaderSize + (sizeof(unsigned) * try_mask_nelms) >
|
2017-10-29 12:53:37 +00:00
|
|
|
try_reg0_offset);
|
2017-11-06 22:42:21 +00:00
|
|
|
|
|
|
|
// Don't allow runs larger than the largest possible large size class.
|
|
|
|
if (try_run_size > gMaxLargeClass) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
Bug 1414168 - Change and move the relaxed calculation rule for small size classes. r=njn
First and foremost, the code and corresponding comment weren't in
agreement on what's going on.
The code checks:
RUN_MAX_OVRHD * (bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX
which is equivalent to:
(bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX / RUN_MAX_OVRHD
replacing constants:
(bin->mSizeClass << 3) <= 0x1800 / 0x3d
The left hand side is just bin->mSizeClass * 8, and the right hand side
is about 100, so this can be roughly summarized as:
bin->mSizeClass <= 12
The comment says the overhead constraint is relaxed for runs with a
per-region overhead greater than RUN_MAX_OVRHD / (mSizeClass << (3+RUN_BFP)).
Which, on itself, doesn't make sense, because it translates to
61 / (mSizeClass * 32768), which, even for a size class of 1 would mean
less than 0.2%, and this value would be even smaller for bigger classes.
The comment would make more sense with RUN_MAX_OVRHD_RELAX, but would
still not match what the code was doing.
So we change how the relaxed rule works, as per the comment in the new
code, and make it happen after the standard run overhead constraint has
been checked.
--HG--
extra : rebase_source : cec35b5bfec416761fbfbcffdc2b39f0098af849
2017-11-07 05:36:07 +00:00
|
|
|
// Try to keep the run overhead below kRunOverhead.
|
2017-11-08 06:53:24 +00:00
|
|
|
if (Fraction(try_reg0_offset, try_run_size) <= kRunOverhead) {
|
2017-11-06 22:42:21 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code chooses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, that I don't feel like
it's worth trying to avoid them, with the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
// If the overhead is larger than the size class, it means the size class
|
|
|
|
// is small and doesn't align very well with the header. It's desirable to
|
|
|
|
// have smaller run sizes for them, so relax the overhead requirement.
|
2017-11-08 06:53:24 +00:00
|
|
|
if (try_reg0_offset > mSizeClass) {
|
|
|
|
if (Fraction(try_reg0_offset, try_run_size) <= kRunRelaxedOverhead) {
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code choses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, that I don't feel like
it's worth trying to avoid them, with the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Bug 1414168 - Change and move the relaxed calculation rule for small size classes. r=njn
First and foremost, the code and corresponding comment weren't in
agreement on what's going on.
The code checks:
RUN_MAX_OVRHD * (bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX
which is equivalent to:
(bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX / RUN_MAX_OVRHD
replacing constants:
(bin->mSizeClass << 3) <= 0x1800 / 0x3d
The left hand side is just bin->mSizeClass * 8, and the right hand side
is about 100, so this can be roughly summarized as:
bin->mSizeClass <= 12
The comment says the overhead constraint is relaxed for runs with a
per-region overhead greater than RUN_MAX_OVRHD / (mSizeClass << (3+RUN_BFP)).
Which, on itself, doesn't make sense, because it translates to
61 / (mSizeClass * 32768), which, even for a size class of 1 would mean
less than 0.2%, and this value would be even smaller for bigger classes.
The comment would make more sense with RUN_MAX_OVRHD_RELAX, but would
still not match what the code was doing.
So we change how the relaxed rule works, as per the comment in the new
code, and make it happen after the standard run overhead constraint has
been checked.
--HG--
extra : rebase_source : cec35b5bfec416761fbfbcffdc2b39f0098af849
2017-11-07 05:36:07 +00:00
|
|
|
// The run header includes one bit per region of the given size. For sizes
|
|
|
|
// small enough, the number of regions is large enough that growing the run
|
|
|
|
// size barely moves the needle for the overhead because of all those bits.
|
|
|
|
// For example, for a size of 8 bytes, adding 4KiB to the run size adds
|
|
|
|
// close to 512 bits to the header, which is 64 bytes.
|
|
|
|
// With such overhead, there is no way to get to the wanted overhead above,
|
2017-11-03 06:23:44 +00:00
|
|
|
// so we give up if the required size for mRegionsMask more than doubles the
|
Bug 1414168 - Change and move the relaxed calculation rule for small size classes. r=njn
First and foremost, the code and corresponding comment weren't in
agreement on what's going on.
The code checks:
RUN_MAX_OVRHD * (bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX
which is equivalent to:
(bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX / RUN_MAX_OVRHD
replacing constants:
(bin->mSizeClass << 3) <= 0x1800 / 0x3d
The left hand side is just bin->mSizeClass * 8, and the right hand side
is about 100, so this can be roughly summarized as:
bin->mSizeClass <= 12
The comment says the overhead constraint is relaxed for runs with a
per-region overhead greater than RUN_MAX_OVRHD / (mSizeClass << (3+RUN_BFP)).
Which, on itself, doesn't make sense, because it translates to
61 / (mSizeClass * 32768), which, even for a size class of 1 would mean
less than 0.2%, and this value would be even smaller for bigger classes.
The comment would make more sense with RUN_MAX_OVRHD_RELAX, but would
still not match what the code was doing.
So we change how the relaxed rule works, as per the comment in the new
code, and make it happen after the standard run overhead constraint has
been checked.
--HG--
extra : rebase_source : cec35b5bfec416761fbfbcffdc2b39f0098af849
2017-11-07 05:36:07 +00:00
|
|
|
// size of the run header.
|
Bug 1414168 - Base run offset calculations on the fixed header size, excluding regs_mask. r=njn
On 64-bit platforms, sizeof(arena_run_t) includes a padding at the end
of the struct to align to 64-bit, since the last field, regs_mask, is
32-bit, and its offset can be a multiple of 64-bit depending on the
configuration. But we're doing size calculations for a dynamically-sized
regs_mask based on sizeof(arena_run_t), completely ignoring that
padding.
Instead, we use the offset of regs_mask as a base for the calculation.
Practically speaking, this doesn't change much with the current set of
values, but could affect the overheads when we squeeze run sizes more.
--HG--
extra : rebase_source : a3bdf10a507b81aa0b2b437031b884e18499dc8f
2017-11-08 01:08:37 +00:00
|
|
|
if (try_mask_nelms * sizeof(unsigned) >= kFixedHeaderSize) {
|
2017-11-06 22:42:21 +00:00
|
|
|
break;
|
|
|
|
}
|
Bug 1414168 - Change and move the relaxed calculation rule for small size classes. r=njn
First and foremost, the code and corresponding comment weren't in
agreement on what's going on.
The code checks:
RUN_MAX_OVRHD * (bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX
which is equivalent to:
(bin->mSizeClass << 3) <= RUN_MAX_OVRHD_RELAX / RUN_MAX_OVRHD
replacing constants:
(bin->mSizeClass << 3) <= 0x1800 / 0x3d
The left hand side is just bin->mSizeClass * 8, and the right hand side
is about 100, so this can be roughly summarized as:
bin->mSizeClass <= 12
The comment says the overhead constraint is relaxed for runs with a
per-region overhead greater than RUN_MAX_OVRHD / (mSizeClass << (3+RUN_BFP)).
Which, on itself, doesn't make sense, because it translates to
61 / (mSizeClass * 32768), which, even for a size class of 1 would mean
less than 0.2%, and this value would be even smaller for bigger classes.
The comment would make more sense with RUN_MAX_OVRHD_RELAX, but would
still not match what the code was doing.
So we change how the relaxed rule works, as per the comment in the new
code, and make it happen after the standard run overhead constraint has
been checked.
--HG--
extra : rebase_source : cec35b5bfec416761fbfbcffdc2b39f0098af849
2017-11-07 05:36:07 +00:00
|
|
|
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code choses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, that I don't feel like
it's worth trying to avoid them, with the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
// Try more aggressive settings.
|
|
|
|
try_run_size += gPageSize;
|
2017-11-06 22:42:21 +00:00
|
|
|
}
|
2017-10-29 12:53:37 +00:00
|
|
|
|
Bug 1414168 - Change how run sizes are calculated. r=njn
There are multiple flaws to the current code:
- The loop calculating the right parameters for a given run size is
repeated.
- The loop trying different run sizes doesn't actually work to fulfil
the overhead constraint: while it stops when the constraint is
fulfilled, the values that are kept are those from the previous
iteration, which may well be well over the constraint.
In practice, the latter resulted in a few surprising results:
- most size classes had an overhead slightly over the constraint
(1.562%), which, while not terribly bad, doesn't match the set
expectations.
- some size classes ended up with relatively good overheads only because
of the additional constraint that run sizes had to be larger than the
run size of smaller size classes. Without this constraint, some size
classes would end up with overheads well over 2% just because that
happens to be the last overhead value before reaching below the 1.5%
constraint.
Furthermore, for higher-level fragmentation concerns, smaller run sizes
are better than larger run sizes, and in many cases, smaller run sizes
can yield the same (or even sometimes, better) overhead as larger run
sizes. For example, the current code choses 8KiB for runs of size 112,
but using 4KiB runs would actually yield the same number of regions, and
the same overhead.
We thus change the calculation to:
- not force runs to be smaller than those of smaller classes.
- avoid the code repetition.
- actually enforce its overhead constraint, but make it 1.6%.
- for especially small size classes, relax the overhead constraint to
2.4%.
This leads to an uneven set of run sizes:
size class before after
4 4 KiB 4 KiB
8 4 KiB 4 KiB
16 4 KiB 4 KiB
32 4 KiB 4 KiB
48 4 KiB 4 KiB
64 4 KiB 4 KiB
80 4 KiB 4 KiB
96 4 KiB 4 KiB
112 8 KiB 4 KiB
128 8 KiB 8 KiB
144 8 KiB 4 KiB
160 8 KiB 8 KiB
176 8 KiB 4 KiB
192 12 KiB 4 KiB
208 12 KiB 8 KiB
224 12 KiB 4 KiB
240 12 KiB 4 KiB
256 16 KiB 16 KiB
272 16 KiB 4 KiB
288 16 KiB 4 KiB
304 16 KiB 12 KiB
320 20 KiB 12 KiB
336 20 KiB 4 KiB
352 20 KiB 8 KiB
368 20 KiB 4 KiB
384 24 KiB 8 KiB
400 24 KiB 20 KiB
416 24 KiB 16 KiB
432 24 KiB 12 KiB
448 28 KiB 4 KiB
464 28 KiB 16 KiB
480 28 KiB 8 KiB
496 28 KiB 20 KiB
512 32 KiB 32 KiB
1024 64 KiB 64 KiB
2048 132 KiB 128 KiB
* Note: before is before this change only, not before the set of changes
from this bug; before that, the run size for 96 could be 8 KiB in some
configurations.
In most cases, the overhead hasn't changed, with a few exceptions:
- Improvements:
size class before after
208 1.823% 0.977%
304 1.660% 1.042%
320 1.562% 1.042%
400 0.716% 0.391%
464 1.283% 0.879%
480 1.228% 0.391%
496 1.395% 0.703%
- Regressions:
352 0.312% 1.172%
416 0.130% 0.977%
2048 1.515% 1.562%
For the regressions, the values are either still well within the
constraint or very close to the previous value, that I don't feel like
it's worth trying to avoid them, with the risk of making things worse
for other size classes.
--HG--
extra : rebase_source : fdff18df8a0a35c24162313d4adb1a1c24fb6e82
2017-11-08 05:04:10 +00:00
|
|
|
MOZ_ASSERT(kFixedHeaderSize + (sizeof(unsigned) * try_mask_nelms) <=
|
|
|
|
try_reg0_offset);
|
|
|
|
MOZ_ASSERT((try_mask_nelms << (LOG2(sizeof(int)) + 3)) >= try_nregs);
|
2017-10-29 12:53:37 +00:00
|
|
|
|
|
|
|
// Copy final settings.
|
2017-11-08 06:53:24 +00:00
|
|
|
mRunSize = try_run_size;
|
|
|
|
mRunNumRegions = try_nregs;
|
|
|
|
mRunNumRegionsMask = try_mask_nelms;
|
|
|
|
mRunFirstRegionOffset = try_reg0_offset;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 10:11:52 +00:00
|
|
|
void*
|
|
|
|
arena_t::MallocSmall(size_t aSize, bool aZero)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-09-15 10:11:52 +00:00
|
|
|
void* ret;
|
|
|
|
arena_bin_t* bin;
|
|
|
|
arena_run_t* run;
|
2017-11-02 23:53:34 +00:00
|
|
|
SizeClass sizeClass(aSize);
|
|
|
|
aSize = sizeClass.Size();
|
|
|
|
|
|
|
|
switch (sizeClass.Type()) {
|
|
|
|
case SizeClass::Tiny:
|
2017-11-03 01:10:50 +00:00
|
|
|
bin = &mBins[FloorLog2(aSize / kMinTinyClass)];
|
2017-11-02 23:53:34 +00:00
|
|
|
break;
|
|
|
|
case SizeClass::Quantum:
|
2017-11-03 03:21:53 +00:00
|
|
|
bin = &mBins[kNumTinyClasses + (aSize / kQuantum) - 1];
|
2017-11-02 23:53:34 +00:00
|
|
|
break;
|
|
|
|
case SizeClass::SubPage:
|
2017-11-03 03:21:53 +00:00
|
|
|
bin = &mBins[kNumTinyClasses + kNumQuantumClasses +
|
|
|
|
(FloorLog2(aSize / kMaxQuantumClass) - 1)];
|
2017-11-02 23:53:34 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
|
2017-09-15 10:11:52 +00:00
|
|
|
}
|
2017-11-03 00:26:07 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(mLock);
|
2017-11-03 06:48:40 +00:00
|
|
|
run = bin->mCurrentRun;
|
|
|
|
if (MOZ_UNLIKELY(!run || run->mNumFree == 0)) {
|
|
|
|
run = bin->mCurrentRun = GetNonFullBinRun(bin);
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2017-11-03 06:48:40 +00:00
|
|
|
if (MOZ_UNLIKELY(!run)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
|
|
|
|
MOZ_DIAGNOSTIC_ASSERT(run->mNumFree > 0);
|
|
|
|
ret = arena_run_reg_alloc(run, bin);
|
|
|
|
MOZ_DIAGNOSTIC_ASSERT(ret);
|
|
|
|
run->mNumFree--;
|
2017-10-06 08:49:40 +00:00
|
|
|
if (!ret) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
mStats.allocated_small += aSize;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-10 07:59:21 +00:00
|
|
|
if (!aZero) {
|
|
|
|
ApplyZeroOrJunk(ret, aSize);
|
2017-10-29 12:53:14 +00:00
|
|
|
} else {
|
2017-09-15 10:11:52 +00:00
|
|
|
memset(ret, 0, aSize);
|
2017-10-29 12:53:14 +00:00
|
|
|
}
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 10:11:52 +00:00
|
|
|
return ret;
|
2008-02-09 05:46:59 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 10:14:00 +00:00
|
|
|
void*
|
|
|
|
arena_t::MallocLarge(size_t aSize, bool aZero)
|
2008-02-09 05:46:59 +00:00
|
|
|
{
|
2017-09-15 10:14:00 +00:00
|
|
|
void* ret;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Large allocation.
|
2017-09-15 10:14:00 +00:00
|
|
|
aSize = PAGE_CEILING(aSize);
|
2017-10-06 08:49:40 +00:00
|
|
|
|
|
|
|
{
|
|
|
|
MutexAutoLock lock(mLock);
|
2017-11-03 06:54:20 +00:00
|
|
|
ret = AllocRun(aSize, true, aZero);
|
2017-10-06 08:49:40 +00:00
|
|
|
if (!ret) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
mStats.allocated_large += aSize;
|
2017-09-15 10:14:00 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-10 07:59:21 +00:00
|
|
|
if (!aZero) {
|
|
|
|
ApplyZeroOrJunk(ret, aSize);
|
2017-09-15 10:14:00 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:20 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-15 10:20:09 +00:00
|
|
|
void*
|
|
|
|
arena_t::Malloc(size_t aSize, bool aZero)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-09-15 10:20:09 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
|
|
|
|
MOZ_ASSERT(aSize != 0);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-13 23:31:53 +00:00
|
|
|
if (aSize <= gMaxBinClass) {
|
|
|
|
return MallocSmall(aSize, aZero);
|
|
|
|
}
|
2017-11-03 01:10:50 +00:00
|
|
|
if (aSize <= gMaxLargeClass) {
|
2017-11-13 23:31:53 +00:00
|
|
|
return MallocLarge(aSize, aZero);
|
2017-09-21 04:23:22 +00:00
|
|
|
}
|
2017-11-13 23:31:53 +00:00
|
|
|
return MallocHuge(aSize, aZero);
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Only handles large allocations that require more than page alignment.
//
// Strategy: over-allocate a run of aAllocSize (sized by the caller so that
// some page within it is guaranteed to satisfy aAlignment), then trim the
// unaligned head and the excess tail back to the arena, leaving an
// aSize-byte, aAlignment-aligned region.
//
// aAlignment and aSize must both be page-multiples (asserted below).
// Returns nullptr if the over-sized run cannot be allocated.
void*
arena_t::PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize)
{
  void* ret;
  size_t offset;
  arena_chunk_t* chunk;

  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
  MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);

  {
    // mLock protects run allocation/trimming and the arena stats.
    MutexAutoLock lock(mLock);
    ret = AllocRun(aAllocSize, true, false);
    if (!ret) {
      return nullptr;
    }

    chunk = GetChunkForPtr(ret);

    // Distance from the previous aAlignment boundary; zero means the run
    // already starts aligned.
    offset = uintptr_t(ret) & (aAlignment - 1);
    MOZ_ASSERT((offset & gPageSizeMask) == 0);
    MOZ_ASSERT(offset < aAllocSize);
    if (offset == 0) {
      // Already aligned: just give back the surplus tail pages.
      TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
    } else {
      size_t leadsize, trailsize;

      // Pages before the first aligned address inside the run.
      leadsize = aAlignment - offset;
      if (leadsize > 0) {
        // Release the unaligned head and advance ret to the aligned
        // address. Note: ret must be advanced before the tail trim below
        // operates on the (now smaller) run.
        TrimRunHead(
          chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
        ret = (void*)(uintptr_t(ret) + leadsize);
      }

      // Whatever remains past aSize after removing the head is surplus.
      trailsize = aAllocSize - leadsize - aSize;
      if (trailsize != 0) {
        // Trim trailing space.
        MOZ_ASSERT(trailsize < aAllocSize);
        TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
      }
    }

    // Only the aSize bytes actually handed out are accounted, not the
    // transient over-allocation.
    mStats.allocated_large += aSize;
  }

  ApplyZeroOrJunk(ret, aSize);
  return ret;
}
|
|
|
|
|
2017-11-13 23:21:09 +00:00
|
|
|
// Aligned allocation entry point for this arena. Picks the cheapest
// strategy that can honor aAlignment: a plain Malloc when the size
// class's natural alignment suffices, PallocLarge with an over-sized run
// for super-page alignment within the large range, and the huge-allocation
// paths beyond that. Returns nullptr on overflow or allocation failure.
void*
arena_t::Palloc(size_t aAlignment, size_t aSize)
{
  void* ret;
  size_t ceil_size;

  // Round size up to the nearest multiple of alignment.
  //
  // This done, we can take advantage of the fact that for each small
  // size class, every object is aligned at the smallest power of two
  // that is non-zero in the base two representation of the size. For
  // example:
  //
  // Size | Base 2 | Minimum alignment
  // -----+----------+------------------
  // 96 | 1100000 | 32
  // 144 | 10100000 | 32
  // 192 | 11000000 | 64
  //
  // Depending on runtime settings, it is possible that Malloc()
  // will further round up to a power of two, but that never causes
  // correctness issues.
  ceil_size = ALIGNMENT_CEILING(aSize, aAlignment);

  // (ceil_size < aSize) protects against the combination of maximal
  // alignment and size greater than maximal alignment.
  if (ceil_size < aSize) {
    // size_t overflow.
    return nullptr;
  }

  if (ceil_size <= gPageSize ||
      (aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
    // The size class's natural alignment already satisfies aAlignment.
    ret = Malloc(ceil_size, false);
  } else {
    size_t run_size;

    // We can't achieve sub-page alignment, so round up alignment
    // permanently; it makes later calculations simpler.
    aAlignment = PAGE_CEILING(aAlignment);
    ceil_size = PAGE_CEILING(aSize);

    // (ceil_size < aSize) protects against very large sizes within
    // pagesize of SIZE_T_MAX.
    //
    // (ceil_size + aAlignment < ceil_size) protects against the
    // combination of maximal alignment and ceil_size large enough
    // to cause overflow. This is similar to the first overflow
    // check above, but it needs to be repeated due to the new
    // ceil_size value, which may now be *equal* to maximal
    // alignment, whereas before we only detected overflow if the
    // original size was *greater* than maximal alignment.
    if (ceil_size < aSize || ceil_size + aAlignment < ceil_size) {
      // size_t overflow.
      return nullptr;
    }

    // Calculate the size of the over-size run that PallocLarge()
    // would need to allocate in order to guarantee the alignment.
    if (ceil_size >= aAlignment) {
      run_size = ceil_size + aAlignment - gPageSize;
    } else {
      // It is possible that (aAlignment << 1) will cause
      // overflow, but it doesn't matter because we also
      // subtract pagesize, which in the case of overflow
      // leaves us with a very large run_size. That causes
      // the first conditional below to fail, which means
      // that the bogus run_size value never gets used for
      // anything important.
      run_size = (aAlignment << 1) - gPageSize;
    }

    if (run_size <= gMaxLargeClass) {
      // The over-sized run fits in the large range: align by trimming.
      ret = PallocLarge(aAlignment, ceil_size, run_size);
    } else if (aAlignment <= kChunkSize) {
      // Chunks are naturally chunk-aligned, so a plain huge allocation
      // suffices for alignments up to the chunk size.
      ret = MallocHuge(ceil_size, false);
    } else {
      ret = PallocHuge(ceil_size, aAlignment, false);
    }
  }

  MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
  return ret;
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Return the size of the allocation pointed to by ptr.
|
2008-02-06 23:06:50 +00:00
|
|
|
static size_t
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_salloc(const void* ptr)
|
|
|
|
{
|
|
|
|
size_t ret;
|
|
|
|
arena_chunk_t* chunk;
|
|
|
|
size_t pageind, mapbits;
|
|
|
|
|
|
|
|
MOZ_ASSERT(ptr);
|
|
|
|
MOZ_ASSERT(GetChunkOffsetForPtr(ptr) != 0);
|
|
|
|
|
|
|
|
chunk = GetChunkForPtr(ptr);
|
2017-11-03 03:13:17 +00:00
|
|
|
pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> gPageSize2Pow);
|
2017-10-29 12:53:37 +00:00
|
|
|
mapbits = chunk->map[pageind].bits;
|
|
|
|
MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
|
|
|
|
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
|
2017-11-03 03:13:17 +00:00
|
|
|
arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
|
2017-11-03 06:23:44 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
|
|
|
|
ret = run->mBin->mSizeClass;
|
2017-10-29 12:53:37 +00:00
|
|
|
} else {
|
2017-11-03 03:13:17 +00:00
|
|
|
ret = mapbits & ~gPageSizeMask;
|
2017-10-29 12:53:37 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(ret != 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-11-09 04:49:33 +00:00
|
|
|
// Bundles the size of an allocation together with the location of its
// metadata: the owning chunk for small/large allocations, or the extent
// node for huge allocations. Which union member is active is determined
// by comparing mSize against gMaxLargeClass.
class AllocInfo
{
public:
  // Returns the AllocInfo describing aPtr. When Validate is true, the
  // pointer is first checked to actually belong to the allocator, and a
  // default AllocInfo (size 0, null chunk) is returned when it doesn't.
  template<bool Validate = false>
  static inline AllocInfo Get(const void* aPtr)
  {
    // If the allocator is not initialized, the pointer can't belong to it.
    if (Validate && malloc_initialized == false) {
      return AllocInfo();
    }

    auto chunk = GetChunkForPtr(aPtr);
    if (Validate) {
      // Reject null-chunk pointers and chunks the allocator never mapped.
      if (!chunk || !gChunkRTree.Get(chunk)) {
        return AllocInfo();
      }
    }

    if (chunk != aPtr) {
      // aPtr is not chunk-aligned, so it lives inside an arena chunk as a
      // small or large allocation; arena_salloc() recovers its size.
      MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
      return AllocInfo(arena_salloc(aPtr), chunk);
    }

    extent_node_t key;

    // Huge allocation
    key.mAddr = chunk;
    MutexAutoLock lock(huge_mtx);
    extent_node_t* node = huge.Search(&key);
    if (Validate && !node) {
      return AllocInfo();
    }
    return AllocInfo(node->mSize, node);
  }

  // Validate ptr before assuming that it points to an allocation. Currently,
  // the following validation is performed:
  //
  // + Check that ptr is not nullptr.
  //
  // + Check that ptr lies within a mapped chunk.
  static inline AllocInfo GetValidated(const void* aPtr)
  {
    return Get<true>(aPtr);
  }

  // Default state: unknown/invalid pointer; Size() reports 0.
  AllocInfo()
    : mSize(0)
    , mChunk(nullptr)
  {
  }

  // Small or large allocation, tracked via its owning chunk.
  explicit AllocInfo(size_t aSize, arena_chunk_t* aChunk)
    : mSize(aSize)
    , mChunk(aChunk)
  {
    MOZ_ASSERT(mSize <= gMaxLargeClass);
  }

  // Huge allocation, tracked via its extent node.
  explicit AllocInfo(size_t aSize, extent_node_t* aNode)
    : mSize(aSize)
    , mNode(aNode)
  {
    MOZ_ASSERT(mSize > gMaxLargeClass);
  }

  size_t Size() { return mSize; }

  // Returns the arena owning the allocation. The size discriminates the
  // union: sizes up to gMaxLargeClass are chunk-backed, larger are huge.
  arena_t* Arena()
  {
    return (mSize <= gMaxLargeClass) ? mChunk->arena : mNode->mArena;
  }

private:
  size_t mSize;
  union {
    // Pointer to the chunk associated with the allocation for small
    // and large allocations.
    arena_chunk_t* mChunk;

    // Pointer to the extent node for huge allocations.
    extent_node_t* mNode;
  };
};
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Fill *aInfo with a description of what aPtr points to: whether it is a
// live or freed allocation (small/large/huge), the base address and size
// of that allocation, or TagUnknown when the pointer does not belong to
// the allocator at all.
template<>
inline void
MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
  arena_chunk_t* chunk = GetChunkForPtr(aPtr);

  // Is the pointer null, or within one chunk's size of null?
  // Alternatively, if the allocator is not initialized yet, the pointer
  // can't be known.
  if (!chunk || !malloc_initialized) {
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  // Look for huge allocations before looking for |chunk| in gChunkRTree.
  // This is necessary because |chunk| won't be in gChunkRTree if it's
  // the second or subsequent chunk in a huge allocation.
  extent_node_t* node;
  extent_node_t key;
  {
    MutexAutoLock lock(huge_mtx);
    key.mAddr = const_cast<void*>(aPtr);
    // Search with the bounds-based comparator so that interior pointers
    // (not just the base address) match the huge allocation's extent.
    node =
      reinterpret_cast<RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(
        &huge)
        ->Search(&key);
    if (node) {
      *aInfo = { TagLiveHuge, node->mAddr, node->mSize };
      return;
    }
  }

  // It's not a huge allocation. Check if we have a known chunk.
  if (!gChunkRTree.Get(chunk)) {
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);

  // Get the page number within the chunk.
  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
  if (pageind < gChunkHeaderNumPages) {
    // Within the chunk header.
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  size_t mapbits = chunk->map[pageind].bits;

  if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
    // The page is free; classify it by its current backing state.
    PtrInfoTag tag = TagFreedPageDirty;
    if (mapbits & CHUNK_MAP_DIRTY) {
      tag = TagFreedPageDirty;
    } else if (mapbits & CHUNK_MAP_DECOMMITTED) {
      tag = TagFreedPageDecommitted;
    } else if (mapbits & CHUNK_MAP_MADVISED) {
      tag = TagFreedPageMadvised;
    } else if (mapbits & CHUNK_MAP_ZEROED) {
      tag = TagFreedPageZeroed;
    } else {
      // A free page must carry exactly one of the states above.
      MOZ_CRASH();
    }

    void* pageaddr = (void*)(uintptr_t(aPtr) & ~gPageSizeMask);
    *aInfo = { tag, pageaddr, gPageSize };
    return;
  }

  if (mapbits & CHUNK_MAP_LARGE) {
    // It's a large allocation. Only the first page of a large
    // allocation contains its size, so if the address is not in
    // the first page, scan back to find the allocation size.
    size_t size;
    while (true) {
      size = mapbits & ~gPageSizeMask;
      if (size != 0) {
        break;
      }

      // The following two return paths shouldn't occur in
      // practice unless there is heap corruption.
      pageind--;
      MOZ_DIAGNOSTIC_ASSERT(pageind >= gChunkHeaderNumPages);
      if (pageind < gChunkHeaderNumPages) {
        *aInfo = { TagUnknown, nullptr, 0 };
        return;
      }

      mapbits = chunk->map[pageind].bits;
      MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
      if (!(mapbits & CHUNK_MAP_LARGE)) {
        *aInfo = { TagUnknown, nullptr, 0 };
        return;
      }
    }

    void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
    *aInfo = { TagLiveLarge, addr, size };
    return;
  }

  // It must be a small allocation.
  auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

  // The allocation size is stored in the run metadata.
  size_t size = run->mBin->mSizeClass;

  // Address of the first possible pointer in the run after its headers.
  uintptr_t reg0_addr = (uintptr_t)run + run->mBin->mRunFirstRegionOffset;
  if (aPtr < (void*)reg0_addr) {
    // In the run header.
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  // Position in the run.
  unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;

  // Pointer to the allocation's base address.
  void* addr = (void*)(reg0_addr + regind * size);

  // Check if the allocation has been freed. The run keeps one bit per
  // region in mRegionsMask; a set bit means the region is free.
  unsigned elm = regind >> (LOG2(sizeof(int)) + 3);
  unsigned bit = regind - (elm << (LOG2(sizeof(int)) + 3));
  PtrInfoTag tag =
    ((run->mRegionsMask[elm] & (1U << bit))) ? TagFreedSmall : TagLiveSmall;

  *aInfo = { tag, addr, size };
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
namespace Debug {
// Helper for debuggers. We don't want it to be inlined and optimized out.
// The result lives in a function-local static so a debugger can inspect
// it after the call returns; this is not thread-safe, which is acceptable
// for interactive debugging use only.
MOZ_NEVER_INLINE jemalloc_ptr_info_t*
jemalloc_ptr_info(const void* aPtr)
{
  static jemalloc_ptr_info_t info;
  MozJemalloc::jemalloc_ptr_info(aPtr, &info);
  return &info;
}
}
|
|
|
|
|
2017-09-15 11:37:47 +00:00
|
|
|
// Free the small allocation aPtr located in aChunk. aMapElm is the chunk
// map entry of the page holding the run that contains aPtr. The freed
// region is poisoned, and the run is deallocated or re-inserted into the
// bin's non-full runs tree as needed. Caller must hold the arena lock.
void
arena_t::DallocSmall(arena_chunk_t* aChunk,
                     void* aPtr,
                     arena_chunk_map_t* aMapElm)
{
  arena_run_t* run;
  arena_bin_t* bin;
  size_t size;

  // The map bits of a small-allocation page point back at the run header.
  run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
  bin = run->mBin;
  size = bin->mSizeClass;
  // aPtr must lie past the run header and on a region boundary.
  MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                        uintptr_t(run) + bin->mRunFirstRegionOffset);
  MOZ_DIAGNOSTIC_ASSERT(
    (uintptr_t(aPtr) - (uintptr_t(run) + bin->mRunFirstRegionOffset)) % size ==
    0);

  memset(aPtr, kAllocPoison, size);

  arena_run_reg_dalloc(run, bin, aPtr, size);
  run->mNumFree++;

  if (run->mNumFree == bin->mRunNumRegions) {
    // Deallocate run.
    if (run == bin->mCurrentRun) {
      bin->mCurrentRun = nullptr;
    } else if (bin->mRunNumRegions != 1) {
      size_t run_pageind =
        (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];

      // This block's conditional is necessary because if the
      // run only contains one region, then it never gets
      // inserted into the non-full runs tree.
      MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == run_mapelm);
      bin->mNonFullRuns.Remove(run_mapelm);
    }
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    // Clear the magic so stale run pointers are caught by later asserts.
    run->mMagic = 0;
#endif
    DallocRun(run, true);
    bin->mNumRuns--;
  } else if (run->mNumFree == 1 && run != bin->mCurrentRun) {
    // The run just transitioned from full to non-full.
    // Make sure that bin->mCurrentRun always refers to the lowest
    // non-full run, if one exists.
    if (!bin->mCurrentRun) {
      bin->mCurrentRun = run;
    } else if (uintptr_t(run) < uintptr_t(bin->mCurrentRun)) {
      // Switch mCurrentRun.
      if (bin->mCurrentRun->mNumFree > 0) {
        arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->mCurrentRun);
        size_t runcur_pageind =
          (uintptr_t(bin->mCurrentRun) - uintptr_t(runcur_chunk)) >>
          gPageSize2Pow;
        arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];

        // Insert runcur.
        MOZ_DIAGNOSTIC_ASSERT(!bin->mNonFullRuns.Search(runcur_mapelm));
        bin->mNonFullRuns.Insert(runcur_mapelm);
      }
      bin->mCurrentRun = run;
    } else {
      size_t run_pageind =
        (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];

      MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == nullptr);
      bin->mNonFullRuns.Insert(run_mapelm);
    }
  }
  mStats.allocated_small -= size;
}
|
|
|
|
|
2017-09-15 11:40:36 +00:00
|
|
|
void
|
|
|
|
arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr)
|
2008-02-09 05:46:59 +00:00
|
|
|
{
|
2017-11-03 03:13:17 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & gPageSizeMask) == 0);
|
|
|
|
size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
|
|
|
|
size_t size = aChunk->map[pageind].bits & ~gPageSizeMask;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 11:40:36 +00:00
|
|
|
memset(aPtr, kAllocPoison, size);
|
|
|
|
mStats.allocated_large -= size;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-09-15 11:40:36 +00:00
|
|
|
DallocRun((arena_run_t*)aPtr, true);
|
2008-02-09 05:46:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Free an arena-managed (small or large) allocation. aOffset is aPtr's
// offset within its chunk (must be non-zero: chunk-aligned pointers are
// huge allocations, handled elsewhere). When aArena is non-null, it is
// checked against the chunk's owning arena as a sanity measure.
static inline void
arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena)
{
  MOZ_ASSERT(aPtr);
  MOZ_ASSERT(aOffset != 0);
  MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);

  // Recover the chunk base from the pointer and its in-chunk offset.
  auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
  auto arena = chunk->arena;
  MOZ_ASSERT(arena);
  MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);

  // The map entry for aPtr's page tells us whether this is a small or
  // large allocation; both paths require the arena lock.
  MutexAutoLock lock(arena->mLock);
  size_t pageind = aOffset >> gPageSize2Pow;
  arena_chunk_map_t* mapelm = &chunk->map[pageind];
  MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
  if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
    // Small allocation.
    arena->DallocSmall(chunk, aPtr, mapelm);
  } else {
    // Large allocation.
    arena->DallocLarge(chunk, aPtr);
  }
}
|
|
|
|
|
|
|
|
static inline void
|
2017-11-08 08:43:47 +00:00
|
|
|
idalloc(void* ptr, arena_t* aArena)
|
2008-02-09 05:46:59 +00:00
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
size_t offset;
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
MOZ_ASSERT(ptr);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
offset = GetChunkOffsetForPtr(ptr);
|
|
|
|
if (offset != 0) {
|
2017-11-08 08:43:47 +00:00
|
|
|
arena_dalloc(ptr, offset, aArena);
|
2017-10-29 12:53:37 +00:00
|
|
|
} else {
|
2017-11-08 08:43:47 +00:00
|
|
|
huge_dalloc(ptr, aArena);
|
2017-10-29 12:53:37 +00:00
|
|
|
}
|
2008-02-09 05:46:59 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-15 11:44:34 +00:00
|
|
|
// Shrink the large allocation aPtr (in aChunk) in place from aOldSize to
// aSize, releasing the trailing pages back to the arena.
void
arena_t::RallocShrinkLarge(arena_chunk_t* aChunk,
                           void* aPtr,
                           size_t aSize,
                           size_t aOldSize)
{
  MOZ_ASSERT(aSize < aOldSize);

  // Shrink the run, and make trailing pages available for other
  // allocations.
  MutexAutoLock lock(mLock);
  TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
  mStats.allocated_large -= aOldSize - aSize;
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Try to grow the large allocation aPtr (in aChunk) in place from
// aOldSize to aSize by absorbing the immediately following free run.
// Returns whether reallocation was successful.
bool
arena_t::RallocGrowLarge(arena_chunk_t* aChunk,
                         void* aPtr,
                         size_t aSize,
                         size_t aOldSize)
{
  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t npages = aOldSize >> gPageSize2Pow;

  MutexAutoLock lock(mLock);
  // The first page's map bits must still record the old size.
  MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
                        (aChunk->map[pageind].bits & ~gPageSizeMask));

  // Try to extend the run: the page right after the allocation must exist,
  // be unallocated, and head a free run big enough for the extra bytes.
  MOZ_ASSERT(aSize > aOldSize);
  if (pageind + npages < gChunkNumPages &&
      (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
      (aChunk->map[pageind + npages].bits & ~gPageSizeMask) >=
        aSize - aOldSize) {
    // The next run is available and sufficiently large. Split the
    // following run, then merge the first part with the existing
    // allocation.
    if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
                                 ((pageind + npages) << gPageSize2Pow)),
                  aSize - aOldSize,
                  true,
                  false)) {
      return false;
    }

    // Record the new size on the first page and mark the old boundary
    // page as an interior page of the enlarged large allocation.
    aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    aChunk->map[pageind + npages].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    mStats.allocated_large += aSize - aOldSize;
    return true;
  }

  return false;
}
|
|
|
|
|
2017-11-17 00:50:27 +00:00
|
|
|
// Reallocate aPtr (a small or large allocation of aOldSize bytes) to
// aSize bytes. In-place resizing is attempted first (same size class, or
// large-to-large shrink/grow); otherwise a new allocation is made, the
// contents copied, and the old allocation freed. Returns the new pointer,
// or nullptr on allocation failure (in which case aPtr is left intact).
void*
arena_t::RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize)
{
  void* ret;
  size_t copysize;
  SizeClass sizeClass(aSize);

  // Try to avoid moving the allocation.
  if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
    // Same size class: the allocation stays where it is; only poison the
    // now-unused tail when logically shrinking.
    if (aSize < aOldSize) {
      memset((void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
    }
    return aPtr;
  }
  if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
      aOldSize <= gMaxLargeClass) {
    // Large-to-large resize: may be doable in place on the run.
    arena_chunk_t* chunk = GetChunkForPtr(aPtr);
    if (sizeClass.Size() < aOldSize) {
      // Fill before shrinking in order to avoid a race.
      memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
      RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
      return aPtr;
    }
    if (RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
      // Newly exposed bytes get the usual zero-or-junk treatment.
      ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
      return aPtr;
    }
  }

  // If we get here, then aSize and aOldSize are different enough that we
  // need to move the object. In that case, fall back to allocating new
  // space and copying.
  ret = Malloc(aSize, false);
  if (!ret) {
    return nullptr;
  }

  // Junk/zero-filling were already done by arena_t::Malloc().
  copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
  if (copysize >= VM_COPY_MIN) {
    pages_copy(ret, aPtr, copysize);
  } else
#endif
  {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
|
|
|
|
|
2017-11-17 00:50:27 +00:00
|
|
|
void*
|
|
|
|
arena_t::Ralloc(void* aPtr, size_t aSize, size_t aOldSize)
|
2008-02-09 05:46:59 +00:00
|
|
|
{
|
2017-11-17 00:50:27 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
|
2017-09-21 04:58:17 +00:00
|
|
|
MOZ_ASSERT(aPtr);
|
|
|
|
MOZ_ASSERT(aSize != 0);
|
2008-02-09 05:46:59 +00:00
|
|
|
|
2017-11-17 00:50:27 +00:00
|
|
|
return (aSize <= gMaxLargeClass) ? RallocSmallOrLarge(aPtr, aSize, aOldSize)
|
|
|
|
: RallocHuge(aPtr, aSize, aOldSize);
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-11-16 22:27:35 +00:00
|
|
|
// Construct an arena. aParams may be null; when provided, its non-zero
// fields override defaults (currently only the dirty-page limit).
arena_t::arena_t(arena_params_t* aParams)
{
  unsigned i;

  MOZ_RELEASE_ASSERT(mLock.Init());

  memset(&mLink, 0, sizeof(mLink));
  memset(&mStats, 0, sizeof(arena_stats_t));

  // Initialize chunks.
  mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
  // Placement-new: the member is raw storage until explicitly constructed.
  new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
#endif
  mSpare = nullptr;

  mNumDirty = 0;

  // The default maximum amount of dirty pages allowed on arenas is a fraction
  // of opt_dirty_max.
  mMaxDirty =
    (aParams && aParams->mMaxDirty) ? aParams->mMaxDirty : (opt_dirty_max / 8);

  mRunsAvail.Init();

  // Initialize bins: one bin per small size class, walking the classes in
  // increasing order starting from the smallest (size 1 rounds up to it).
  SizeClass sizeClass(1);

  for (i = 0;; i++) {
    arena_bin_t& bin = mBins[i];
    bin.Init(sizeClass);

    // SizeClass doesn't want sizes larger than gMaxSubPageClass for now.
    if (sizeClass.Size() == gMaxSubPageClass) {
      break;
    }
    sizeClass = sizeClass.Next();
  }
  // The loop must have visited exactly one class per bin.
  MOZ_ASSERT(i ==
             kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses - 1);

#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  mMagic = ARENA_MAGIC;
#endif
}
|
|
|
|
|
2017-10-27 22:13:58 +00:00
|
|
|
// Create a new arena and register it in the collection. aIsPrivate picks
// which list (private vs. public) the arena is inserted into. On OOM the
// default arena is returned instead of failing (see comment below).
arena_t*
ArenaCollection::CreateArena(bool aIsPrivate, arena_params_t* aParams)
{
  fallible_t fallible;
  arena_t* ret = new (fallible) arena_t(aParams);
  if (!ret) {
    // Only reached if there is an OOM error.

    // OOM here is quite inconvenient to propagate, since dealing with it
    // would require a check for failure in the fast path. Instead, punt
    // by using the first arena.
    // In practice, this is an extremely unlikely failure.
    _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");

    return mDefaultArena;
  }

  // The collection lock guards the id counter and the arena lists.
  MutexAutoLock lock(mLock);

  // TODO: Use random Ids.
  ret->mId = mLastArenaId++;
  (aIsPrivate ? mPrivateArenas : mArenas).Insert(ret);
  return ret;
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End arena.
|
|
|
|
// ***************************************************************************
|
|
|
|
// Begin general internal functions.
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-13 23:29:39 +00:00
|
|
|
void*
|
|
|
|
arena_t::MallocHuge(size_t aSize, bool aZero)
|
2014-09-26 11:29:00 +00:00
|
|
|
{
|
2017-11-13 23:29:39 +00:00
|
|
|
return PallocHuge(aSize, kChunkSize, aZero);
|
2014-09-26 11:29:00 +00:00
|
|
|
}
|
|
|
|
|
2017-11-13 23:29:39 +00:00
|
|
|
void*
|
|
|
|
arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-06 08:49:40 +00:00
|
|
|
void* ret;
|
|
|
|
size_t csize;
|
|
|
|
size_t psize;
|
|
|
|
extent_node_t* node;
|
|
|
|
bool zeroed;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Allocate one or more contiguous chunks for this request.
|
2017-10-06 08:49:40 +00:00
|
|
|
csize = CHUNK_CEILING(aSize);
|
|
|
|
if (csize == 0) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// size is large enough to cause size_t wrap-around.
|
2017-10-06 08:49:40 +00:00
|
|
|
return nullptr;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Allocate an extent node with which to track the chunk.
|
2017-10-06 08:49:40 +00:00
|
|
|
node = base_node_alloc();
|
|
|
|
if (!node) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
ret = chunk_alloc(csize, aAlignment, false, &zeroed);
|
|
|
|
if (!ret) {
|
|
|
|
base_node_dealloc(node);
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
if (aZero) {
|
|
|
|
chunk_ensure_zero(ret, csize, zeroed);
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Insert node into huge.
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mAddr = ret;
|
2017-10-06 08:49:40 +00:00
|
|
|
psize = PAGE_CEILING(aSize);
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mSize = psize;
|
2017-11-13 23:29:39 +00:00
|
|
|
node->mArena = this;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(huge_mtx);
|
|
|
|
huge.Insert(node);
|
2011-10-07 18:39:53 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Although we allocated space for csize bytes, we indicate that we've
|
|
|
|
// allocated only psize bytes.
|
|
|
|
//
|
|
|
|
// If DECOMMIT is defined, this is a reasonable thing to do, since
|
|
|
|
// we'll explicitly decommit the bytes in excess of psize.
|
|
|
|
//
|
|
|
|
// If DECOMMIT is not defined, then we're relying on the OS to be lazy
|
|
|
|
// about how it allocates physical pages to mappings. If we never
|
|
|
|
// touch the pages in excess of psize, the OS won't allocate a physical
|
|
|
|
// page, and we won't use more than psize bytes of physical memory.
|
|
|
|
//
|
|
|
|
// A correct program will only touch memory in excess of how much it
|
|
|
|
// requested if it first calls malloc_usable_size and finds out how
|
2017-11-08 08:20:20 +00:00
|
|
|
// much space it has to play with. But because we set node->mSize =
|
2017-10-29 12:53:31 +00:00
|
|
|
// psize above, malloc_usable_size will return psize, not csize, and
|
|
|
|
// the program will (hopefully) never touch bytes in excess of psize.
|
|
|
|
// Thus those bytes won't take up space in physical memory, and we can
|
|
|
|
// reasonably claim we never "allocated" them in the first place.
|
2017-10-06 08:49:40 +00:00
|
|
|
huge_allocated += psize;
|
|
|
|
huge_mapped += csize;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2008-03-15 03:51:44 +00:00
|
|
|
#ifdef MALLOC_DECOMMIT
|
2017-10-29 12:53:14 +00:00
|
|
|
if (csize - psize > 0) {
|
2017-10-06 08:49:40 +00:00
|
|
|
pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);
|
2017-10-29 12:53:14 +00:00
|
|
|
}
|
2008-03-15 03:51:44 +00:00
|
|
|
#endif
|
|
|
|
|
2017-11-10 07:59:21 +00:00
|
|
|
if (!aZero) {
|
2017-10-29 12:53:37 +00:00
|
|
|
#ifdef MALLOC_DECOMMIT
|
2017-11-10 07:59:21 +00:00
|
|
|
ApplyZeroOrJunk(ret, psize);
|
2017-10-29 12:53:37 +00:00
|
|
|
#else
|
2017-11-10 07:59:21 +00:00
|
|
|
ApplyZeroOrJunk(ret, csize);
|
2017-10-29 12:53:37 +00:00
|
|
|
#endif
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-11-17 00:50:27 +00:00
|
|
|
void*
|
|
|
|
arena_t::RallocHuge(void* aPtr, size_t aSize, size_t aOldSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-06 08:49:40 +00:00
|
|
|
void* ret;
|
|
|
|
size_t copysize;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Avoid moving the allocation if the size class would not change.
|
2017-11-03 01:10:50 +00:00
|
|
|
if (aOldSize > gMaxLargeClass &&
|
2017-10-06 08:49:40 +00:00
|
|
|
CHUNK_CEILING(aSize) == CHUNK_CEILING(aOldSize)) {
|
|
|
|
size_t psize = PAGE_CEILING(aSize);
|
|
|
|
if (aSize < aOldSize) {
|
|
|
|
memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
|
|
|
|
}
|
2008-03-15 03:51:44 +00:00
|
|
|
#ifdef MALLOC_DECOMMIT
|
2017-10-06 08:49:40 +00:00
|
|
|
if (psize < aOldSize) {
|
|
|
|
extent_node_t key;
|
|
|
|
|
|
|
|
pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Update recorded size.
|
2017-10-06 08:49:40 +00:00
|
|
|
MutexAutoLock lock(huge_mtx);
|
2017-11-08 08:20:20 +00:00
|
|
|
key.mAddr = const_cast<void*>(aPtr);
|
2017-10-06 08:49:40 +00:00
|
|
|
extent_node_t* node = huge.Search(&key);
|
|
|
|
MOZ_ASSERT(node);
|
2017-11-08 08:20:20 +00:00
|
|
|
MOZ_ASSERT(node->mSize == aOldSize);
|
2017-11-17 00:50:27 +00:00
|
|
|
MOZ_RELEASE_ASSERT(node->mArena == this);
|
2017-10-06 08:49:40 +00:00
|
|
|
huge_allocated -= aOldSize - psize;
|
2017-10-29 12:53:31 +00:00
|
|
|
// No need to change huge_mapped, because we didn't (un)map anything.
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mSize = psize;
|
2017-10-06 08:49:40 +00:00
|
|
|
} else if (psize > aOldSize) {
|
2017-10-27 01:31:50 +00:00
|
|
|
if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
|
|
|
|
psize - aOldSize)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2011-10-07 18:39:53 +00:00
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Although we don't have to commit or decommit anything if
|
|
|
|
// DECOMMIT is not defined and the size class didn't change, we
|
|
|
|
// do need to update the recorded size if the size increased,
|
|
|
|
// so malloc_usable_size doesn't return a value smaller than
|
|
|
|
// what was requested via realloc().
|
2017-10-06 08:49:40 +00:00
|
|
|
if (psize > aOldSize) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Update recorded size.
|
2017-10-06 08:49:40 +00:00
|
|
|
extent_node_t key;
|
|
|
|
MutexAutoLock lock(huge_mtx);
|
2017-11-08 08:20:20 +00:00
|
|
|
key.mAddr = const_cast<void*>(aPtr);
|
2017-10-06 08:49:40 +00:00
|
|
|
extent_node_t* node = huge.Search(&key);
|
|
|
|
MOZ_ASSERT(node);
|
2017-11-08 08:20:20 +00:00
|
|
|
MOZ_ASSERT(node->mSize == aOldSize);
|
2017-11-17 00:50:27 +00:00
|
|
|
MOZ_RELEASE_ASSERT(node->mArena == this);
|
2017-10-06 08:49:40 +00:00
|
|
|
huge_allocated += psize - aOldSize;
|
2017-10-29 12:53:31 +00:00
|
|
|
// No need to change huge_mapped, because we didn't
|
|
|
|
// (un)map anything.
|
2017-11-08 08:20:20 +00:00
|
|
|
node->mSize = psize;
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-16 07:34:31 +00:00
|
|
|
if (aSize > aOldSize) {
|
|
|
|
ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
|
|
|
return aPtr;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// If we get here, then aSize and aOldSize are different enough that we
|
|
|
|
// need to use a different size class. In that case, fall back to
|
|
|
|
// allocating new space and copying.
|
2017-11-17 00:50:27 +00:00
|
|
|
ret = MallocHuge(aSize, false);
|
2017-10-06 08:49:40 +00:00
|
|
|
if (!ret) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
copysize = (aSize < aOldSize) ? aSize : aOldSize;
|
2008-02-09 05:46:59 +00:00
|
|
|
#ifdef VM_COPY_MIN
|
2017-10-06 08:49:40 +00:00
|
|
|
if (copysize >= VM_COPY_MIN) {
|
|
|
|
pages_copy(ret, aPtr, copysize);
|
|
|
|
} else
|
2008-02-09 05:46:59 +00:00
|
|
|
#endif
|
2017-10-06 08:49:40 +00:00
|
|
|
{
|
|
|
|
memcpy(ret, aPtr, copysize);
|
|
|
|
}
|
2017-11-17 00:50:27 +00:00
|
|
|
idalloc(aPtr, this);
|
2017-10-06 08:49:40 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-11-08 08:43:47 +00:00
|
|
|
huge_dalloc(void* aPtr, arena_t* aArena)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-06 08:49:40 +00:00
|
|
|
extent_node_t* node;
|
|
|
|
{
|
|
|
|
extent_node_t key;
|
|
|
|
MutexAutoLock lock(huge_mtx);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Extract from tree of huge allocations.
|
2017-11-08 08:20:20 +00:00
|
|
|
key.mAddr = aPtr;
|
2017-10-06 08:49:40 +00:00
|
|
|
node = huge.Search(&key);
|
|
|
|
MOZ_ASSERT(node);
|
2017-11-08 08:20:20 +00:00
|
|
|
MOZ_ASSERT(node->mAddr == aPtr);
|
2017-11-08 08:43:47 +00:00
|
|
|
MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
|
2017-10-06 08:49:40 +00:00
|
|
|
huge.Remove(node);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-08 08:20:20 +00:00
|
|
|
huge_allocated -= node->mSize;
|
|
|
|
huge_mapped -= CHUNK_CEILING(node->mSize);
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Unmap chunk.
|
2017-11-08 08:20:20 +00:00
|
|
|
chunk_dealloc(node->mAddr, CHUNK_CEILING(node->mSize), HUGE_CHUNK);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
base_node_dealloc(node);
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-08-31 02:36:09 +00:00
|
|
|
static size_t
|
|
|
|
GetKernelPageSize()
|
|
|
|
{
|
|
|
|
static size_t kernel_page_size = ([]() {
|
|
|
|
#ifdef XP_WIN
|
|
|
|
SYSTEM_INFO info;
|
|
|
|
GetSystemInfo(&info);
|
|
|
|
return info.dwPageSize;
|
|
|
|
#else
|
|
|
|
long result = sysconf(_SC_PAGESIZE);
|
|
|
|
MOZ_ASSERT(result != -1);
|
|
|
|
return result;
|
|
|
|
#endif
|
|
|
|
})();
|
|
|
|
return kernel_page_size;
|
|
|
|
}
|
|
|
|
|
2017-10-27 08:05:47 +00:00
|
|
|
// Returns whether the allocator was successfully initialized.
|
Bug 1417234 - Use SRWLock as Mutex for mozjemalloc on Windows. r=njn
SRWLock is more lightweight than CriticalSection, but is only available
on Windows Vista and more. So until we actually dropped support Windows
XP, we had to use CriticalSection.
Now that all supported Windows versions do have SRWLock, this is a
switch we can make, and not only because SRWLock is more lightweight,
but because it can be statically initialized like on other platforms,
allowing to use the same initialization code as on other platforms,
and removing the requirement for a DllMain, which in turn can allow
to statically link mozjemalloc in some cases, instead of requiring a
shared library (DllMain only works on shared libraries), or manually
call the initialization function soon enough.
There is a downside, though: SRWLock, as opposed to CriticalSection, is
not fair, meaning it can have thread scheduling implications, and can
theoretically increase latency on some threads. However, it is the
default used by Rust Mutex, meaning it's at least good enough there.
Let's see how things go with this.
--HG--
extra : rebase_source : 337dc4e245e461fd0ea23a2b6b53981346a545c6
2017-11-14 03:58:33 +00:00
|
|
|
static bool
|
|
|
|
malloc_init_hard()
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-09-12 07:29:11 +00:00
|
|
|
unsigned i;
|
2017-10-29 12:53:37 +00:00
|
|
|
const char* opts;
|
2017-09-12 07:29:11 +00:00
|
|
|
long result;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-22 22:25:08 +00:00
|
|
|
AutoLock<StaticMutex> lock(gInitLock);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-12 07:29:11 +00:00
|
|
|
if (malloc_initialized) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Another thread initialized the allocator before this one
|
|
|
|
// acquired gInitLock.
|
2017-10-27 08:05:47 +00:00
|
|
|
return true;
|
2017-09-12 07:29:11 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-09-12 07:29:11 +00:00
|
|
|
if (!thread_arena.init()) {
|
2017-10-27 08:05:47 +00:00
|
|
|
return true;
|
2017-09-12 07:29:11 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Get page size and number of CPUs
|
2017-09-12 07:29:11 +00:00
|
|
|
result = GetKernelPageSize();
|
2017-10-29 12:53:31 +00:00
|
|
|
// We assume that the page size is a power of 2.
|
2017-09-12 07:29:11 +00:00
|
|
|
MOZ_ASSERT(((result - 1) & result) == 0);
|
2017-09-28 06:27:59 +00:00
|
|
|
#ifdef MALLOC_STATIC_PAGESIZE
|
2017-11-03 03:13:17 +00:00
|
|
|
if (gPageSize % (size_t)result) {
|
2017-10-29 12:53:37 +00:00
|
|
|
_malloc_message(
|
|
|
|
_getprogname(),
|
|
|
|
"Compile-time page size does not divide the runtime one.\n");
|
2017-09-12 07:29:11 +00:00
|
|
|
MOZ_CRASH();
|
|
|
|
}
|
2016-08-12 11:36:00 +00:00
|
|
|
#else
|
2017-11-03 03:13:17 +00:00
|
|
|
gPageSize = (size_t)result;
|
2017-11-01 09:33:24 +00:00
|
|
|
DefineGlobals();
|
2011-10-26 08:17:34 +00:00
|
|
|
#endif
|
2008-07-01 22:41:14 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Get runtime configuration.
|
2017-09-12 07:29:11 +00:00
|
|
|
if ((opts = getenv("MALLOC_OPTIONS"))) {
|
|
|
|
for (i = 0; opts[i] != '\0'; i++) {
|
|
|
|
unsigned j, nreps;
|
|
|
|
bool nseen;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Parse repetition count, if any.
|
2017-09-12 07:29:11 +00:00
|
|
|
for (nreps = 0, nseen = false;; i++, nseen = true) {
|
|
|
|
switch (opts[i]) {
|
2017-10-29 12:53:37 +00:00
|
|
|
case '0':
|
|
|
|
case '1':
|
|
|
|
case '2':
|
|
|
|
case '3':
|
|
|
|
case '4':
|
|
|
|
case '5':
|
|
|
|
case '6':
|
|
|
|
case '7':
|
|
|
|
case '8':
|
|
|
|
case '9':
|
2017-09-12 07:29:11 +00:00
|
|
|
nreps *= 10;
|
|
|
|
nreps += opts[i] - '0';
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto MALLOC_OUT;
|
|
|
|
}
|
|
|
|
}
|
2017-10-29 12:53:37 +00:00
|
|
|
MALLOC_OUT:
|
2017-10-29 12:53:14 +00:00
|
|
|
if (nseen == false) {
|
2017-09-12 07:29:11 +00:00
|
|
|
nreps = 1;
|
2017-10-29 12:53:14 +00:00
|
|
|
}
|
2017-09-12 07:29:11 +00:00
|
|
|
|
|
|
|
for (j = 0; j < nreps; j++) {
|
|
|
|
switch (opts[i]) {
|
2017-10-29 12:53:37 +00:00
|
|
|
case 'f':
|
|
|
|
opt_dirty_max >>= 1;
|
|
|
|
break;
|
|
|
|
case 'F':
|
|
|
|
if (opt_dirty_max == 0) {
|
|
|
|
opt_dirty_max = 1;
|
|
|
|
} else if ((opt_dirty_max << 1) != 0) {
|
|
|
|
opt_dirty_max <<= 1;
|
|
|
|
}
|
|
|
|
break;
|
2017-05-18 01:22:20 +00:00
|
|
|
#ifdef MOZ_DEBUG
|
2017-10-29 12:53:37 +00:00
|
|
|
case 'j':
|
|
|
|
opt_junk = false;
|
|
|
|
break;
|
|
|
|
case 'J':
|
|
|
|
opt_junk = true;
|
|
|
|
break;
|
2008-02-09 05:46:59 +00:00
|
|
|
#endif
|
2017-05-18 01:22:20 +00:00
|
|
|
#ifdef MOZ_DEBUG
|
2017-10-29 12:53:37 +00:00
|
|
|
case 'z':
|
|
|
|
opt_zero = false;
|
|
|
|
break;
|
|
|
|
case 'Z':
|
|
|
|
opt_zero = true;
|
|
|
|
break;
|
2008-02-09 05:46:59 +00:00
|
|
|
#endif
|
2017-10-29 12:53:37 +00:00
|
|
|
default: {
|
|
|
|
char cbuf[2];
|
|
|
|
|
|
|
|
cbuf[0] = opts[i];
|
|
|
|
cbuf[1] = '\0';
|
|
|
|
_malloc_message(_getprogname(),
|
|
|
|
": (malloc) Unsupported character "
|
|
|
|
"in malloc options: '",
|
|
|
|
cbuf,
|
|
|
|
"'\n");
|
|
|
|
}
|
2017-09-12 07:29:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-26 00:43:43 +00:00
|
|
|
gRecycledSize = 0;
|
2017-09-12 07:29:11 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Initialize chunks data.
|
2017-10-06 08:20:04 +00:00
|
|
|
chunks_mtx.Init();
|
2017-10-25 23:29:07 +00:00
|
|
|
gChunksBySize.Init();
|
|
|
|
gChunksByAddress.Init();
|
2017-09-12 07:29:11 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Initialize huge allocation data.
|
2017-10-06 08:20:04 +00:00
|
|
|
huge_mtx.Init();
|
2017-09-26 06:06:00 +00:00
|
|
|
huge.Init();
|
2017-09-12 07:29:11 +00:00
|
|
|
huge_allocated = 0;
|
|
|
|
huge_mapped = 0;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Initialize base allocation data structures.
|
2017-09-12 07:29:11 +00:00
|
|
|
base_mapped = 0;
|
|
|
|
base_committed = 0;
|
|
|
|
base_nodes = nullptr;
|
2017-10-06 08:20:04 +00:00
|
|
|
base_mtx.Init();
|
2017-09-12 07:29:11 +00:00
|
|
|
|
2017-10-27 22:13:58 +00:00
|
|
|
// Initialize arenas collection here.
|
|
|
|
if (!gArenas.Init()) {
|
2017-10-27 08:05:47 +00:00
|
|
|
return false;
|
2017-09-12 07:29:11 +00:00
|
|
|
}
|
Bug 1397101 - Reduce the number of dirty pages we allow to be kept in thread local arenas. r=njn
Until bug 1361258, there was only ever one mozjemalloc arena, and the
number of dirty pages we allow to be kept dirty, fixed to 1MB per arena,
was, in fact, 1MB for an entire process.
With stylo using thread local arenas, we now can have multiple arenas
per process, multiplying that number of dirty pages.
While those dirty pages may be reused later on, when other allocations
end up filling them later on, the fact that a relatively large number of
them is kept around for each stylo thread (in proportion to the amount of
memory ever allocated by stylo), combined with the fact that the memory
use from stylo depends on the workload generated by the pages being
visited, those dirty pages may very well not be used for a rather long
time. This is less of a problem with the main arena, used for most
everything else.
So, for each arena except the main one, we decrease the number of dirty
pages we allow to be kept around to 1/8 of the current value. We do this
by introducing a per-arena configuration of that maximum number.
--HG--
extra : rebase_source : 75eebb175b3746d5ca1c371606cface50ec70f2f
2017-09-13 22:26:30 +00:00
|
|
|
|
2017-10-27 22:13:58 +00:00
|
|
|
// Assign the default arena to the initial thread.
|
|
|
|
thread_arena.set(gArenas.GetDefault());
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-06 07:18:01 +00:00
|
|
|
if (!gChunkRTree.Init()) {
|
2017-10-27 08:05:47 +00:00
|
|
|
return false;
|
2017-09-12 07:29:11 +00:00
|
|
|
}
|
2008-07-25 21:53:20 +00:00
|
|
|
|
2017-09-12 07:29:11 +00:00
|
|
|
malloc_initialized = true;
|
2011-05-22 03:27:00 +00:00
|
|
|
|
2017-10-24 23:01:41 +00:00
|
|
|
// Dummy call so that the function is not removed by dead-code elimination
|
|
|
|
Debug::jemalloc_ptr_info(nullptr);
|
|
|
|
|
2017-08-30 07:54:17 +00:00
|
|
|
#if !defined(XP_WIN) && !defined(XP_DARWIN)
|
2017-10-29 12:53:31 +00:00
|
|
|
// Prevent potential deadlock on malloc locks after fork.
|
2017-10-29 12:53:37 +00:00
|
|
|
pthread_atfork(
|
|
|
|
_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);
|
2015-05-22 10:01:00 +00:00
|
|
|
#endif
|
|
|
|
|
2017-10-27 08:05:47 +00:00
|
|
|
return true;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End general internal functions.
|
|
|
|
// ***************************************************************************
|
|
|
|
// Begin malloc(3)-compatible functions.
|
|
|
|
|
|
|
|
// The BaseAllocator class is a helper class that implements the base allocator
|
|
|
|
// functions (malloc, calloc, realloc, free, memalign) for a given arena,
|
|
|
|
// or an appropriately chosen arena (per choose_arena()) when none is given.
|
2017-10-29 12:53:37 +00:00
|
|
|
struct BaseAllocator
|
|
|
|
{
|
|
|
|
#define MALLOC_DECL(name, return_type, ...) \
|
2017-09-21 05:24:37 +00:00
|
|
|
inline return_type name(__VA_ARGS__);
|
|
|
|
|
|
|
|
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
|
|
|
|
#include "malloc_decls.h"
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
explicit BaseAllocator(arena_t* aArena)
|
|
|
|
: mArena(aArena)
|
|
|
|
{
|
|
|
|
}
|
2017-09-21 05:24:37 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
arena_t* mArena;
|
|
|
|
};
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MALLOC_DECL(name, return_type, ...) \
|
|
|
|
template<> \
|
|
|
|
inline return_type MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
|
|
|
|
{ \
|
|
|
|
BaseAllocator allocator(nullptr); \
|
|
|
|
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
|
2017-09-21 05:24:37 +00:00
|
|
|
}
|
|
|
|
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
|
|
|
|
#include "malloc_decls.h"
|
|
|
|
|
|
|
|
inline void*
|
|
|
|
BaseAllocator::malloc(size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-08-31 01:29:11 +00:00
|
|
|
void* ret;
|
2017-11-13 23:00:17 +00:00
|
|
|
arena_t* arena;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-27 08:05:47 +00:00
|
|
|
if (!malloc_init()) {
|
2017-08-31 01:29:11 +00:00
|
|
|
ret = nullptr;
|
|
|
|
goto RETURN;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (aSize == 0) {
|
|
|
|
aSize = 1;
|
|
|
|
}
|
2017-11-13 23:00:17 +00:00
|
|
|
arena = mArena ? mArena : choose_arena(aSize);
|
2017-11-13 23:31:53 +00:00
|
|
|
ret = arena->Malloc(aSize, /* zero = */ false);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
|
|
|
RETURN:
|
2017-08-31 01:29:11 +00:00
|
|
|
if (!ret) {
|
|
|
|
errno = ENOMEM;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-21 05:24:37 +00:00
|
|
|
inline void*
|
|
|
|
BaseAllocator::memalign(size_t aAlignment, size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-08-31 01:29:11 +00:00
|
|
|
MOZ_ASSERT(((aAlignment - 1) & aAlignment) == 0);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-27 08:05:47 +00:00
|
|
|
if (!malloc_init()) {
|
2017-08-31 02:47:22 +00:00
|
|
|
return nullptr;
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (aSize == 0) {
|
|
|
|
aSize = 1;
|
|
|
|
}
|
2010-05-19 20:46:08 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
aAlignment = aAlignment < sizeof(void*) ? sizeof(void*) : aAlignment;
|
2017-11-13 23:00:17 +00:00
|
|
|
arena_t* arena = mArena ? mArena : choose_arena(aSize);
|
2017-11-13 23:21:09 +00:00
|
|
|
return arena->Palloc(aAlignment, aSize);
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-21 05:24:37 +00:00
|
|
|
inline void*
|
|
|
|
BaseAllocator::calloc(size_t aNum, size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
void* ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-10-30 02:28:17 +00:00
|
|
|
if (malloc_init()) {
|
|
|
|
CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aSize;
|
|
|
|
if (checkedSize.isValid()) {
|
|
|
|
size_t allocSize = checkedSize.value();
|
|
|
|
if (allocSize == 0) {
|
|
|
|
allocSize = 1;
|
|
|
|
}
|
2017-11-13 23:00:17 +00:00
|
|
|
arena_t* arena = mArena ? mArena : choose_arena(allocSize);
|
2017-11-13 23:31:53 +00:00
|
|
|
ret = arena->Malloc(allocSize, /* zero = */ true);
|
2017-10-30 02:28:17 +00:00
|
|
|
} else {
|
|
|
|
ret = nullptr;
|
|
|
|
}
|
|
|
|
} else {
|
2017-08-31 01:29:11 +00:00
|
|
|
ret = nullptr;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (!ret) {
|
|
|
|
errno = ENOMEM;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-21 05:24:37 +00:00
|
|
|
inline void*
|
|
|
|
BaseAllocator::realloc(void* aPtr, size_t aSize)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-08-31 01:29:11 +00:00
|
|
|
void* ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (aSize == 0) {
|
|
|
|
aSize = 1;
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (aPtr) {
|
2017-10-27 08:25:18 +00:00
|
|
|
MOZ_RELEASE_ASSERT(malloc_initialized);
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-11-17 00:50:27 +00:00
|
|
|
auto info = AllocInfo::Get(aPtr);
|
|
|
|
auto arena = info.Arena();
|
|
|
|
MOZ_RELEASE_ASSERT(!mArena || arena == mArena);
|
|
|
|
ret = arena->Ralloc(aPtr, aSize, info.Size());
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (!ret) {
|
|
|
|
errno = ENOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
2017-10-27 08:05:47 +00:00
|
|
|
if (!malloc_init()) {
|
2017-08-31 01:29:11 +00:00
|
|
|
ret = nullptr;
|
|
|
|
} else {
|
2017-11-13 23:00:17 +00:00
|
|
|
arena_t* arena = mArena ? mArena : choose_arena(aSize);
|
2017-11-13 23:31:53 +00:00
|
|
|
ret = arena->Malloc(aSize, /* zero = */ false);
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
if (!ret) {
|
|
|
|
errno = ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
return ret;
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-21 05:24:37 +00:00
|
|
|
inline void
|
|
|
|
BaseAllocator::free(void* aPtr)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-08-31 01:29:11 +00:00
|
|
|
size_t offset;
|
|
|
|
|
2017-10-26 01:34:37 +00:00
|
|
|
// A version of idalloc that checks for nullptr pointer.
|
|
|
|
offset = GetChunkOffsetForPtr(aPtr);
|
2017-08-31 01:29:11 +00:00
|
|
|
if (offset != 0) {
|
2017-10-27 08:25:18 +00:00
|
|
|
MOZ_RELEASE_ASSERT(malloc_initialized);
|
2017-11-08 08:43:47 +00:00
|
|
|
arena_dalloc(aPtr, offset, mArena);
|
2017-08-31 01:29:11 +00:00
|
|
|
} else if (aPtr) {
|
2017-10-27 08:25:18 +00:00
|
|
|
MOZ_RELEASE_ASSERT(malloc_initialized);
|
2017-11-08 08:43:47 +00:00
|
|
|
huge_dalloc(aPtr, mArena);
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-21 02:46:57 +00:00
|
|
|
template<void* (*memalign)(size_t, size_t)>
|
|
|
|
struct AlignedAllocator
|
|
|
|
{
|
2017-10-29 12:53:37 +00:00
|
|
|
static inline int posix_memalign(void** aMemPtr,
|
|
|
|
size_t aAlignment,
|
|
|
|
size_t aSize)
|
2017-09-21 02:46:57 +00:00
|
|
|
{
|
|
|
|
void* result;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// alignment must be a power of two and a multiple of sizeof(void*)
|
2017-09-21 02:46:57 +00:00
|
|
|
if (((aAlignment - 1) & aAlignment) != 0 || aAlignment < sizeof(void*)) {
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// The 0-->1 size promotion is done in the memalign() call below
|
2017-09-21 02:46:57 +00:00
|
|
|
result = memalign(aAlignment, aSize);
|
|
|
|
|
|
|
|
if (!result) {
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
*aMemPtr = result;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static inline void* aligned_alloc(size_t aAlignment, size_t aSize)
|
2017-09-21 02:46:57 +00:00
|
|
|
{
|
|
|
|
if (aSize % aAlignment) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
return memalign(aAlignment, aSize);
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static inline void* valloc(size_t aSize)
|
2017-09-21 02:46:57 +00:00
|
|
|
{
|
|
|
|
return memalign(GetKernelPageSize(), aSize);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline int
|
2017-09-21 02:46:57 +00:00
|
|
|
MozJemalloc::posix_memalign(void** aMemPtr, size_t aAlignment, size_t aSize)
|
|
|
|
{
|
|
|
|
return AlignedAllocator<memalign>::posix_memalign(aMemPtr, aAlignment, aSize);
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline void*
|
2017-09-21 02:46:57 +00:00
|
|
|
MozJemalloc::aligned_alloc(size_t aAlignment, size_t aSize)
|
|
|
|
{
|
|
|
|
return AlignedAllocator<memalign>::aligned_alloc(aAlignment, aSize);
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline void*
|
2017-09-21 02:46:57 +00:00
|
|
|
MozJemalloc::valloc(size_t aSize)
|
|
|
|
{
|
|
|
|
return AlignedAllocator<memalign>::valloc(aSize);
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End malloc(3)-compatible functions.
|
|
|
|
// ***************************************************************************
|
|
|
|
// Begin non-standard functions.
|
2011-11-04 03:53:41 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// This was added by Mozilla for use by SQLite.
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline size_t
|
2017-08-31 01:29:11 +00:00
|
|
|
MozJemalloc::malloc_good_size(size_t aSize)
|
2011-11-04 03:53:41 +00:00
|
|
|
{
|
2017-11-15 05:50:33 +00:00
|
|
|
if (aSize <= gMaxLargeClass) {
|
|
|
|
// Small or large
|
2017-11-02 23:53:34 +00:00
|
|
|
aSize = SizeClass(aSize).Size();
|
2017-08-31 01:29:11 +00:00
|
|
|
} else {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Huge. We use PAGE_CEILING to get psize, instead of using
|
|
|
|
// CHUNK_CEILING to get csize. This ensures that this
|
|
|
|
// malloc_usable_size(malloc(n)) always matches
|
|
|
|
// malloc_good_size(n).
|
2017-08-31 01:29:11 +00:00
|
|
|
aSize = PAGE_CEILING(aSize);
|
|
|
|
}
|
|
|
|
return aSize;
|
2011-11-04 03:53:41 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline size_t
|
2017-08-31 01:29:11 +00:00
|
|
|
MozJemalloc::malloc_usable_size(usable_ptr_t aPtr)
|
2008-02-06 23:06:50 +00:00
|
|
|
{
|
2017-11-09 04:49:33 +00:00
|
|
|
return AllocInfo::GetValidated(aPtr).Size();
|
2008-02-06 23:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline void
|
2017-08-31 01:29:11 +00:00
|
|
|
MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
|
2008-06-20 17:34:42 +00:00
|
|
|
{
|
2017-09-27 23:06:23 +00:00
|
|
|
size_t non_arena_mapped, chunk_header_size;
|
2017-08-31 01:29:11 +00:00
|
|
|
|
2017-10-27 08:25:18 +00:00
|
|
|
if (!aStats) {
|
|
|
|
return;
|
|
|
|
}
|
2017-11-16 07:35:03 +00:00
|
|
|
if (!malloc_init()) {
|
2017-10-27 08:25:18 +00:00
|
|
|
memset(aStats, 0, sizeof(*aStats));
|
|
|
|
return;
|
|
|
|
}
|
2017-08-31 01:29:11 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Gather runtime settings.
|
2017-08-31 01:29:11 +00:00
|
|
|
aStats->opt_junk = opt_junk;
|
|
|
|
aStats->opt_zero = opt_zero;
|
2017-11-03 01:10:50 +00:00
|
|
|
aStats->quantum = kQuantum;
|
|
|
|
aStats->small_max = kMaxQuantumClass;
|
|
|
|
aStats->large_max = gMaxLargeClass;
|
2017-11-03 03:07:16 +00:00
|
|
|
aStats->chunksize = kChunkSize;
|
2017-11-03 03:13:17 +00:00
|
|
|
aStats->page_size = gPageSize;
|
2017-08-31 01:29:11 +00:00
|
|
|
aStats->dirty_max = opt_dirty_max;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Gather current memory usage statistics.
|
2017-10-27 22:13:58 +00:00
|
|
|
aStats->narenas = 0;
|
2017-08-31 01:29:11 +00:00
|
|
|
aStats->mapped = 0;
|
|
|
|
aStats->allocated = 0;
|
|
|
|
aStats->waste = 0;
|
|
|
|
aStats->page_cache = 0;
|
|
|
|
aStats->bookkeeping = 0;
|
|
|
|
aStats->bin_unused = 0;
|
|
|
|
|
|
|
|
non_arena_mapped = 0;
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Get huge mapped/allocated.
|
2017-10-06 08:49:40 +00:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(huge_mtx);
|
|
|
|
non_arena_mapped += huge_mapped;
|
|
|
|
aStats->allocated += huge_allocated;
|
|
|
|
MOZ_ASSERT(huge_mapped >= huge_allocated);
|
|
|
|
}
|
2008-06-20 17:34:42 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Get base mapped/allocated.
|
2017-10-06 08:49:40 +00:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(base_mtx);
|
|
|
|
non_arena_mapped += base_mapped;
|
|
|
|
aStats->bookkeeping += base_committed;
|
|
|
|
MOZ_ASSERT(base_mapped >= base_committed);
|
|
|
|
}
|
2017-08-31 01:29:11 +00:00
|
|
|
|
2017-10-27 22:13:58 +00:00
|
|
|
gArenas.mLock.Lock();
|
2017-10-29 12:53:31 +00:00
|
|
|
// Iterate over arenas.
|
2017-10-27 22:13:58 +00:00
|
|
|
for (auto arena : gArenas.iter()) {
|
2017-08-31 01:29:11 +00:00
|
|
|
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_unused, arena_headers;
|
2017-08-31 01:29:11 +00:00
|
|
|
arena_run_t* run;
|
|
|
|
|
|
|
|
arena_headers = 0;
|
|
|
|
arena_unused = 0;
|
2008-06-20 17:34:42 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(arena->mLock);
|
2013-07-29 16:10:53 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
arena_mapped = arena->mStats.mapped;
|
2014-05-22 00:34:06 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// "committed" counts dirty and allocated memory.
|
2017-11-03 03:13:17 +00:00
|
|
|
arena_committed = arena->mStats.committed << gPageSize2Pow;
|
2013-07-29 16:10:53 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
arena_allocated =
|
|
|
|
arena->mStats.allocated_small + arena->mStats.allocated_large;
|
2013-07-29 16:10:53 +00:00
|
|
|
|
2017-11-03 03:13:17 +00:00
|
|
|
arena_dirty = arena->mNumDirty << gPageSize2Pow;
|
2013-08-15 18:15:04 +00:00
|
|
|
|
2017-11-03 03:21:53 +00:00
|
|
|
for (j = 0; j < kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses;
|
|
|
|
j++) {
|
2017-10-06 08:49:40 +00:00
|
|
|
arena_bin_t* bin = &arena->mBins[j];
|
|
|
|
size_t bin_unused = 0;
|
2013-08-15 18:15:04 +00:00
|
|
|
|
2017-11-03 00:26:07 +00:00
|
|
|
for (auto mapelm : bin->mNonFullRuns.iter()) {
|
2017-11-03 03:13:17 +00:00
|
|
|
run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
|
2017-11-03 06:23:44 +00:00
|
|
|
bin_unused += run->mNumFree * bin->mSizeClass;
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2013-08-15 18:15:04 +00:00
|
|
|
|
2017-11-03 00:26:07 +00:00
|
|
|
if (bin->mCurrentRun) {
|
2017-11-03 06:23:44 +00:00
|
|
|
bin_unused += bin->mCurrentRun->mNumFree * bin->mSizeClass;
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2014-05-22 00:34:06 +00:00
|
|
|
|
2017-10-06 08:49:40 +00:00
|
|
|
arena_unused += bin_unused;
|
2017-11-03 00:26:07 +00:00
|
|
|
arena_headers += bin->mNumRuns * bin->mRunFirstRegionOffset;
|
2017-10-06 08:49:40 +00:00
|
|
|
}
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2014-05-22 00:34:06 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
MOZ_ASSERT(arena_mapped >= arena_committed);
|
|
|
|
MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
|
2014-05-22 00:34:06 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// "waste" is committed memory that is neither dirty nor
|
|
|
|
// allocated.
|
2017-08-31 01:29:11 +00:00
|
|
|
aStats->mapped += arena_mapped;
|
|
|
|
aStats->allocated += arena_allocated;
|
|
|
|
aStats->page_cache += arena_dirty;
|
2017-10-29 12:53:37 +00:00
|
|
|
aStats->waste += arena_committed - arena_allocated - arena_dirty -
|
|
|
|
arena_unused - arena_headers;
|
2017-08-31 01:29:11 +00:00
|
|
|
aStats->bin_unused += arena_unused;
|
|
|
|
aStats->bookkeeping += arena_headers;
|
2017-10-27 22:13:58 +00:00
|
|
|
aStats->narenas++;
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2017-10-27 22:13:58 +00:00
|
|
|
gArenas.mLock.Unlock();
|
2013-08-15 18:15:04 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Account for arena chunk headers in bookkeeping rather than waste.
|
2017-08-31 01:29:11 +00:00
|
|
|
chunk_header_size =
|
2017-11-03 03:16:11 +00:00
|
|
|
((aStats->mapped / aStats->chunksize) * gChunkHeaderNumPages)
|
2017-11-03 03:13:17 +00:00
|
|
|
<< gPageSize2Pow;
|
2008-06-20 17:34:42 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
aStats->mapped += non_arena_mapped;
|
|
|
|
aStats->bookkeeping += chunk_header_size;
|
|
|
|
aStats->waste -= chunk_header_size;
|
2014-05-22 00:34:06 +00:00
|
|
|
|
2017-08-31 01:29:11 +00:00
|
|
|
MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
|
2017-10-29 12:53:37 +00:00
|
|
|
aStats->page_cache + aStats->bookkeeping);
|
2008-06-20 17:34:42 +00:00
|
|
|
}
|
|
|
|
|
2011-10-24 17:23:47 +00:00
|
|
|
#ifdef MALLOC_DOUBLE_PURGE
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
|
2011-10-24 17:23:47 +00:00
|
|
|
static void
|
2017-10-26 02:11:32 +00:00
|
|
|
hard_purge_chunk(arena_chunk_t* aChunk)
|
2011-10-24 17:23:47 +00:00
|
|
|
{
|
2017-10-29 12:53:31 +00:00
|
|
|
// See similar logic in arena_t::Purge().
|
2017-11-03 03:16:11 +00:00
|
|
|
for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages; i++) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Find all adjacent pages with CHUNK_MAP_MADVISED set.
|
2017-10-26 02:11:32 +00:00
|
|
|
size_t npages;
|
|
|
|
for (npages = 0; aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED &&
|
2017-11-03 03:16:11 +00:00
|
|
|
i + npages < gChunkNumPages;
|
2017-10-26 02:11:32 +00:00
|
|
|
npages++) {
|
2017-10-29 12:53:31 +00:00
|
|
|
// Turn off the chunk's MADV_FREED bit and turn on its
|
|
|
|
// DECOMMITTED bit.
|
2017-10-26 02:11:32 +00:00
|
|
|
MOZ_DIAGNOSTIC_ASSERT(
|
|
|
|
!(aChunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
|
|
|
|
aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
|
|
|
|
}
|
2011-10-24 17:23:47 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// We could use mincore to find out which pages are actually
|
|
|
|
// present, but it's not clear that's better.
|
2017-10-26 02:11:32 +00:00
|
|
|
if (npages > 0) {
|
2017-11-03 03:13:17 +00:00
|
|
|
pages_decommit(((char*)aChunk) + (i << gPageSize2Pow),
|
|
|
|
npages << gPageSize2Pow);
|
|
|
|
Unused << pages_commit(((char*)aChunk) + (i << gPageSize2Pow),
|
|
|
|
npages << gPageSize2Pow);
|
2017-10-26 02:11:32 +00:00
|
|
|
}
|
|
|
|
i += npages;
|
|
|
|
}
|
2011-10-24 17:23:47 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Explicitly remove all of this arena's MADV_FREE'd pages from memory.
|
2017-09-15 08:34:53 +00:00
|
|
|
void
|
|
|
|
arena_t::HardPurge()
|
2011-10-24 17:23:47 +00:00
|
|
|
{
|
2017-10-06 08:49:40 +00:00
|
|
|
MutexAutoLock lock(mLock);
|
2011-10-24 17:23:47 +00:00
|
|
|
|
2017-09-15 08:34:53 +00:00
|
|
|
while (!mChunksMAdvised.isEmpty()) {
|
|
|
|
arena_chunk_t* chunk = mChunksMAdvised.popFront();
|
|
|
|
hard_purge_chunk(chunk);
|
|
|
|
}
|
2011-10-24 17:23:47 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline void
|
2017-08-31 01:29:11 +00:00
|
|
|
MozJemalloc::jemalloc_purge_freed_pages()
|
2011-10-24 17:23:47 +00:00
|
|
|
{
|
2017-10-27 08:25:18 +00:00
|
|
|
if (malloc_initialized) {
|
2017-10-27 22:13:58 +00:00
|
|
|
MutexAutoLock lock(gArenas.mLock);
|
|
|
|
for (auto arena : gArenas.iter()) {
|
2017-10-27 08:25:18 +00:00
|
|
|
arena->HardPurge();
|
|
|
|
}
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2011-10-24 17:23:47 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
#else // !defined MALLOC_DOUBLE_PURGE
|
2011-10-24 17:23:47 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
  // Without MALLOC_DOUBLE_PURGE there are no MADV_FREE'd pages tracked,
  // so this entry point is intentionally a no-op.
  // Do nothing.
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
#endif // defined MALLOC_DOUBLE_PURGE
|
2011-10-24 17:23:47 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline void
|
2017-08-31 01:29:11 +00:00
|
|
|
MozJemalloc::jemalloc_free_dirty_pages(void)
|
2012-11-08 19:06:50 +00:00
|
|
|
{
|
2017-10-27 08:25:18 +00:00
|
|
|
if (malloc_initialized) {
|
2017-10-27 22:13:58 +00:00
|
|
|
MutexAutoLock lock(gArenas.mLock);
|
|
|
|
for (auto arena : gArenas.iter()) {
|
2017-10-27 08:25:18 +00:00
|
|
|
MutexAutoLock arena_lock(arena->mLock);
|
|
|
|
arena->Purge(true);
|
|
|
|
}
|
2017-08-31 01:29:11 +00:00
|
|
|
}
|
2012-11-08 19:06:50 +00:00
|
|
|
}
|
|
|
|
|
2017-09-21 05:24:37 +00:00
|
|
|
// Look up an arena by id in the private or public tree. Crashes (release
// assert) if the id is unknown; returns nullptr only before initialization.
inline arena_t*
ArenaCollection::GetById(arena_id_t aArenaId, bool aIsPrivate)
{
  if (!malloc_initialized) {
    return nullptr;
  }
  // Use AlignedStorage2 to avoid running the arena_t constructor, while
  // we only need it as a placeholder for mId.
  mozilla::AlignedStorage2<arena_t> placeholder;
  placeholder.addr()->mId = aArenaId;

  MutexAutoLock lock(mLock);
  auto& tree = aIsPrivate ? mPrivateArenas : mArenas;
  arena_t* found = tree.Search(placeholder.addr());
  MOZ_RELEASE_ASSERT(found);
  return found;
}
|
|
|
|
|
|
|
|
#ifdef NIGHTLY_BUILD
|
2017-10-29 12:53:37 +00:00
|
|
|
template<>
|
|
|
|
inline arena_id_t
|
2017-11-16 22:27:35 +00:00
|
|
|
MozJemalloc::moz_create_arena_with_params(arena_params_t* aParams)
|
2017-09-21 05:24:37 +00:00
|
|
|
{
|
2017-10-27 08:25:18 +00:00
|
|
|
if (malloc_init()) {
|
2017-11-16 22:27:35 +00:00
|
|
|
arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true, aParams);
|
2017-10-27 08:25:18 +00:00
|
|
|
return arena->mId;
|
|
|
|
}
|
|
|
|
return 0;
|
2017-09-21 05:24:37 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Tear down the private arena with the given id.
template<>
inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
  // GetById release-asserts on unknown ids, so disposing a bogus id is a
  // deliberate crash rather than a silent no-op.
  arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
  MOZ_RELEASE_ASSERT(arena);
  gArenas.DisposeArena(arena);
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Define the moz_arena_* entry points (Nightly): each one binds a
// BaseAllocator to the private arena matching aArenaId and forwards the
// call to it.
#define MALLOC_DECL(name, return_type, ...)                                    \
  template<>                                                                   \
  inline return_type MozJemalloc::moz_arena_##name(                            \
    arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__))               \
  {                                                                            \
    BaseAllocator allocator(                                                   \
      gArenas.GetById(aArenaId, /* IsPrivate = */ true));                      \
    return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__));                   \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Outside Nightly, the arena-aware entry points fall back to
// DummyArenaAllocator, which ignores arena ids and uses the default
// allocator.
#define MALLOC_DECL(name, return_type, ...)                                    \
  template<>                                                                   \
  inline return_type MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
  {                                                                            \
    return DummyArenaAllocator<MozJemalloc>::name(                             \
      ARGS_HELPER(ARGS, ##__VA_ARGS__));                                       \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
|
|
|
|
|
2017-09-21 05:24:37 +00:00
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End non-standard functions.
|
|
|
|
// ***************************************************************************
|
|
|
|
// Begin library-private functions, used by threading libraries for protection
|
|
|
|
// of malloc during fork(). These functions are only called if the program is
|
|
|
|
// running in threaded mode, so there is no need to check whether the program
|
|
|
|
// is threaded here.
|
2017-08-30 07:54:17 +00:00
|
|
|
#ifndef XP_DARWIN
static
#endif
void
_malloc_prefork(void)
{
  // Acquire all mutexes in a safe order. This order (arena-collection
  // lock, each arena lock, base_mtx, huge_mtx) is the exact reverse of
  // the release order in _malloc_postfork_parent(), which keeps the
  // allocator's locks in a consistent state across fork().
  gArenas.mLock.Lock();

  for (auto arena : gArenas.iter()) {
    arena->mLock.Lock();
  }

  base_mtx.Lock();

  huge_mtx.Lock();
}
|
|
|
|
|
2017-08-30 07:54:17 +00:00
|
|
|
#ifndef XP_DARWIN
static
#endif
void
_malloc_postfork_parent(void)
{
  // Release all mutexes, now that fork() has completed. Mutexes are
  // released in the reverse of the order they were taken in
  // _malloc_prefork().
  huge_mtx.Unlock();

  base_mtx.Unlock();

  for (auto arena : gArenas.iter()) {
    arena->mLock.Unlock();
  }

  gArenas.mLock.Unlock();
}
|
|
|
|
|
2017-08-30 07:54:17 +00:00
|
|
|
#ifndef XP_DARWIN
static
#endif
void
_malloc_postfork_child(void)
{
  // Reinitialize all mutexes, now that fork() has completed. In the child
  // process only the forking thread survives, so the locks taken in
  // _malloc_prefork() are re-created (Init) rather than unlocked.
  huge_mtx.Init();

  base_mtx.Init();

  for (auto arena : gArenas.iter()) {
    arena->mLock.Init();
  }

  gArenas.mLock.Init();
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// End library-private functions.
|
|
|
|
// ***************************************************************************
|
2017-08-31 03:02:01 +00:00
|
|
|
#ifdef MOZ_REPLACE_MALLOC
|
2017-10-29 12:53:31 +00:00
|
|
|
// Windows doesn't come with weak imports as they are possible with
|
|
|
|
// LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
|
|
|
|
// the replacement functions are defined as variable pointers to the
|
|
|
|
// function resolved with GetProcAddress() instead of weak definitions
|
|
|
|
// of functions. On Android, the same needs to happen as well, because
|
|
|
|
// the Android linker doesn't handle weak linking with non LD_PRELOADed
|
|
|
|
// libraries, but LD_PRELOADing is not very convenient on Android, with
|
|
|
|
// the zygote.
|
2017-08-31 03:02:01 +00:00
|
|
|
#ifdef XP_DARWIN
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MOZ_REPLACE_WEAK __attribute__((weak_import))
|
2017-11-24 07:36:29 +00:00
|
|
|
#elif defined(XP_WIN) || defined(ANDROID)
|
|
|
|
#define MOZ_DYNAMIC_REPLACE_INIT
|
|
|
|
#define replace_init replace_init_decl
|
2017-08-31 03:02:01 +00:00
|
|
|
#elif defined(__GNUC__)
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MOZ_REPLACE_WEAK __attribute__((weak))
|
2017-08-31 03:02:01 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#include "replace_malloc.h"
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Seed the replaceable function table with the stock MozJemalloc
// implementations; replace_init() may later overwrite individual entries.
#define MALLOC_DECL(name, return_type, ...) MozJemalloc::name,

static malloc_table_t gReplaceMallocTable = {
#include "malloc_decls.h"
};
|
|
|
|
|
2017-11-24 07:36:29 +00:00
|
|
|
#ifdef MOZ_DYNAMIC_REPLACE_INIT
|
|
|
|
#undef replace_init
|
|
|
|
typedef decltype(replace_init_decl) replace_init_impl_t;
|
|
|
|
static replace_init_impl_t* replace_init = nullptr;
|
2017-08-31 03:02:01 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef XP_WIN
typedef HMODULE replace_malloc_handle_t;

// Load the replace-malloc library named by the MOZ_REPLACE_MALLOC_LIB
// environment variable. Returns nullptr when the variable is unset or the
// library fails to load.
static replace_malloc_handle_t
replace_malloc_handle()
{
  char replace_malloc_lib[1024];
  if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB",
                              replace_malloc_lib,
                              sizeof(replace_malloc_lib)) > 0) {
    return LoadLibraryA(replace_malloc_lib);
  }
  return nullptr;
}

#define REPLACE_MALLOC_GET_INIT_FUNC(handle)                                   \
  (replace_init_impl_t*)GetProcAddress(handle, "replace_init")
|
2017-08-31 03:02:01 +00:00
|
|
|
|
|
|
|
#elif defined(ANDROID)
#include <dlfcn.h>

typedef void* replace_malloc_handle_t;

// dlopen the replace-malloc library named by the MOZ_REPLACE_MALLOC_LIB
// environment variable. Returns nullptr when the variable is unset/empty
// or dlopen fails.
static replace_malloc_handle_t
replace_malloc_handle()
{
  const char* replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
  if (replace_malloc_lib && *replace_malloc_lib) {
    return dlopen(replace_malloc_lib, RTLD_LAZY);
  }
  return nullptr;
}

#define REPLACE_MALLOC_GET_INIT_FUNC(handle)                                   \
  (replace_init_impl_t*)dlsym(handle, "replace_init")
|
2017-08-31 03:02:01 +00:00
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
static void
|
|
|
|
replace_malloc_init_funcs();
|
2017-10-29 12:50:49 +00:00
|
|
|
|
2017-11-23 08:24:19 +00:00
|
|
|
#ifdef MOZ_REPLACE_MALLOC_STATIC
|
|
|
|
extern "C" void
|
|
|
|
logalloc_init(malloc_table_t*, ReplaceMallocBridge**);
|
2017-11-27 23:10:48 +00:00
|
|
|
|
|
|
|
extern "C" void
|
|
|
|
dmd_init(malloc_table_t*, ReplaceMallocBridge**);
|
2017-11-23 08:24:19 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
bool
|
|
|
|
Equals(malloc_table_t& aTable1, malloc_table_t& aTable2)
|
|
|
|
{
|
|
|
|
return memcmp(&aTable1, &aTable2, sizeof(malloc_table_t)) == 0;
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// Below is the malloc implementation overriding jemalloc and calling the
|
|
|
|
// replacement functions if they exist.
|
Bug 1420353 - Change how replace-malloc initializes, part 1. r=njn
The allocator API is a moving target, and every time we change it, the
surface for replace-malloc libraries grows. This causes some build
system problems, because of the tricks in replace_malloc.mk, which
require the full list of symbols.
Considering the above and the goal of moving some of the replace-malloc
libraries into mozglue, it becomes simpler to reduce the replace-malloc
exposure to the initialization functions.
So instead of the allocator poking into replace-malloc libraries for all
the functions, we expect their replace_init function to alter the table
of allocator functions it's passed to register its own functions.
This means replace-malloc implementations now need to copy the original
table, which is not a bad thing, as it allows function calls with one
level of indirection less. It also replace_init functions to not
actually register the replace-malloc functions in some cases, which will
be useful when linking some replace-malloc libraries into mozglue.
Note this is binary compatible with previously built replace-malloc
libraries, but because those libraries wouldn't update the function
table, they would stay disabled.
--HG--
extra : rebase_source : 2518f6ebe76b4c82359e98369de6a5a8c3ca9967
2017-11-22 08:24:29 +00:00
|
|
|
static bool gReplaceMallocInitialized = false;
|
2017-11-24 07:02:05 +00:00
|
|
|
static ReplaceMallocBridge* gReplaceMallocBridge = nullptr;
|
2017-08-31 03:02:01 +00:00
|
|
|
// One-time setup of the replace-malloc machinery, run lazily from the first
// allocator entry point (or get_bridge()) that executes.
static void
init()
{
#ifdef MOZ_REPLACE_MALLOC_STATIC
  // Snapshot the table so we can tell below whether a dynamic replace_init
  // registered itself; static replacements only apply if it didn't.
  malloc_table_t initialTable = gReplaceMallocTable;
#endif

#ifdef MOZ_DYNAMIC_REPLACE_INIT
  // Resolve replace_init from the dynamically loaded library, if any.
  replace_malloc_handle_t handle = replace_malloc_handle();
  if (handle) {
    replace_init = REPLACE_MALLOC_GET_INIT_FUNC(handle);
  }
#endif

  // Set this *before* calling replace_init, otherwise if replace_init calls
  // malloc() we'll get an infinite loop.
  gReplaceMallocInitialized = true;
  if (replace_init) {
    replace_init(&gReplaceMallocTable, &gReplaceMallocBridge);
  }
#ifdef MOZ_REPLACE_MALLOC_STATIC
  // Statically linked replacements are tried in turn; each installs itself
  // only if the table is still untouched by a previous replacement.
  if (Equals(initialTable, gReplaceMallocTable)) {
    logalloc_init(&gReplaceMallocTable, &gReplaceMallocBridge);
  }
#ifdef MOZ_DMD
  if (Equals(initialTable, gReplaceMallocTable)) {
    dmd_init(&gReplaceMallocTable, &gReplaceMallocBridge);
  }
#endif
#endif
  // Derive defaults for the aligned-allocation entries that the
  // replacement did not provide.
  replace_malloc_init_funcs();
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
// Replace-malloc entry points: lazily run init() on first use, then forward
// to whatever implementation the (possibly replaced) table currently holds.
#define MALLOC_DECL(name, return_type, ...)                                    \
  template<>                                                                   \
  inline return_type ReplaceMalloc::name(                                      \
    ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__))                                    \
  {                                                                            \
    if (MOZ_UNLIKELY(!gReplaceMallocInitialized)) {                            \
      init();                                                                  \
    }                                                                          \
    return gReplaceMallocTable.name(ARGS_HELPER(ARGS, ##__VA_ARGS__));         \
  }
#include "malloc_decls.h"
|
|
|
|
|
|
|
|
// Expose the bridge object a replace-malloc library registered via
// replace_init(); may be nullptr when no replacement installed one.
MOZ_JEMALLOC_API struct ReplaceMallocBridge*
get_bridge(void)
{
  // Make sure initialization has run before reading the bridge pointer,
  // since replace_init() is what fills it in.
  if (MOZ_UNLIKELY(!gReplaceMallocInitialized)) {
    init();
  }
  return gReplaceMallocBridge;
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// posix_memalign, aligned_alloc, memalign and valloc all implement some kind
|
|
|
|
// of aligned memory allocation. For convenience, a replace-malloc library can
|
|
|
|
// skip defining replace_posix_memalign, replace_aligned_alloc and
|
|
|
|
// replace_valloc, and default implementations will be automatically derived
|
|
|
|
// from replace_memalign.
|
2017-08-31 03:02:01 +00:00
|
|
|
static void
|
|
|
|
replace_malloc_init_funcs()
|
|
|
|
{
|
Bug 1420353 - Change how replace-malloc initializes, part 1. r=njn
The allocator API is a moving target, and every time we change it, the
surface for replace-malloc libraries grows. This causes some build
system problems, because of the tricks in replace_malloc.mk, which
require the full list of symbols.
Considering the above and the goal of moving some of the replace-malloc
libraries into mozglue, it becomes simpler to reduce the replace-malloc
exposure to the initialization functions.
So instead of the allocator poking into replace-malloc libraries for all
the functions, we expect their replace_init function to alter the table
of allocator functions it's passed to register its own functions.
This means replace-malloc implementations now need to copy the original
table, which is not a bad thing, as it allows function calls with one
level of indirection less. It also replace_init functions to not
actually register the replace-malloc functions in some cases, which will
be useful when linking some replace-malloc libraries into mozglue.
Note this is binary compatible with previously built replace-malloc
libraries, but because those libraries wouldn't update the function
table, they would stay disabled.
--HG--
extra : rebase_source : 2518f6ebe76b4c82359e98369de6a5a8c3ca9967
2017-11-22 08:24:29 +00:00
|
|
|
if (gReplaceMallocTable.posix_memalign == MozJemalloc::posix_memalign &&
|
|
|
|
gReplaceMallocTable.memalign != MozJemalloc::memalign) {
|
|
|
|
gReplaceMallocTable.posix_memalign =
|
2017-10-29 12:53:37 +00:00
|
|
|
AlignedAllocator<ReplaceMalloc::memalign>::posix_memalign;
|
2017-08-31 03:02:01 +00:00
|
|
|
}
|
Bug 1420353 - Change how replace-malloc initializes, part 1. r=njn
The allocator API is a moving target, and every time we change it, the
surface for replace-malloc libraries grows. This causes some build
system problems, because of the tricks in replace_malloc.mk, which
require the full list of symbols.
Considering the above and the goal of moving some of the replace-malloc
libraries into mozglue, it becomes simpler to reduce the replace-malloc
exposure to the initialization functions.
So instead of the allocator poking into replace-malloc libraries for all
the functions, we expect their replace_init function to alter the table
of allocator functions it's passed to register its own functions.
This means replace-malloc implementations now need to copy the original
table, which is not a bad thing, as it allows function calls with one
level of indirection less. It also replace_init functions to not
actually register the replace-malloc functions in some cases, which will
be useful when linking some replace-malloc libraries into mozglue.
Note this is binary compatible with previously built replace-malloc
libraries, but because those libraries wouldn't update the function
table, they would stay disabled.
--HG--
extra : rebase_source : 2518f6ebe76b4c82359e98369de6a5a8c3ca9967
2017-11-22 08:24:29 +00:00
|
|
|
if (gReplaceMallocTable.aligned_alloc == MozJemalloc::aligned_alloc &&
|
|
|
|
gReplaceMallocTable.memalign != MozJemalloc::memalign) {
|
|
|
|
gReplaceMallocTable.aligned_alloc =
|
2017-10-29 12:53:37 +00:00
|
|
|
AlignedAllocator<ReplaceMalloc::memalign>::aligned_alloc;
|
2017-08-31 03:02:01 +00:00
|
|
|
}
|
Bug 1420353 - Change how replace-malloc initializes, part 1. r=njn
The allocator API is a moving target, and every time we change it, the
surface for replace-malloc libraries grows. This causes some build
system problems, because of the tricks in replace_malloc.mk, which
require the full list of symbols.
Considering the above and the goal of moving some of the replace-malloc
libraries into mozglue, it becomes simpler to reduce the replace-malloc
exposure to the initialization functions.
So instead of the allocator poking into replace-malloc libraries for all
the functions, we expect their replace_init function to alter the table
of allocator functions it's passed to register its own functions.
This means replace-malloc implementations now need to copy the original
table, which is not a bad thing, as it allows function calls with one
level of indirection less. It also replace_init functions to not
actually register the replace-malloc functions in some cases, which will
be useful when linking some replace-malloc libraries into mozglue.
Note this is binary compatible with previously built replace-malloc
libraries, but because those libraries wouldn't update the function
table, they would stay disabled.
--HG--
extra : rebase_source : 2518f6ebe76b4c82359e98369de6a5a8c3ca9967
2017-11-22 08:24:29 +00:00
|
|
|
if (gReplaceMallocTable.valloc == MozJemalloc::valloc &&
|
|
|
|
gReplaceMallocTable.memalign != MozJemalloc::memalign) {
|
|
|
|
gReplaceMallocTable.valloc =
|
2017-10-29 12:53:37 +00:00
|
|
|
AlignedAllocator<ReplaceMalloc::memalign>::valloc;
|
2017-08-31 03:02:01 +00:00
|
|
|
}
|
Bug 1420353 - Change how replace-malloc initializes, part 1. r=njn
The allocator API is a moving target, and every time we change it, the
surface for replace-malloc libraries grows. This causes some build
system problems, because of the tricks in replace_malloc.mk, which
require the full list of symbols.
Considering the above and the goal of moving some of the replace-malloc
libraries into mozglue, it becomes simpler to reduce the replace-malloc
exposure to the initialization functions.
So instead of the allocator poking into replace-malloc libraries for all
the functions, we expect their replace_init function to alter the table
of allocator functions it's passed to register its own functions.
This means replace-malloc implementations now need to copy the original
table, which is not a bad thing, as it allows function calls with one
level of indirection less. It also replace_init functions to not
actually register the replace-malloc functions in some cases, which will
be useful when linking some replace-malloc libraries into mozglue.
Note this is binary compatible with previously built replace-malloc
libraries, but because those libraries wouldn't update the function
table, they would stay disabled.
--HG--
extra : rebase_source : 2518f6ebe76b4c82359e98369de6a5a8c3ca9967
2017-11-22 08:24:29 +00:00
|
|
|
if (gReplaceMallocTable.moz_create_arena_with_params ==
|
|
|
|
MozJemalloc::moz_create_arena_with_params &&
|
|
|
|
gReplaceMallocTable.malloc != MozJemalloc::malloc) {
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MALLOC_DECL(name, ...) \
|
Bug 1420353 - Change how replace-malloc initializes, part 1. r=njn
The allocator API is a moving target, and every time we change it, the
surface for replace-malloc libraries grows. This causes some build
system problems, because of the tricks in replace_malloc.mk, which
require the full list of symbols.
Considering the above and the goal of moving some of the replace-malloc
libraries into mozglue, it becomes simpler to reduce the replace-malloc
exposure to the initialization functions.
So instead of the allocator poking into replace-malloc libraries for all
the functions, we expect their replace_init function to alter the table
of allocator functions it's passed to register its own functions.
This means replace-malloc implementations now need to copy the original
table, which is not a bad thing, as it allows function calls with one
level of indirection less. It also replace_init functions to not
actually register the replace-malloc functions in some cases, which will
be useful when linking some replace-malloc libraries into mozglue.
Note this is binary compatible with previously built replace-malloc
libraries, but because those libraries wouldn't update the function
table, they would stay disabled.
--HG--
extra : rebase_source : 2518f6ebe76b4c82359e98369de6a5a8c3ca9967
2017-11-22 08:24:29 +00:00
|
|
|
gReplaceMallocTable.name = DummyArenaAllocator<ReplaceMalloc>::name;
|
2017-09-21 22:22:38 +00:00
|
|
|
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
|
|
|
|
#include "malloc_decls.h"
|
|
|
|
}
|
2017-08-31 03:02:01 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
#endif // MOZ_REPLACE_MALLOC
|
2017-10-29 12:53:37 +00:00
|
|
|
// ***************************************************************************
|
|
|
|
// Definition of all the _impl functions
|
2017-08-31 03:02:01 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#define GENERIC_MALLOC_DECL2(name, name_impl, return_type, ...) \
|
|
|
|
return_type name_impl(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
|
|
|
|
{ \
|
|
|
|
return DefaultMalloc::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
|
2017-08-31 03:02:01 +00:00
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#define GENERIC_MALLOC_DECL(name, return_type, ...) \
|
2017-09-21 06:27:12 +00:00
|
|
|
GENERIC_MALLOC_DECL2(name, name##_impl, return_type, ##__VA_ARGS__)
|
2017-09-13 05:25:21 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MALLOC_DECL(...) \
|
|
|
|
MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (__VA_ARGS__))
|
2017-08-31 01:29:11 +00:00
|
|
|
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
|
|
|
|
#include "malloc_decls.h"
|
|
|
|
|
2017-09-21 06:27:12 +00:00
|
|
|
#undef GENERIC_MALLOC_DECL
|
2017-10-29 12:53:37 +00:00
|
|
|
#define GENERIC_MALLOC_DECL(name, return_type, ...) \
|
2017-09-21 06:27:12 +00:00
|
|
|
GENERIC_MALLOC_DECL2(name, name, return_type, ##__VA_ARGS__)
|
2017-09-13 05:25:21 +00:00
|
|
|
|
2017-10-29 12:53:37 +00:00
|
|
|
#define MALLOC_DECL(...) \
|
|
|
|
MOZ_JEMALLOC_API MACRO_CALL(GENERIC_MALLOC_DECL, (__VA_ARGS__))
|
2017-09-21 22:22:38 +00:00
|
|
|
#define MALLOC_FUNCS (MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
|
2017-08-31 01:29:11 +00:00
|
|
|
#include "malloc_decls.h"
|
2017-10-29 12:53:37 +00:00
|
|
|
// ***************************************************************************
|
2008-02-06 23:06:50 +00:00
|
|
|
|
2010-11-05 17:26:51 +00:00
|
|
|
#ifdef HAVE_DLOPEN
|
2017-10-29 12:53:37 +00:00
|
|
|
#include <dlfcn.h>
|
2009-07-30 03:15:50 +00:00
|
|
|
#endif
|
|
|
|
|
2017-08-31 03:02:01 +00:00
|
|
|
#if defined(__GLIBC__) && !defined(__UCLIBC__)
|
2017-10-29 12:53:31 +00:00
|
|
|
// glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
|
|
|
|
// to inconsistently reference libc's malloc(3)-compatible functions
|
|
|
|
// (bug 493541).
|
|
|
|
//
|
|
|
|
// These definitions interpose hooks in glibc. The functions are actually
|
|
|
|
// passed an extra argument for the caller return address, which will be
|
|
|
|
// ignored.
|
|
|
|
|
2017-05-12 12:52:25 +00:00
|
|
|
extern "C" {
|
2017-08-31 01:29:11 +00:00
|
|
|
MOZ_EXPORT void (*__free_hook)(void*) = free_impl;
|
|
|
|
MOZ_EXPORT void* (*__malloc_hook)(size_t) = malloc_impl;
|
|
|
|
MOZ_EXPORT void* (*__realloc_hook)(void*, size_t) = realloc_impl;
|
|
|
|
MOZ_EXPORT void* (*__memalign_hook)(size_t, size_t) = memalign_impl;
|
2017-05-12 12:52:25 +00:00
|
|
|
}
|
2009-07-30 03:15:50 +00:00
|
|
|
|
2017-08-31 03:02:01 +00:00
|
|
|
#elif defined(RTLD_DEEPBIND)
|
2017-10-29 12:53:31 +00:00
|
|
|
// XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
|
|
|
|
// implementations permit similar inconsistencies? Should STV_SINGLETON
|
|
|
|
// visibility be used for interposition where available?
|
2017-10-29 12:53:37 +00:00
|
|
|
#error "Interposing malloc is unsafe on this system without libc malloc hooks."
|
2008-02-06 23:06:50 +00:00
|
|
|
#endif
|
2011-06-27 19:44:51 +00:00
|
|
|
|
2017-08-30 07:53:10 +00:00
|
|
|
#ifdef XP_WIN
|
2017-08-31 05:17:49 +00:00
|
|
|
void*
|
|
|
|
_recalloc(void* aPtr, size_t aCount, size_t aSize)
|
|
|
|
{
|
2017-11-09 04:49:33 +00:00
|
|
|
size_t oldsize = aPtr ? AllocInfo::Get(aPtr).Size() : 0;
|
2017-10-30 02:28:17 +00:00
|
|
|
CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aCount) * aSize;
|
|
|
|
|
|
|
|
if (!checkedSize.isValid()) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t newsize = checkedSize.value();
|
2017-08-31 05:17:49 +00:00
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// In order for all trailing bytes to be zeroed, the caller needs to
|
|
|
|
// use calloc(), followed by recalloc(). However, the current calloc()
|
|
|
|
// implementation only zeros the bytes requested, so if recalloc() is
|
|
|
|
// to work 100% correctly, calloc() will need to change to zero
|
|
|
|
// trailing bytes.
|
2017-08-31 05:17:49 +00:00
|
|
|
aPtr = DefaultMalloc::realloc(aPtr, newsize);
|
|
|
|
if (aPtr && oldsize < newsize) {
|
|
|
|
memset((void*)((uintptr_t)aPtr + oldsize), 0, newsize - oldsize);
|
|
|
|
}
|
|
|
|
|
|
|
|
return aPtr;
|
|
|
|
}
|
|
|
|
|
2017-10-29 12:53:31 +00:00
|
|
|
// This impl of _expand doesn't ever actually expand or shrink blocks: it
|
|
|
|
// simply replies that you may continue using a shrunk block.
|
2017-08-31 05:17:49 +00:00
|
|
|
void*
|
|
|
|
_expand(void* aPtr, size_t newsize)
|
|
|
|
{
|
2017-11-09 04:49:33 +00:00
|
|
|
if (AllocInfo::Get(aPtr).Size() >= newsize) {
|
2017-08-31 05:17:49 +00:00
|
|
|
return aPtr;
|
|
|
|
}
|
|
|
|
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
_msize(void* aPtr)
|
|
|
|
{
|
|
|
|
return DefaultMalloc::malloc_usable_size(aPtr);
|
|
|
|
}
|
2011-06-27 19:44:51 +00:00
|
|
|
#endif
|