Bug 1531716 - Part 4: Replace jstypes macros with constexpr functions. r=jonco

JS_BIT and JS_BITMASK are only used in uint32_t contexts, so their
replacements, js::Bit and js::BitMask, accept and return uint32_t.

JS_HOWMANY and the three JS_ROUND* macros are only used with size_t inputs, so
their replacements, js::HowMany, js::RoundUp, js::RoundDown, and js::Round,
accept and return size_t.

Differential Revision: https://phabricator.services.mozilla.com/D51142

--HG--
extra : moz-landing-system : lando
André Bargull 2019-11-04 14:04:35 +00:00
parent 5efd76b5a4
commit 11c3c57ab3
27 changed files with 123 additions and 104 deletions
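
The change is mechanical at every call site: JS_BIT(n) becomes js::Bit(n) (or
plain Bit(n) inside namespace js), JS_BITMASK becomes js::BitMask, and
JS_HOWMANY / JS_ROUNDUP / JS_ROUNDDOWN / JS_ROUND become js::HowMany /
js::RoundUp / js::RoundDown / js::Round. A minimal usage sketch of the new
helpers (illustrative values, not taken from the patch):

    uint32_t flags = js::Bit(3) | js::Bit(5);  // was JS_BIT(3) | JS_BIT(5); == 40
    uint32_t low4 = flags & js::BitMask(4);    // was ... & JS_BITMASK(4); == 8
    size_t chunks = js::HowMany(1000, 256);    // was JS_HOWMANY(1000, 256); == 4
    size_t padded = js::RoundUp(1000, 256);    // was JS_ROUNDUP(1000, 256); == 1024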


@@ -701,7 +701,7 @@ static const uintptr_t JSCLASS_RESERVED_SLOTS_SHIFT = 8;
 static const uint32_t JSCLASS_RESERVED_SLOTS_WIDTH = 8;
 static const uint32_t JSCLASS_RESERVED_SLOTS_MASK =
-    JS_BITMASK(JSCLASS_RESERVED_SLOTS_WIDTH);
+    js::BitMask(JSCLASS_RESERVED_SLOTS_WIDTH);
 static constexpr uint32_t JSCLASS_HAS_RESERVED_SLOTS(uint32_t n) {
   return (n & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT;
@@ -759,7 +759,7 @@ static constexpr uint32_t JSCLASS_GLOBAL_FLAGS =
 // Fast access to the original value of each standard class's prototype.
 static const uint32_t JSCLASS_CACHED_PROTO_SHIFT = JSCLASS_HIGH_FLAGS_SHIFT + 9;
 static const uint32_t JSCLASS_CACHED_PROTO_MASK =
-    JS_BITMASK(js::JSCLASS_CACHED_PROTO_WIDTH);
+    js::BitMask(js::JSCLASS_CACHED_PROTO_WIDTH);
 static_assert(JSProto_LIMIT <= (JSCLASS_CACHED_PROTO_MASK + 1),
               "JSProtoKey must not exceed the maximum cacheable proto-mask");


@@ -230,14 +230,15 @@ struct Zone {
 };
 struct String {
-  static const uint32_t NON_ATOM_BIT = JS_BIT(1);
-  static const uint32_t LINEAR_BIT = JS_BIT(4);
-  static const uint32_t INLINE_CHARS_BIT = JS_BIT(6);
-  static const uint32_t LATIN1_CHARS_BIT = JS_BIT(9);
-  static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t TYPE_FLAGS_MASK = JS_BITMASK(9) - JS_BIT(2) - JS_BIT(0);
-  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t PERMANENT_ATOM_FLAGS = JS_BIT(8);
+  static const uint32_t NON_ATOM_BIT = js::Bit(1);
+  static const uint32_t LINEAR_BIT = js::Bit(4);
+  static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
+  static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
+  static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t TYPE_FLAGS_MASK =
+      js::BitMask(9) - js::Bit(2) - js::Bit(0);
+  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t PERMANENT_ATOM_FLAGS = js::Bit(8);
   uintptr_t flags_;
 #if JS_BITS_PER_WORD == 32


@@ -219,7 +219,7 @@ MapIteratorObject* MapIteratorObject::create(JSContext* cx, HandleObject obj,
     iterobj->setSlot(RangeSlot, PrivateValue(nullptr));
     iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
-    const size_t size = JS_ROUNDUP(sizeof(ValueMap::Range), gc::CellAlignBytes);
+    const size_t size = RoundUp(sizeof(ValueMap::Range), gc::CellAlignBytes);
     buffer = nursery.allocateBufferSameLocation(iterobj, size);
     if (buffer) {
       break;
@@ -992,7 +992,7 @@ SetIteratorObject* SetIteratorObject::create(JSContext* cx, HandleObject obj,
     iterobj->setSlot(RangeSlot, PrivateValue(nullptr));
     iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
-    const size_t size = JS_ROUNDUP(sizeof(ValueSet::Range), gc::CellAlignBytes);
+    const size_t size = RoundUp(sizeof(ValueSet::Range), gc::CellAlignBytes);
     buffer = nursery.allocateBufferSameLocation(iterobj, size);
     if (buffer) {
       break;


@@ -162,7 +162,7 @@ static size_t NextSize(size_t start, size_t used) {
   // After 1 MB, grow more gradually, to waste less memory.
   // The sequence (in megabytes) begins:
   // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
-  return JS_ROUNDUP(used / 8, mb);
+  return RoundUp(used / 8, mb);
 }
 LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
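
Aside: the growth sequence quoted in the comment above can be reproduced with a
small standalone program, assuming used advances by each chunk handed out and
the formula kicks in once 1 MB has been allocated (an illustration, not part of
the patch):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t HowMany(size_t x, size_t y) { return (x + y - 1) / y; }
    constexpr size_t RoundUp(size_t x, size_t y) { return HowMany(x, y) * y; }

    int main() {
      const size_t mb = 1024 * 1024;
      size_t used = mb;  // total allocated so far
      for (int i = 0; i < 18; i++) {
        size_t next = RoundUp(used / 8, mb);
        printf("%zu ", next / mb);  // prints: 1 1 1 1 1 1 1 1 2 2 2 2 3 3 3 4 4 5
        used += next;
      }
      printf("\n");
    }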


@@ -477,7 +477,7 @@ bool BytecodeEmitter::emitDupAt(unsigned slotFromTop, unsigned count) {
     return emit1(JSOP_DUP2);
   }
-  if (slotFromTop >= JS_BIT(24)) {
+  if (slotFromTop >= Bit(24)) {
     reportError(nullptr, JSMSG_TOO_MANY_LOCALS);
     return false;
   }
@@ -2069,11 +2069,11 @@ bool BytecodeEmitter::emitNumberOp(double dval) {
     }
     uint32_t u = uint32_t(ival);
-    if (u < JS_BIT(16)) {
+    if (u < Bit(16)) {
       if (!emitUint16Operand(JSOP_UINT16, u)) {
         return false;
       }
-    } else if (u < JS_BIT(24)) {
+    } else if (u < Bit(24)) {
       BytecodeOffset off;
       if (!emitN(JSOP_UINT24, 3, &off)) {
         return false;
@@ -2272,7 +2272,7 @@ bool BytecodeEmitter::isRunOnceLambda() {
 bool BytecodeEmitter::allocateResumeIndex(BytecodeOffset offset,
                                           uint32_t* resumeIndex) {
-  static constexpr uint32_t MaxResumeIndex = JS_BITMASK(24);
+  static constexpr uint32_t MaxResumeIndex = BitMask(24);
   static_assert(
       MaxResumeIndex < uint32_t(AbstractGeneratorObject::RESUME_INDEX_RUNNING),


@@ -214,9 +214,9 @@ inline bool SN_IS_TERMINATOR(jssrcnote* sn) { return *sn == SRC_NULL; }
 #define SN_TYPE_BITS 5
 #define SN_DELTA_BITS 3
 #define SN_XDELTA_BITS 6
-#define SN_TYPE_MASK (JS_BITMASK(SN_TYPE_BITS) << SN_DELTA_BITS)
-#define SN_DELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_DELTA_BITS))
-#define SN_XDELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_XDELTA_BITS))
+#define SN_TYPE_MASK (js::BitMask(SN_TYPE_BITS) << SN_DELTA_BITS)
+#define SN_DELTA_MASK ((ptrdiff_t)js::BitMask(SN_DELTA_BITS))
+#define SN_XDELTA_MASK ((ptrdiff_t)js::BitMask(SN_XDELTA_BITS))
 #define SN_MAKE_NOTE(sn, t, d) \
   (*(sn) = (jssrcnote)(((t) << SN_DELTA_BITS) | ((d)&SN_DELTA_MASK)))
@@ -235,8 +235,8 @@ inline bool SN_IS_TERMINATOR(jssrcnote* sn) { return *sn == SRC_NULL; }
   (SN_IS_XDELTA(sn) ? SN_MAKE_XDELTA(sn, delta) \
                     : SN_MAKE_NOTE(sn, SN_TYPE(sn), delta))
-#define SN_DELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_DELTA_BITS))
-#define SN_XDELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_XDELTA_BITS))
+#define SN_DELTA_LIMIT ((ptrdiff_t)js::Bit(SN_DELTA_BITS))
+#define SN_XDELTA_LIMIT ((ptrdiff_t)js::Bit(SN_XDELTA_BITS))
 /*
  * Offset fields follow certain notes and are frequency-encoded: an offset in


@@ -32,7 +32,7 @@ bool SwitchEmitter::TableGenerator::addNumber(int32_t caseValue) {
     return true;
   }
-  if (unsigned(caseValue + int(JS_BIT(15))) >= unsigned(JS_BIT(16))) {
+  if (unsigned(caseValue + int(Bit(15))) >= unsigned(Bit(16))) {
     setInvalid();
     return true;
   }
@@ -48,7 +48,7 @@ bool SwitchEmitter::TableGenerator::addNumber(int32_t caseValue) {
   // We bias caseValue by 65536 if it's negative, and hope that's a rare case
   // (because it requires a malloc'd bitmap).
   if (caseValue < 0) {
-    caseValue += JS_BIT(16);
+    caseValue += Bit(16);
   }
   if (caseValue >= intmapBitLength_) {
     size_t newLength = NumWordsForBitArrayOfLength(caseValue + 1);
@@ -87,7 +87,7 @@ void SwitchEmitter::TableGenerator::finish(uint32_t caseCount) {
   // Compute table length and select condswitch instead if overlarge
   // or more than half-sparse.
   tableLength_ = uint32_t(high_ - low_ + 1);
-  if (tableLength_ >= JS_BIT(16) || tableLength_ > 2 * caseCount) {
+  if (tableLength_ >= Bit(16) || tableLength_ > 2 * caseCount) {
     setInvalid();
   }
 }
@@ -139,7 +139,7 @@ bool SwitchEmitter::emitLexical(Handle<LexicalScope::Data*> bindings) {
 bool SwitchEmitter::validateCaseCount(uint32_t caseCount) {
   MOZ_ASSERT(state_ == State::Discriminant || state_ == State::Lexical);
-  if (caseCount > JS_BIT(16)) {
+  if (caseCount > Bit(16)) {
     bce_->reportError(switchPos_, JSMSG_TOO_MANY_CASES);
     return false;
   }
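
Aside: the biased comparison in addNumber() is the usual single-compare range
test. Adding Bit(15) shifts the int16 range [-32768, 32767] onto [0, 65535], so
one unsigned comparison rejects everything outside it. Checking the endpoints
at compile time (illustrative only):

    static_assert(unsigned(-32768 + (1 << 15)) == 0u, "int16 minimum maps to 0");
    static_assert(unsigned(32767 + (1 << 15)) == 65535u, "int16 maximum maps to Bit(16) - 1");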


@@ -33,7 +33,7 @@ static int testStructuredCloneReaderFuzz(const uint8_t* buf, size_t size) {
   // Make sure to pad the buffer to a multiple of kSegmentAlignment
   const size_t kSegmentAlignment = 8;
-  size_t buf_size = JS_ROUNDUP(size, kSegmentAlignment);
+  size_t buf_size = RoundUp(size, kSegmentAlignment);
   JS::StructuredCloneScope scope = JS::StructuredCloneScope::DifferentProcess;


@@ -69,16 +69,16 @@ struct alignas(gc::CellAlignBytes) Cell {
  public:
   // The low bits of the first word of each Cell are reserved for GC flags.
   static constexpr int ReservedBits = 2;
-  static constexpr uintptr_t RESERVED_MASK = JS_BITMASK(ReservedBits);
+  static constexpr uintptr_t RESERVED_MASK = BitMask(ReservedBits);
   // Indicates if the cell is currently a RelocationOverlay
-  static constexpr uintptr_t FORWARD_BIT = JS_BIT(0);
+  static constexpr uintptr_t FORWARD_BIT = Bit(0);
   // When a Cell is in the nursery, this will indicate if it is a JSString (1)
   // or JSObject (0). When not in nursery, this bit is still reserved for
   // JSString to use as JSString::NON_ATOM bit. This may be removed by Bug
   // 1376646.
-  static constexpr uintptr_t JSSTRING_BIT = JS_BIT(1);
+  static constexpr uintptr_t JSSTRING_BIT = Bit(1);
   MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
   MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;


@@ -286,14 +286,14 @@ const AllocKind gc::slotsToThingKind[] = {
 // Check that reserved bits of a Cell are compatible with our typical allocators
 // since most derived classes will store a pointer in the first word.
-static_assert(js::detail::LIFO_ALLOC_ALIGN > JS_BITMASK(Cell::ReservedBits),
+static_assert(js::detail::LIFO_ALLOC_ALIGN > BitMask(Cell::ReservedBits),
              "Cell::ReservedBits should support LifoAlloc");
-static_assert(CellAlignBytes > JS_BITMASK(Cell::ReservedBits),
+static_assert(CellAlignBytes > BitMask(Cell::ReservedBits),
              "Cell::ReservedBits should support gc::Cell");
 static_assert(
-    sizeof(uintptr_t) > JS_BITMASK(Cell::ReservedBits),
+    sizeof(uintptr_t) > BitMask(Cell::ReservedBits),
     "Cell::ReservedBits should support small malloc / aligned globals");
-static_assert(js::jit::CodeAlignment > JS_BITMASK(Cell::ReservedBits),
+static_assert(js::jit::CodeAlignment > BitMask(Cell::ReservedBits),
              "Cell::ReservedBits should support JIT code");
 static_assert(mozilla::ArrayLength(slotsToThingKind) ==
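
Aside: these asserts encode the usual tagged-pointer invariant: if every
allocator's alignment exceeds BitMask(Cell::ReservedBits), the low ReservedBits
of any cell pointer are guaranteed to be zero and can carry flags such as
FORWARD_BIT. A minimal sketch of that trick (illustrative, not SpiderMonkey
code):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t ReservedBits = 2;
    constexpr uintptr_t ReservedMask = (uintptr_t(1) << ReservedBits) - 1;

    uintptr_t tag(void* p, uintptr_t flags) {
      assert((uintptr_t(p) & ReservedMask) == 0);  // alignment keeps these zero
      assert((flags & ~ReservedMask) == 0);
      return uintptr_t(p) | flags;
    }

    void* untag(uintptr_t word) {
      return reinterpret_cast<void*>(word & ~ReservedMask);
    }

    int main() {
      alignas(8) static int cell = 0;  // 8 > BitMask(2): low two bits are free
      uintptr_t word = tag(&cell, 1);  // e.g. a FORWARD_BIT-style flag
      assert(untag(word) == &cell);
    }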


@@ -100,7 +100,7 @@ namespace js {
 template <typename T>
 static inline T* AllocateObjectBuffer(JSContext* cx, uint32_t count) {
-  size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
   T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
   if (!buffer) {
     ReportOutOfMemory(cx);
@@ -114,7 +114,7 @@ static inline T* AllocateObjectBuffer(JSContext* cx, JSObject* obj,
   if (cx->isHelperThreadContext()) {
     return cx->pod_malloc<T>(count);
   }
-  size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
   T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(obj, nbytes));
   if (!buffer) {
     ReportOutOfMemory(cx);


@@ -140,8 +140,8 @@ void js::NurseryDecommitTask::queueRange(
   // Only save this to decommit later if there's at least one page to
   // decommit.
-  if (JS_ROUNDUP(newCapacity, SystemPageSize()) >=
-      JS_ROUNDDOWN(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
+  if (RoundUp(newCapacity, SystemPageSize()) >=
+      RoundDown(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
     // Clear the existing decommit request because it may be a larger request
     // for the same chunk.
     partialChunk = nullptr;
@@ -384,7 +384,7 @@ void js::Nursery::enterZealMode() {
                          JS_FRESH_NURSERY_PATTERN,
                          MemCheckKind::MakeUndefined);
     }
-    capacity_ = JS_ROUNDUP(tunables().gcMaxNurseryBytes(), ChunkSize);
+    capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
     setCurrentEnd();
   }
 }
@@ -445,8 +445,7 @@ Cell* js::Nursery::allocateString(Zone* zone, size_t size, AllocKind kind) {
   // RelocationOverlay.
   MOZ_ASSERT(size >= sizeof(RelocationOverlay));
-  size_t allocSize =
-      JS_ROUNDUP(sizeof(StringLayout) - 1 + size, CellAlignBytes);
+  size_t allocSize = RoundUp(sizeof(StringLayout) - 1 + size, CellAlignBytes);
   auto header = static_cast<StringLayout*>(allocate(allocSize));
   if (!header) {
     return nullptr;
@@ -1327,7 +1326,7 @@ bool js::Nursery::allocateNextChunk(const unsigned chunkno,
   MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
              (chunkno == 0 && allocatedChunkCount() == 0));
   MOZ_ASSERT(chunkno == allocatedChunkCount());
-  MOZ_ASSERT(chunkno < JS_HOWMANY(capacity(), ChunkSize));
+  MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
   if (!chunks_.resize(newCount)) {
     return false;
@@ -1442,10 +1441,10 @@ bool js::Nursery::maybeResizeExact(JS::GCReason reason) {
 size_t js::Nursery::roundSize(size_t size) {
   if (size >= ChunkSize) {
-    size = JS_ROUND(size, ChunkSize);
+    size = Round(size, ChunkSize);
   } else {
-    size = Min(JS_ROUND(size, SubChunkStep),
-               JS_ROUNDDOWN(NurseryChunkUsableSize, SubChunkStep));
+    size = Min(Round(size, SubChunkStep),
+               RoundDown(NurseryChunkUsableSize, SubChunkStep));
   }
   MOZ_ASSERT(size >= ArenaSize);
   return size;
@@ -1529,7 +1528,7 @@ void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
   }
   MOZ_ASSERT(newCapacity < capacity_);
-  unsigned newCount = JS_HOWMANY(newCapacity, ChunkSize);
+  unsigned newCount = HowMany(newCapacity, ChunkSize);
   if (newCount < allocatedChunkCount()) {
     freeChunksFrom(newCount);
   }


@@ -194,7 +194,7 @@ class Nursery {
   // collection.
   unsigned maxChunkCount() const {
     MOZ_ASSERT(capacity());
-    return JS_HOWMANY(capacity(), gc::ChunkSize);
+    return HowMany(capacity(), gc::ChunkSize);
   }
   void enable();


@@ -927,9 +927,9 @@ static void AllocateAndInitTypedArrayBuffer(JSContext* cx,
   size_t nbytes = count * obj->bytesPerElement();
   MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-             "JS_ROUNDUP must not overflow");
+             "RoundUp must not overflow");
 
-  nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+  nbytes = RoundUp(nbytes, sizeof(Value));
   void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                  js::ArrayBufferContentsArena);
   if (buf) {
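
Aside: the assert guards the rounding itself. RoundUp(x, y) evaluates
(x + y - 1) / y * y, so the intermediate x + y - 1 must not wrap where the
values flow through 32-bit arithmetic, and the rounded result must still fit
the 32-bit length bookkeeping; validating nbytes + sizeof(Value) as a
CheckedUint32 covers both. A sketch of the wrap it rules out (illustrative):

    #include <cstdint>

    // With 32-bit arithmetic, x + y - 1 can wrap and the rounding returns 0.
    constexpr uint32_t RoundUp32(uint32_t x, uint32_t y) {
      return (x + y - 1) / y * y;
    }
    static_assert(RoundUp32(0xFFFFFFFDu, 8) == 0,
                  "wrapped result: what the CheckedUint32 guard rules out");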


@@ -659,8 +659,7 @@ void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
     void (*copyWord)(uint8_t * dest, const uint8_t* src);
     if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
-      const uint8_t* cutoff =
-          (const uint8_t*)JS_ROUNDUP(uintptr_t(src), WORDSIZE);
+      const uint8_t* cutoff = (const uint8_t*)RoundUp(uintptr_t(src), WORDSIZE);
       MOZ_ASSERT(cutoff <= lim);  // because nbytes >= WORDSIZE
       while (src < cutoff) {
         AtomicCopyByteUnsynchronized(dest++, src++);
@@ -861,7 +860,7 @@ bool InitializeJittedAtomics() {
   // Allocate executable memory.
   uint32_t codeLength = masm.bytesNeeded();
-  size_t roundedCodeLength = JS_ROUNDUP(codeLength, ExecutableCodePageSize);
+  size_t roundedCodeLength = RoundUp(codeLength, ExecutableCodePageSize);
   uint8_t* code = (uint8_t*)AllocateExecutableMemory(
       roundedCodeLength, ProtectionSetting::Writable,
       MemCheckKind::MakeUndefined);


@@ -25,6 +25,9 @@
 #include "mozilla/Casting.h"
 #include "mozilla/Types.h"
+#include <stddef.h>
+#include <stdint.h>
+
 // jstypes.h is (or should be!) included by every file in SpiderMonkey.
 // js-config.h also should be included by every file. So include it here.
 // XXX: including it in js/RequiredDefines.h should be a better option, since
@@ -77,24 +80,34 @@
   while (0)
 /***********************************************************************
-** MACROS:      JS_BIT
-**              JS_BITMASK
+** FUNCTIONS:   Bit
+**              BitMask
 ** DESCRIPTION:
-**      Bit masking macros.  XXX n must be <= 31 to be portable
+**      Bit masking functions.  XXX n must be <= 31 to be portable
 ***********************************************************************/
-#define JS_BIT(n) ((uint32_t)1 << (n))
-#define JS_BITMASK(n) (JS_BIT(n) - 1)
+namespace js {
+constexpr uint32_t Bit(uint32_t n) { return uint32_t(1) << n; }
+constexpr uint32_t BitMask(uint32_t n) { return Bit(n) - 1; }
+}  // namespace js
 /***********************************************************************
-** MACROS:      JS_HOWMANY
-**              JS_ROUNDUP
+** FUNCTIONS:   HowMany
+**              RoundUp
+**              RoundDown
+**              Round
 ** DESCRIPTION:
-**      Commonly used macros for operations on compatible types.
+**      Commonly used functions for operations on compatible types.
 ***********************************************************************/
-#define JS_HOWMANY(x, y) (((x) + (y)-1) / (y))
-#define JS_ROUNDUP(x, y) (JS_HOWMANY(x, y) * (y))
-#define JS_ROUNDDOWN(x, y) (((x) / (y)) * (y))
-#define JS_ROUND(x, y) ((((x) + (y) / 2) / (y)) * (y))
+namespace js {
+constexpr size_t HowMany(size_t x, size_t y) { return (x + y - 1) / y; }
+constexpr size_t RoundUp(size_t x, size_t y) { return HowMany(x, y) * y; }
+constexpr size_t RoundDown(size_t x, size_t y) { return (x / y) * y; }
+constexpr size_t Round(size_t x, size_t y) { return ((x + y / 2) / y) * y; }
+}  // namespace js
 #if defined(JS_64BIT)
 #  define JS_BITS_PER_WORD 64
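
Because the replacements are constexpr, their behavior is checkable at compile
time; a few illustrative value checks (not part of the patch):

    static_assert(js::Bit(4) == 16, "1 << 4");
    static_assert(js::BitMask(4) == 15, "the low four bits");
    static_assert(js::HowMany(10, 4) == 3, "ceil(10 / 4)");
    static_assert(js::RoundUp(10, 4) == 12, "up to the next multiple of 4");
    static_assert(js::RoundDown(10, 4) == 8, "down to the previous multiple of 4");
    static_assert(js::Round(10, 4) == 12, "to the nearest multiple of 4");

Used in a constant expression, js::Bit(32) fails to compile because the
oversized shift is undefined behavior; at run time it remains undefined, hence
the retained "n must be <= 31" comment.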


@@ -759,7 +759,7 @@ static bool CreateSpecificWasmBuffer(
   uint32_t cur = clampedMaxSize.value() / 2;
   for (; cur > initialSize; cur /= 2) {
-    uint32_t clampedMaxSize = JS_ROUNDUP(cur, wasm::PageSize);
+    uint32_t clampedMaxSize = RoundUp(cur, wasm::PageSize);
     buffer = RawbufT::Allocate(initialSize, Some(clampedMaxSize), mappedSize);
     if (buffer) {
       break;
@@ -774,7 +774,7 @@ static bool CreateSpecificWasmBuffer(
     // Try to grow our chunk as much as possible.
     for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) {
-      buffer->tryGrowMaxSizeInPlace(JS_ROUNDUP(d, wasm::PageSize));
+      buffer->tryGrowMaxSizeInPlace(RoundUp(d, wasm::PageSize));
     }
   }
@@ -974,7 +974,7 @@ inline size_t ArrayBufferObject::associatedBytes() const {
   if (bufferKind() == MALLOCED) {
     return byteLength();
   } else if (bufferKind() == MAPPED) {
-    return JS_ROUNDUP(byteLength(), js::gc::SystemPageSize());
+    return RoundUp(byteLength(), js::gc::SystemPageSize());
   } else {
     MOZ_CRASH("Unexpected buffer kind");
   }
@@ -1168,7 +1168,7 @@ ArrayBufferObject* ArrayBufferObject::createForContents(
   } else if (contents.kind() == EXTERNAL) {
     // Store the FreeInfo in the inline data slots so that we
     // don't use up slots for it in non-refcounted array buffers.
-    size_t freeInfoSlots = JS_HOWMANY(sizeof(FreeInfo), sizeof(Value));
+    size_t freeInfoSlots = HowMany(sizeof(FreeInfo), sizeof(Value));
     MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS,
                "FreeInfo must fit in inline slots");
     nslots += freeInfoSlots;
@@ -1176,7 +1176,7 @@ ArrayBufferObject* ArrayBufferObject::createForContents(
   // The ABO is taking ownership, so account the bytes against the zone.
   nAllocated = nbytes;
   if (contents.kind() == MAPPED) {
-    nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize());
+    nAllocated = RoundUp(nbytes, js::gc::SystemPageSize());
   } else {
     MOZ_ASSERT(contents.kind() == MALLOCED,
               "should have handled all possible callers' kinds");
@@ -1220,7 +1220,7 @@ ArrayBufferObject* ArrayBufferObject::createZeroed(
   size_t nslots = JSCLASS_RESERVED_SLOTS(&class_);
   uint8_t* data;
   if (nbytes <= MaxInlineBytes) {
-    int newSlots = JS_HOWMANY(nbytes, sizeof(Value));
+    int newSlots = HowMany(nbytes, sizeof(Value));
     MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value)));
     nslots += newSlots;


@@ -48,7 +48,7 @@ class BigInt final
  private:
   // The low NumFlagBitsReservedForGC flag bits are reserved.
-  static constexpr uintptr_t SignBit = JS_BIT(Base::NumFlagBitsReservedForGC);
+  static constexpr uintptr_t SignBit = js::Bit(Base::NumFlagBitsReservedForGC);
   static constexpr size_t InlineDigitsLength =
       (js::gc::MinCellSize - sizeof(Base)) / sizeof(Digit);


@@ -45,6 +45,13 @@ namespace frontend {
 class TokenStreamAnyChars;
 }
+// Temporary definitions until irregexp is updated from upstream.
+namespace irregexp {
+
+constexpr size_t JS_HOWMANY(size_t x, size_t y) { return (x + y - 1) / y; }
+
+constexpr size_t JS_ROUNDUP(size_t x, size_t y) { return JS_HOWMANY(x, y) * y; }
+
+}  // namespace irregexp
 extern RegExpObject* RegExpAlloc(JSContext* cx, NewObjectKind newKind,
                                  HandleObject proto = nullptr);


@@ -291,7 +291,7 @@ MOZ_ALWAYS_INLINE ShapeTable::Entry& ShapeTable::searchUnchecked(jsid id) {
   /* Collision: double hash. */
   uint32_t sizeLog2 = HASH_BITS - hashShift_;
   HashNumber hash2 = Hash2(hash0, sizeLog2, hashShift_);
-  uint32_t sizeMask = JS_BITMASK(sizeLog2);
+  uint32_t sizeMask = BitMask(sizeLog2);
   /* Save the first removed entry pointer so we can recycle it if adding. */
   Entry* firstRemoved;


@@ -46,7 +46,7 @@ bool ShapeIC::init(JSContext* cx) {
 bool ShapeTable::init(JSContext* cx, Shape* lastProp) {
   uint32_t sizeLog2 = CeilingLog2Size(entryCount_);
-  uint32_t size = JS_BIT(sizeLog2);
+  uint32_t size = Bit(sizeLog2);
   if (entryCount_ >= size - (size >> 2)) {
     sizeLog2++;
   }
@@ -54,7 +54,7 @@ bool ShapeTable::init(JSContext* cx, Shape* lastProp) {
     sizeLog2 = MIN_SIZE_LOG2;
   }
-  size = JS_BIT(sizeLog2);
+  size = Bit(sizeLog2);
   entries_.reset(cx->pod_calloc<Entry>(size));
   if (!entries_) {
     return false;
@@ -225,8 +225,8 @@ bool ShapeTable::change(JSContext* cx, int log2Delta) {
   */
   uint32_t oldLog2 = HASH_BITS - hashShift_;
   uint32_t newLog2 = oldLog2 + log2Delta;
-  uint32_t oldSize = JS_BIT(oldLog2);
-  uint32_t newSize = JS_BIT(newLog2);
+  uint32_t oldSize = Bit(oldLog2);
+  uint32_t newSize = Bit(newLog2);
   Entry* newTable = cx->maybe_pod_calloc<Entry>(newSize);
   if (!newTable) {
     return false;
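
Aside: the grow test in init() reads as "entry count at or above three quarters
of capacity": for the power-of-two sizes produced by Bit(sizeLog2),
size - (size >> 2) is exactly 0.75 * size, matching the load-factor comment in
ShapeTable's header. Illustrative checks:

    static_assert(8 - (8 >> 2) == 6, "a table of 8 grows at 6 entries");
    static_assert(16 - (16 >> 2) == 12, "a table of 16 grows at 12 entries");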


@@ -207,8 +207,8 @@ typedef JSGetterOp GetterOp;
 typedef JSSetterOp SetterOp;
 /* Limit on the number of slotful properties in an object. */
-static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
-static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
+static const uint32_t SHAPE_INVALID_SLOT = Bit(24) - 1;
+static const uint32_t SHAPE_MAXIMUM_SLOT = Bit(24) - 2;
 enum class MaybeAdding { Adding = true, NotAdding = false };
@@ -337,7 +337,7 @@ class ShapeTable {
   // This value is low because it's common for a ShapeTable to be created
   // with an entryCount of zero.
   static const uint32_t MIN_SIZE_LOG2 = 2;
-  static const uint32_t MIN_SIZE = JS_BIT(MIN_SIZE_LOG2);
+  static const uint32_t MIN_SIZE = Bit(MIN_SIZE_LOG2);
   uint32_t hashShift_; /* multiplicative hash shift */
@@ -415,7 +415,7 @@ class ShapeTable {
   }
   // By definition, hashShift = HASH_BITS - log2(capacity).
-  uint32_t capacity() const { return JS_BIT(HASH_BITS - hashShift_); }
+  uint32_t capacity() const { return Bit(HASH_BITS - hashShift_); }
   // Whether we need to grow. We want to do this if the load factor
   // is >= 0.75
@@ -903,7 +903,7 @@ class Shape : public gc::TenuredCell {
   // For other shapes in the property tree with a parent, stores the
   // parent's slot index (which may be invalid), and invalid for all
   // other shapes.
-  SLOT_MASK = JS_BIT(24) - 1,
+  SLOT_MASK = BitMask(24),
   // Number of fixed slots in objects with this shape.
   // FIXED_SLOTS_MAX is the biggest count of fixed slots a Shape can store.


@@ -262,17 +262,17 @@ class JSString : public js::gc::CellWithLengthAndFlags<js::gc::Cell> {
                 "JSString::flags must reserve enough bits for Cell");
   static const uint32_t NON_ATOM_BIT = js::gc::Cell::JSSTRING_BIT;
-  static const uint32_t LINEAR_BIT = JS_BIT(4);
-  static const uint32_t DEPENDENT_BIT = JS_BIT(5);
-  static const uint32_t INLINE_CHARS_BIT = JS_BIT(6);
+  static const uint32_t LINEAR_BIT = js::Bit(4);
+  static const uint32_t DEPENDENT_BIT = js::Bit(5);
+  static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
   static const uint32_t EXTENSIBLE_FLAGS =
-      NON_ATOM_BIT | LINEAR_BIT | JS_BIT(7);
-  static const uint32_t EXTERNAL_FLAGS = NON_ATOM_BIT | LINEAR_BIT | JS_BIT(8);
+      NON_ATOM_BIT | LINEAR_BIT | js::Bit(7);
+  static const uint32_t EXTERNAL_FLAGS = NON_ATOM_BIT | LINEAR_BIT | js::Bit(8);
 
-  static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | JS_BIT(7);
-  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t PERMANENT_ATOM_FLAGS = JS_BIT(8);
+  static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | js::Bit(7);
+  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t PERMANENT_ATOM_FLAGS = js::Bit(8);
   /* Initial flags for thin inline and fat inline strings. */
   static const uint32_t INIT_THIN_INLINE_FLAGS =
@@ -285,14 +285,14 @@ class JSString : public js::gc::CellWithLengthAndFlags<js::gc::Cell> {
       NON_ATOM_BIT | LINEAR_BIT | DEPENDENT_BIT;
   static const uint32_t TYPE_FLAGS_MASK =
-      JS_BITMASK(9) - JS_BITMASK(3) + js::gc::Cell::JSSTRING_BIT;
+      js::BitMask(9) - js::BitMask(3) + js::gc::Cell::JSSTRING_BIT;
 
-  static const uint32_t LATIN1_CHARS_BIT = JS_BIT(9);
+  static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
 
-  static const uint32_t INDEX_VALUE_BIT = JS_BIT(10);
+  static const uint32_t INDEX_VALUE_BIT = js::Bit(10);
   static const uint32_t INDEX_VALUE_SHIFT = 16;
 
-  static const uint32_t PINNED_ATOM_BIT = JS_BIT(11);
+  static const uint32_t PINNED_ATOM_BIT = js::Bit(11);
   static const uint32_t MAX_LENGTH = js::MaxStringLength;


@@ -2036,14 +2036,14 @@ JSString* JSStructuredCloneReader::readStringImpl(uint32_t nchars) {
 }
 JSString* JSStructuredCloneReader::readString(uint32_t data) {
-  uint32_t nchars = data & JS_BITMASK(31);
+  uint32_t nchars = data & BitMask(31);
   bool latin1 = data & (1 << 31);
   return latin1 ? readStringImpl<Latin1Char>(nchars)
                 : readStringImpl<char16_t>(nchars);
 }
 BigInt* JSStructuredCloneReader::readBigInt(uint32_t data) {
-  size_t length = data & JS_BITMASK(31);
+  size_t length = data & BitMask(31);
   bool isNegative = data & (1 << 31);
   if (length == 0) {
     return BigInt::zero(context());
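
Aside: both readers decode the same packing: a 31-bit length in the low bits
and one type bit (latin1 / isNegative) in bit 31. A round-trip sketch of that
layout (illustrative, not the serializer's actual writing code):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t BitMask(uint32_t n) { return (uint32_t(1) << n) - 1; }

    uint32_t pack(uint32_t length, bool flag) {
      assert(length <= BitMask(31));
      return length | (uint32_t(flag) << 31);
    }

    int main() {
      uint32_t data = pack(12345, true);
      assert((data & BitMask(31)) == 12345);  // length field
      assert((data & (1u << 31)) != 0);       // latin1 / isNegative bit
    }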


@@ -124,7 +124,7 @@ bool TypedArrayObject::ensureHasBuffer(JSContext* cx,
   // If the object is in the nursery, the buffer will be freed by the next
   // nursery GC. Free the data slot pointer if the object has no inline data.
-  size_t nbytes = JS_ROUNDUP(tarray->byteLength(), sizeof(Value));
+  size_t nbytes = RoundUp(tarray->byteLength(), sizeof(Value));
   Nursery& nursery = cx->nursery();
   if (tarray->isTenured() && !tarray->hasInlineElements() &&
       !nursery.isInside(tarray->elements())) {
@@ -170,7 +170,7 @@ void TypedArrayObject::finalize(JSFreeOp* fop, JSObject* obj) {
   // Free the data slot pointer if it does not point into the old JSObject.
   if (!curObj->hasInlineElements()) {
-    size_t nbytes = JS_ROUNDUP(curObj->byteLength(), sizeof(Value));
+    size_t nbytes = RoundUp(curObj->byteLength(), sizeof(Value));
     fop->free_(obj, curObj->elements(), nbytes, MemoryUse::TypedArrayElements);
   }
 }
@@ -207,7 +207,7 @@ size_t TypedArrayObject::objectMoved(JSObject* obj, JSObject* old) {
   Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
   if (!nursery.isInside(buf)) {
     nursery.removeMallocedBuffer(buf);
-    size_t nbytes = JS_ROUNDUP(newObj->byteLength(), sizeof(Value));
+    size_t nbytes = RoundUp(newObj->byteLength(), sizeof(Value));
     AddCellMemory(newObj, nbytes, MemoryUse::TypedArrayElements);
     return 0;
   }
@@ -236,10 +236,10 @@ size_t TypedArrayObject::objectMoved(JSObject* obj, JSObject* old) {
   } else {
     MOZ_ASSERT(!oldObj->hasInlineElements());
     MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-               "JS_ROUNDUP must not overflow");
+               "RoundUp must not overflow");
 
     AutoEnterOOMUnsafeRegion oomUnsafe;
-    nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+    nbytes = RoundUp(nbytes, sizeof(Value));
     void* data = newObj->zone()->pod_arena_malloc<uint8_t>(
         js::ArrayBufferContentsArena, nbytes);
     if (!data) {
@@ -566,9 +566,9 @@ class TypedArrayObjectTemplate : public TypedArrayObject {
     if (!fitsInline) {
       MOZ_ASSERT(len > 0);
       MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-                 "JS_ROUNDUP must not overflow");
+                 "RoundUp must not overflow");
 
-      nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+      nbytes = RoundUp(nbytes, sizeof(Value));
       buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                js::ArrayBufferContentsArena);
       if (!buf) {


@@ -107,7 +107,7 @@ CodeSegment::~CodeSegment() {
 static uint32_t RoundupCodeLength(uint32_t codeLength) {
   // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
-  return JS_ROUNDUP(codeLength, ExecutableCodePageSize);
+  return RoundUp(codeLength, ExecutableCodePageSize);
 }
 /* static */


@@ -1553,7 +1553,7 @@ class XPCWrappedNative final : public nsIXPConnectWrappedNative {
  private:
   enum {
     // Flags bits for mFlatJSObject:
-    FLAT_JS_OBJECT_VALID = JS_BIT(0)
+    FLAT_JS_OBJECT_VALID = js::Bit(0)
   };
   bool Init(JSContext* cx, nsIXPCScriptable* scriptable);