Bug 1081379 - Optimize ArrayBuffer.transfer for 64-bit asm.js-ified ArrayBuffers (r=sfink)

--HG--
extra : rebase_source : fc2af81e096354a1c12e51b08e5886a3744a77c0
Luke Wagner 2014-10-17 00:15:30 -05:00
parent 4db14ca832
commit 33d331b7e1
3 changed files with 193 additions and 20 deletions

@@ -235,6 +235,54 @@ assertEq(get(4), 13);
set(BUF_CHANGE_MIN, 262);
assertEq(get(BUF_CHANGE_MIN), 0);
if (ArrayBuffer.transfer) {
    var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
    var {get, set, changeHeap} = asmLink(m, this, null, buf1);
    set(0, 100);
    set(BUF_CHANGE_MIN - 4, 101);
    set(BUF_CHANGE_MIN, 102);
    var buf2 = ArrayBuffer.transfer(buf1);
    assertEq(changeHeap(buf2), true);
    assertEq(buf1.byteLength, 0);
    assertEq(buf2.byteLength, BUF_CHANGE_MIN);
    assertEq(get(0), 100);
    assertEq(get(BUF_CHANGE_MIN-4), 101);
    assertEq(get(BUF_CHANGE_MIN), 0);
    assertEq(get(2*BUF_CHANGE_MIN-4), 0);
    var buf3 = ArrayBuffer.transfer(buf2, 3*BUF_CHANGE_MIN);
    assertEq(changeHeap(buf3), true);
    assertEq(buf2.byteLength, 0);
    assertEq(buf3.byteLength, 3*BUF_CHANGE_MIN);
    assertEq(get(0), 100);
    assertEq(get(BUF_CHANGE_MIN-4), 101);
    assertEq(get(BUF_CHANGE_MIN), 0);
    assertEq(get(2*BUF_CHANGE_MIN), 0);
    set(BUF_CHANGE_MIN, 102);
    set(2*BUF_CHANGE_MIN, 103);
    assertEq(get(BUF_CHANGE_MIN), 102);
    assertEq(get(2*BUF_CHANGE_MIN), 103);
    var buf4 = ArrayBuffer.transfer(buf3, 2*BUF_CHANGE_MIN);
    assertEq(changeHeap(buf4), true);
    assertEq(buf3.byteLength, 0);
    assertEq(buf4.byteLength, 2*BUF_CHANGE_MIN);
    assertEq(get(0), 100);
    assertEq(get(BUF_CHANGE_MIN-4), 101);
    assertEq(get(BUF_CHANGE_MIN), 102);
    assertEq(get(2*BUF_CHANGE_MIN), 0);
    var buf5 = ArrayBuffer.transfer(buf4, 3*BUF_CHANGE_MIN);
    assertEq(changeHeap(buf5), true);
    assertEq(buf4.byteLength, 0);
    assertEq(buf5.byteLength, 3*BUF_CHANGE_MIN);
    assertEq(get(0), 100);
    assertEq(get(BUF_CHANGE_MIN-4), 101);
    assertEq(get(BUF_CHANGE_MIN), 102);
    assertEq(get(2*BUF_CHANGE_MIN), 0);
    var buf6 = ArrayBuffer.transfer(buf5, 0);
    assertEq(buf5.byteLength, 0);
    assertEq(buf6.byteLength, 0);
    assertEq(changeHeap(buf6), false);
}
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
var buf2 = new ArrayBuffer(BUF_CHANGE_MIN);
var m = asmCompile('glob', 'ffis', 'b', USE_ASM +
@@ -292,3 +340,16 @@ changeToBuf = buf1;
assertEq(test(0), 155);
changeToBuf = buf1;
assertEq(test(0), 126);
if (ArrayBuffer.transfer) {
    var buf = new ArrayBuffer(BUF_CHANGE_MIN);
    new Int32Array(buf)[0] = 3;
    var ffi = function() {
        var buf2 = ArrayBuffer.transfer(buf, 2*BUF_CHANGE_MIN);
        new Int32Array(buf2)[BUF_CHANGE_MIN/4] = 13;
        assertEq(changeHeap(buf2), true);
        return 1
    }
    var {test, changeHeap} = asmLink(m, this, {ffi:ffi}, buf);
    assertEq(test(BUF_CHANGE_MIN), 14);
}

@@ -1,4 +1,5 @@
load(libdir + "asserts.js");
load(libdir + "asm.js");
// Currently, ArrayBuffer.transfer is #ifdef NIGHTLY_BUILD. When
// ArrayBuffer.transfer is enabled on release, this test should be removed.
@@ -83,3 +84,30 @@ test(4, 1000);
test(1000, 0);
test(1000, 4);
test(1000, 1000);
// asm.js:
function testAsmJS(N1, N2) {
    var buf1 = new ArrayBuffer(N1);
    asmLink(asmCompile('stdlib', 'ffis', 'buf', USE_ASM + "var i32=new stdlib.Int32Array(buf); function f() {} return f"), this, null, buf1);
    var i32 = new Int32Array(buf1);
    for (var i = 0; i < i32.length; i+=100)
        i32[i] = i;
    var buf2 = XF(buf1, N2);
    assertEq(buf1.byteLength, 0);
    assertEq(i32.length, 0);
    assertEq(buf2.byteLength, N2);
    var i32 = new Int32Array(buf2);
    var i = 0;
    for (; i < Math.min(N1, N2)/4; i+=100)
        assertEq(i32[i], i);
    for (; i < i32.length; i+=100) {
        assertEq(i32[i], 0);
        i32[i] = -i;
    }
}
testAsmJS(BUF_MIN, 0);
testAsmJS(BUF_MIN, BUF_MIN);
testAsmJS(BUF_MIN, 2*BUF_MIN);
testAsmJS(2*BUF_MIN, BUF_MIN);
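
The ArrayBufferObject.cpp hunks below carry the actual optimization: on x64, an asm.js-ified buffer already owns a 4gb virtual mapping, so transfer can grow or shrink the accessible prefix in place (commit/decommit pages) instead of reallocating and copying. The following standalone sketch is not part of this changeset; it only illustrates that POSIX reserve/commit/decommit pattern (the patch uses VirtualAlloc/VirtualFree for the same purpose on Windows). The sizes and the names PageSize, Reserved, and fail are hypothetical, and a 64-bit POSIX system is assumed.

// Illustrative sketch only, not part of the patch: grow and shrink an
// accessible prefix inside a large reserved mapping, in place.
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const size_t PageSize = 4096;
static const size_t Reserved = size_t(4) * 1024 * 1024 * 1024;  // like the 4gb asm.js mapping

static void fail(const char *msg) { perror(msg); exit(EXIT_FAILURE); }

int main()
{
    // Reserve address space only; no pages are accessible yet (PROT_NONE).
    void *base = mmap(nullptr, Reserved, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED)
        fail("reserve");

    size_t oldLen = 16 * PageSize;
    size_t newLen = 64 * PageSize;

    // Make the initial prefix accessible and write to it.
    if (mprotect(base, oldLen, PROT_READ | PROT_WRITE) != 0)
        fail("commit initial prefix");
    static_cast<uint8_t *>(base)[0] = 1;

    uint8_t *delta = static_cast<uint8_t *>(base) + oldLen;

    // Grow: map fresh anonymous pages over the delta with MAP_FIXED, so the
    // new region is readable/writable and already zero-filled.
    if (mmap(delta, newLen - oldLen, PROT_READ | PROT_WRITE,
             MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == MAP_FAILED)
        fail("grow");

    // Shrink back: drop the delta's contents and revoke access while keeping
    // the address space reserved for later regrowth.
    if (madvise(delta, newLen - oldLen, MADV_DONTNEED) != 0 ||
        mprotect(delta, newLen - oldLen, PROT_NONE) != 0)
        fail("shrink");

    printf("prefix[0] = %d\n", static_cast<uint8_t *>(base)[0]);
    munmap(base, Reserved);
    return 0;
}

Mapping zero pages over the growth region with MAP_FIXED is what lets the patch skip an explicit memset, the same point its in-code comment makes.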

@@ -242,7 +242,103 @@ ArrayBufferObject::fun_isView(JSContext *cx, unsigned argc, Value *vp)
    return true;
}
#ifdef JS_CPU_X64
static void
ReleaseAsmJSMappedData(void *base)
{
    MOZ_ASSERT(uintptr_t(base) % AsmJSPageSize == 0);
# ifdef XP_WIN
    VirtualFree(base, 0, MEM_RELEASE);
# else
    munmap(base, AsmJSMappedSize);
# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
    // Tell Valgrind/Memcheck to recommence reporting accesses in the
    // previously-inaccessible region.
    if (AsmJSMappedSize > 0) {
        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, AsmJSMappedSize);
    }
# endif
# endif
}
#else
static void
ReleaseAsmJSMappedData(void *base)
{
    MOZ_CRASH("Only x64 has asm.js mapped buffers");
}
#endif
#ifdef NIGHTLY_BUILD
# ifdef JS_CPU_X64
static bool
TransferAsmJSMappedBuffer(JSContext *cx, CallArgs args, Handle<ArrayBufferObject*> oldBuffer,
                          size_t newByteLength)
{
    size_t oldByteLength = oldBuffer->byteLength();
    MOZ_ASSERT(oldByteLength % AsmJSPageSize == 0);
    MOZ_ASSERT(newByteLength % AsmJSPageSize == 0);
    ArrayBufferObject::BufferContents stolen =
        ArrayBufferObject::stealContents(cx, oldBuffer, /* hasStealableContents = */ true);
    if (!stolen)
        return false;
    MOZ_ASSERT(stolen.kind() == ArrayBufferObject::ASMJS_MAPPED);
    uint8_t *data = stolen.data();
    if (newByteLength > oldByteLength) {
        void *diffStart = data + oldByteLength;
        size_t diffLength = newByteLength - oldByteLength;
# ifdef XP_WIN
        if (!VirtualAlloc(diffStart, diffLength, MEM_COMMIT, PAGE_READWRITE)) {
            ReleaseAsmJSMappedData(data);
            js_ReportOutOfMemory(cx);
            return false;
        }
# else
        // To avoid memset, use MAP_FIXED to clobber the newly-accessible pages
        // with zero pages.
        int flags = MAP_FIXED | MAP_PRIVATE | MAP_ANON;
        if (mmap(diffStart, diffLength, PROT_READ | PROT_WRITE, flags, -1, 0) == MAP_FAILED) {
            ReleaseAsmJSMappedData(data);
            js_ReportOutOfMemory(cx);
            return false;
        }
# endif
    } else if (newByteLength < oldByteLength) {
        void *diffStart = data + newByteLength;
        size_t diffLength = oldByteLength - newByteLength;
# ifdef XP_WIN
        if (!VirtualFree(diffStart, diffLength, MEM_DECOMMIT)) {
            ReleaseAsmJSMappedData(data);
            js_ReportOutOfMemory(cx);
            return false;
        }
# else
        if (madvise(diffStart, diffLength, MADV_DONTNEED) ||
            mprotect(diffStart, diffLength, PROT_NONE))
        {
            ReleaseAsmJSMappedData(data);
            js_ReportOutOfMemory(cx);
            return false;
        }
# endif
    }
    ArrayBufferObject::BufferContents newContents =
        ArrayBufferObject::BufferContents::create<ArrayBufferObject::ASMJS_MAPPED>(data);
    RootedObject newBuffer(cx, ArrayBufferObject::create(cx, newByteLength, newContents));
    if (!newBuffer) {
        ReleaseAsmJSMappedData(data);
        return false;
    }
    args.rval().setObject(*newBuffer);
    return true;
}
# endif // defined(JS_CPU_X64)
/*
 * Experimental implementation of ArrayBuffer.transfer:
 *   https://gist.github.com/andhow/95fb9e49996615764eff
@@ -306,6 +402,13 @@ ArrayBufferObject::fun_transfer(JSContext *cx, unsigned argc, Value *vp)
            return false;
        newData = nullptr;
    } else {
# ifdef JS_CPU_X64
        // With a 4gb mapped asm.js buffer, we can simply enable/disable access
        // to the delta as long as the requested length is page-sized.
        if (oldBuffer->isAsmJSMapped() && (newByteLength % AsmJSPageSize) == 0)
            return TransferAsmJSMappedBuffer(cx, args, oldBuffer, newByteLength);
# endif
        // Since we try to realloc below, only allow stealing malloc'd buffers.
        // If !hasMallocedContents, stealContents will malloc a copy which we
        // can then realloc.
@@ -338,7 +441,7 @@ ArrayBufferObject::fun_transfer(JSContext *cx, unsigned argc, Value *vp)
    args.rval().setObject(*newBuffer);
    return true;
}
#endif
#endif // defined(NIGHTLY_BUILD)
/*
 * new ArrayBuffer(byteLength)
@@ -537,25 +640,6 @@ ArrayBufferObject::prepareForAsmJS(JSContext *cx, Handle<ArrayBufferObject*> buf
    return true;
}
static void
ReleaseAsmJSMappedData(void *base)
{
    MOZ_ASSERT(uintptr_t(base) % AsmJSPageSize == 0);
#ifdef XP_WIN
    VirtualFree(base, 0, MEM_RELEASE);
#else
    munmap(base, AsmJSMappedSize);
# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
    // Tell Valgrind/Memcheck to recommence reporting accesses in the
    // previously-inaccessible region.
    if (AsmJSMappedSize > 0) {
        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, AsmJSMappedSize);
    }
# endif
#endif
}
#else // JS_CODEGEN_X64
bool
ArrayBufferObject::prepareForAsmJS(JSContext *cx, Handle<ArrayBufferObject*> buffer,